Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch kvm-arm64/pkvm_vm_handle into kvmarm-master/next

* kvm-arm64/pkvm_vm_handle:
: pKVM VM handle allocation fixes, courtesy of Fuad Tabba.
:
: From the cover letter (20250909072437.4110547-1-tabba@google.com):
:
: "In pKVM, this handle is allocated when the VM is initialized at the
: hypervisor, which is on the first vCPU run. However, the host starts
: initializing the VM and setting up its data structures earlier. MMU
: notifiers for the VMs are also registered before VM initialization at
: the hypervisor, and rely on the handle to identify the VM.
:
: Therefore, there is a potential gap between when the VM is (partially)
: setup at the host, but still without a valid pKVM handle to identify it
: when communicating with the hypervisor."
KVM: arm64: Reserve pKVM handle during pkvm_init_host_vm()
KVM: arm64: Introduce separate hypercalls for pKVM VM reservation and initialization
KVM: arm64: Consolidate pKVM hypervisor VM initialization logic
KVM: arm64: Separate allocation and insertion of pKVM VM table entries
KVM: arm64: Decouple hyp VM creation state from its handle
KVM: arm64: Clarify comments to distinguish pKVM mode from protected VMs
KVM: arm64: Rename 'host_kvm' to 'kvm' in pKVM host code
KVM: arm64: Rename pkvm.enabled to pkvm.is_protected
KVM: arm64: Add build-time check for duplicate DECLARE_REG use

Signed-off-by: Marc Zyngier <maz@kernel.org>

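To make the gap concrete: before this series, kvm->arch.pkvm.handle stayed zero until the first vCPU run, so an MMU notifier firing earlier had no way to name the VM to the hypervisor. A small user-space sketch of the before/after flow (reserve_vm and mmu_notifier are illustrative stand-ins, not the kernel API; the real reservation is the __pkvm_reserve_vm hypercall added below):

	#include <stdio.h>

	#define HANDLE_OFFSET	0x1000

	/* Stand-in for the __pkvm_reserve_vm hypercall: hands out a handle
	 * before any VM state exists at the hypervisor. */
	static int reserve_vm(void)
	{
		static int next_idx;

		return HANDLE_OFFSET + next_idx++;
	}

	/* Stand-in for an MMU notifier that must name the VM to the hypervisor. */
	static void mmu_notifier(int handle)
	{
		if (!handle)
			printf("notifier: no handle yet - cannot identify the VM\n");
		else
			printf("notifier: hypercall on behalf of VM %#x\n", handle);
	}

	int main(void)
	{
		int handle = 0;

		/* Old flow: the handle only exists after the first vCPU run. */
		mmu_notifier(handle);

		/* New flow: the handle is reserved during host VM initialization,
		 * well before __pkvm_init_vm donates memory and initializes state. */
		handle = reserve_vm();
		mmu_notifier(handle);
		return 0;
	}
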
+221 -75
+2
arch/arm64/include/asm/kvm_asm.h
···
 	__KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,
 	__KVM_HOST_SMCCC_FUNC___vgic_v3_save_vmcr_aprs,
 	__KVM_HOST_SMCCC_FUNC___vgic_v3_restore_vmcr_aprs,
+	__KVM_HOST_SMCCC_FUNC___pkvm_reserve_vm,
+	__KVM_HOST_SMCCC_FUNC___pkvm_unreserve_vm,
 	__KVM_HOST_SMCCC_FUNC___pkvm_init_vm,
 	__KVM_HOST_SMCCC_FUNC___pkvm_init_vcpu,
 	__KVM_HOST_SMCCC_FUNC___pkvm_teardown_vm,
+3 -2
arch/arm64/include/asm/kvm_host.h
···
 	pkvm_handle_t handle;
 	struct kvm_hyp_memcache teardown_mc;
 	struct kvm_hyp_memcache stage2_teardown_mc;
-	bool enabled;
+	bool is_protected;
+	bool is_created;
 };
 
 struct kvm_mpidr_data {
···
 
 #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
 
-#define kvm_vm_is_protected(kvm)	(is_protected_kvm_enabled() && (kvm)->arch.pkvm.enabled)
+#define kvm_vm_is_protected(kvm)	(is_protected_kvm_enabled() && (kvm)->arch.pkvm.is_protected)
 
 #define vcpu_is_protected(vcpu)	kvm_vm_is_protected((vcpu)->kvm)
 
+1
arch/arm64/include/asm/kvm_pkvm.h
···
 
 int pkvm_init_host_vm(struct kvm *kvm);
 int pkvm_create_hyp_vm(struct kvm *kvm);
+bool pkvm_hyp_vm_is_created(struct kvm *kvm);
 void pkvm_destroy_hyp_vm(struct kvm *kvm);
 int pkvm_create_hyp_vcpu(struct kvm_vcpu *vcpu);
 
+10 -4
arch/arm64/kvm/arm.c
···
 	if (ret)
 		return ret;
 
-	ret = pkvm_init_host_vm(kvm);
-	if (ret)
-		goto err_unshare_kvm;
-
 	if (!zalloc_cpumask_var(&kvm->arch.supported_cpus, GFP_KERNEL_ACCOUNT)) {
 		ret = -ENOMEM;
 		goto err_unshare_kvm;
···
 	ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu, type);
 	if (ret)
 		goto err_free_cpumask;
+
+	if (is_protected_kvm_enabled()) {
+		/*
+		 * If any failures occur after this is successful, make sure to
+		 * call __pkvm_unreserve_vm to unreserve the VM in hyp.
+		 */
+		ret = pkvm_init_host_vm(kvm);
+		if (ret)
+			goto err_free_cpumask;
+	}
 
 	kvm_vgic_early_init(kvm);
 
+3 -1
arch/arm64/kvm/hyp/include/nvhe/pkvm.h
···
 };
 
 /*
- * Holds the relevant data for running a protected vm.
+ * Holds the relevant data for running a vm in protected mode.
  */
 struct pkvm_hyp_vm {
 	struct kvm kvm;
···
 
 void pkvm_hyp_vm_table_init(void *tbl);
 
+int __pkvm_reserve_vm(void);
+void __pkvm_unreserve_vm(pkvm_handle_t handle);
 int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
 		   unsigned long pgd_hva);
 int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
+2 -1
arch/arm64/kvm/hyp/include/nvhe/trap_handler.h
···
 #include <asm/kvm_host.h>
 
 #define cpu_reg(ctxt, r)	(ctxt)->regs.regs[r]
-#define DECLARE_REG(type, name, ctxt, reg)	\
+#define DECLARE_REG(type, name, ctxt, reg)		\
+	__always_unused int ___check_reg_ ## reg;	\
 	type name = (type)cpu_reg(ctxt, (reg))
 
 #endif /* __ARM64_KVM_NVHE_TRAP_HANDLER_H__ */
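The DECLARE_REG hunk above is the build-time check from the last patch in the series: each use now also defines a block-scope dummy variable whose name encodes the register index, so passing the same reg twice in one handler becomes a redefinition error instead of a silent bug. A standalone mock-up (user-space stand-ins for __always_unused and struct kvm_cpu_context, which are assumptions here; build with -DSHOW_THE_ERROR to see the failure):

	#define __always_unused __attribute__((unused))

	/* Simplified stand-in for the real kvm_cpu_context. */
	struct kvm_cpu_context { struct { unsigned long regs[31]; } regs; };

	#define cpu_reg(ctxt, r)	(ctxt)->regs.regs[r]
	#define DECLARE_REG(type, name, ctxt, reg)		\
		__always_unused int ___check_reg_ ## reg;	\
		type name = (type)cpu_reg(ctxt, (reg))

	static void handler(struct kvm_cpu_context *ctxt)
	{
		DECLARE_REG(unsigned long, addr, ctxt, 1);
		DECLARE_REG(unsigned long, size, ctxt, 2);	/* fine: distinct regs */
	#ifdef SHOW_THE_ERROR
		DECLARE_REG(unsigned long, dup, ctxt, 2);	/* error: redefinition
								 * of '___check_reg_2' */
	#endif
		(void)addr; (void)size;
	}

	int main(void)
	{
		struct kvm_cpu_context ctxt = {0};

		handler(&ctxt);
		return 0;
	}
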
+14
arch/arm64/kvm/hyp/nvhe/hyp-main.c
···
 	cpu_reg(host_ctxt, 1) = __pkvm_prot_finalize();
 }
 
+static void handle___pkvm_reserve_vm(struct kvm_cpu_context *host_ctxt)
+{
+	cpu_reg(host_ctxt, 1) = __pkvm_reserve_vm();
+}
+
+static void handle___pkvm_unreserve_vm(struct kvm_cpu_context *host_ctxt)
+{
+	DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
+
+	__pkvm_unreserve_vm(handle);
+}
+
 static void handle___pkvm_init_vm(struct kvm_cpu_context *host_ctxt)
 {
 	DECLARE_REG(struct kvm *, host_kvm, host_ctxt, 1);
···
 	HANDLE_FUNC(__kvm_timer_set_cntvoff),
 	HANDLE_FUNC(__vgic_v3_save_vmcr_aprs),
 	HANDLE_FUNC(__vgic_v3_restore_vmcr_aprs),
+	HANDLE_FUNC(__pkvm_reserve_vm),
+	HANDLE_FUNC(__pkvm_unreserve_vm),
 	HANDLE_FUNC(__pkvm_init_vm),
 	HANDLE_FUNC(__pkvm_init_vcpu),
 	HANDLE_FUNC(__pkvm_teardown_vm),
+134 -43
arch/arm64/kvm/hyp/nvhe/pkvm.c
···
 unsigned int kvm_host_sve_max_vl;
 
 /*
- * The currently loaded hyp vCPU for each physical CPU. Used only when
- * protected KVM is enabled, but for both protected and non-protected VMs.
+ * The currently loaded hyp vCPU for each physical CPU. Used in protected mode
+ * for both protected and non-protected VMs.
  */
 static DEFINE_PER_CPU(struct pkvm_hyp_vcpu *, loaded_hyp_vcpu);
 
···
 {
 	struct kvm *kvm = vcpu->kvm;
 
-	/* Protected KVM does not support AArch32 guests. */
+	/* No AArch32 support for protected guests. */
 	if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL0, AARCH32) ||
 	    kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL1, AARCH32))
 		return -EINVAL;
···
  */
 #define HANDLE_OFFSET 0x1000
 
+/*
+ * Marks a reserved but not yet used entry in the VM table.
+ */
+#define RESERVED_ENTRY ((void *)0xa110ca7ed)
+
 static unsigned int vm_handle_to_idx(pkvm_handle_t handle)
 {
 	return handle - HANDLE_OFFSET;
···
 DEFINE_HYP_SPINLOCK(vm_table_lock);
 
 /*
- * The table of VM entries for protected VMs in hyp.
- * Allocated at hyp initialization and setup.
+ * A table that tracks all VMs in protected mode.
+ * Allocated during hyp initialization and setup.
  */
 static struct pkvm_hyp_vm **vm_table;
 
···
 	unsigned int idx = vm_handle_to_idx(handle);
 
 	if (unlikely(idx >= KVM_MAX_PVMS))
+		return NULL;
+
+	/* A reserved entry doesn't represent an initialized VM. */
+	if (unlikely(vm_table[idx] == RESERVED_ENTRY))
 		return NULL;
 
 	return vm_table[idx];
···
 }
 
 static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
-			     unsigned int nr_vcpus)
+			     unsigned int nr_vcpus, pkvm_handle_t handle)
 {
+	struct kvm_s2_mmu *mmu = &hyp_vm->kvm.arch.mmu;
+	int idx = vm_handle_to_idx(handle);
+
+	hyp_vm->kvm.arch.pkvm.handle = handle;
+
 	hyp_vm->host_kvm = host_kvm;
 	hyp_vm->kvm.created_vcpus = nr_vcpus;
-	hyp_vm->kvm.arch.mmu.vtcr = host_mmu.arch.mmu.vtcr;
-	hyp_vm->kvm.arch.pkvm.enabled = READ_ONCE(host_kvm->arch.pkvm.enabled);
+	hyp_vm->kvm.arch.pkvm.is_protected = READ_ONCE(host_kvm->arch.pkvm.is_protected);
+	hyp_vm->kvm.arch.pkvm.is_created = true;
 	hyp_vm->kvm.arch.flags = 0;
 	pkvm_init_features_from_host(hyp_vm, host_kvm);
+
+	/* VMID 0 is reserved for the host */
+	atomic64_set(&mmu->vmid.id, idx + 1);
+
+	mmu->vtcr = host_mmu.arch.mmu.vtcr;
+	mmu->arch = &hyp_vm->kvm.arch;
+	mmu->pgt = &hyp_vm->pgt;
 }
 
 static int pkvm_vcpu_init_sve(struct pkvm_hyp_vcpu *hyp_vcpu, struct kvm_vcpu *host_vcpu)
···
 	return ret;
 }
 
-static int find_free_vm_table_entry(struct kvm *host_kvm)
+static int find_free_vm_table_entry(void)
 {
 	int i;
 
···
 }
 
 /*
- * Allocate a VM table entry and insert a pointer to the new vm.
+ * Reserve a VM table entry.
  *
- * Return a unique handle to the protected VM on success,
+ * Return a unique handle to the VM on success,
  * negative error code on failure.
  */
-static pkvm_handle_t insert_vm_table_entry(struct kvm *host_kvm,
-					   struct pkvm_hyp_vm *hyp_vm)
+static int allocate_vm_table_entry(void)
 {
-	struct kvm_s2_mmu *mmu = &hyp_vm->kvm.arch.mmu;
 	int idx;
 
 	hyp_assert_lock_held(&vm_table_lock);
···
 	if (unlikely(!vm_table))
 		return -EINVAL;
 
-	idx = find_free_vm_table_entry(host_kvm);
-	if (idx < 0)
+	idx = find_free_vm_table_entry();
+	if (unlikely(idx < 0))
 		return idx;
 
-	hyp_vm->kvm.arch.pkvm.handle = idx_to_vm_handle(idx);
+	vm_table[idx] = RESERVED_ENTRY;
 
-	/* VMID 0 is reserved for the host */
-	atomic64_set(&mmu->vmid.id, idx + 1);
+	return idx;
+}
 
-	mmu->arch = &hyp_vm->kvm.arch;
-	mmu->pgt = &hyp_vm->pgt;
+static int __insert_vm_table_entry(pkvm_handle_t handle,
+				   struct pkvm_hyp_vm *hyp_vm)
+{
+	unsigned int idx;
+
+	hyp_assert_lock_held(&vm_table_lock);
+
+	/*
+	 * Initializing protected state might have failed, yet a malicious
+	 * host could trigger this function. Thus, ensure that 'vm_table'
+	 * exists.
+	 */
+	if (unlikely(!vm_table))
+		return -EINVAL;
+
+	idx = vm_handle_to_idx(handle);
+	if (unlikely(idx >= KVM_MAX_PVMS))
+		return -EINVAL;
+
+	if (unlikely(vm_table[idx] != RESERVED_ENTRY))
+		return -EINVAL;
 
 	vm_table[idx] = hyp_vm;
-	return hyp_vm->kvm.arch.pkvm.handle;
+
+	return 0;
+}
+
+/*
+ * Insert a pointer to the initialized VM into the VM table.
+ *
+ * Return 0 on success, or negative error code on failure.
+ */
+static int insert_vm_table_entry(pkvm_handle_t handle,
+				 struct pkvm_hyp_vm *hyp_vm)
+{
+	int ret;
+
+	hyp_spin_lock(&vm_table_lock);
+	ret = __insert_vm_table_entry(handle, hyp_vm);
+	hyp_spin_unlock(&vm_table_lock);
+
+	return ret;
 }
 
 /*
···
 }
 
 /*
- * Initialize the hypervisor copy of the protected VM state using the
- * memory donated by the host.
+ * Reserves an entry in the hypervisor for a new VM in protected mode.
  *
- * Unmaps the donated memory from the host at stage 2.
+ * Return a unique handle to the VM on success, negative error code on failure.
+ */
+int __pkvm_reserve_vm(void)
+{
+	int ret;
+
+	hyp_spin_lock(&vm_table_lock);
+	ret = allocate_vm_table_entry();
+	hyp_spin_unlock(&vm_table_lock);
+
+	if (ret < 0)
+		return ret;
+
+	return idx_to_vm_handle(ret);
+}
+
+/*
+ * Removes a reserved entry, but only if it hasn't been used yet.
+ * Otherwise, the VM needs to be destroyed.
+ */
+void __pkvm_unreserve_vm(pkvm_handle_t handle)
+{
+	unsigned int idx = vm_handle_to_idx(handle);
+
+	if (unlikely(!vm_table))
+		return;
+
+	hyp_spin_lock(&vm_table_lock);
+	if (likely(idx < KVM_MAX_PVMS && vm_table[idx] == RESERVED_ENTRY))
+		remove_vm_table_entry(handle);
+	hyp_spin_unlock(&vm_table_lock);
+}
+
+/*
+ * Initialize the hypervisor copy of the VM state using host-donated memory.
+ *
+ * Unmap the donated memory from the host at stage 2.
  *
  * host_kvm: A pointer to the host's struct kvm.
  * vm_hva: The host va of the area being donated for the VM state.
···
  *	      the VM. Must be page aligned. Its size is implied by the VM's
  *	      VTCR.
  *
- * Return a unique handle to the protected VM on success,
- * negative error code on failure.
+ * Return 0 on success, negative error code on failure.
  */
 int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
 		   unsigned long pgd_hva)
···
 	struct pkvm_hyp_vm *hyp_vm = NULL;
 	size_t vm_size, pgd_size;
 	unsigned int nr_vcpus;
+	pkvm_handle_t handle;
 	void *pgd = NULL;
 	int ret;
 
···
 
 	nr_vcpus = READ_ONCE(host_kvm->created_vcpus);
 	if (nr_vcpus < 1) {
+		ret = -EINVAL;
+		goto err_unpin_kvm;
+	}
+
+	handle = READ_ONCE(host_kvm->arch.pkvm.handle);
+	if (unlikely(handle < HANDLE_OFFSET)) {
 		ret = -EINVAL;
 		goto err_unpin_kvm;
 	}
···
 	if (!pgd)
 		goto err_remove_mappings;
 
-	init_pkvm_hyp_vm(host_kvm, hyp_vm, nr_vcpus);
-
-	hyp_spin_lock(&vm_table_lock);
-	ret = insert_vm_table_entry(host_kvm, hyp_vm);
-	if (ret < 0)
-		goto err_unlock;
+	init_pkvm_hyp_vm(host_kvm, hyp_vm, nr_vcpus, handle);
 
 	ret = kvm_guest_prepare_stage2(hyp_vm, pgd);
 	if (ret)
-		goto err_remove_vm_table_entry;
-	hyp_spin_unlock(&vm_table_lock);
+		goto err_remove_mappings;
 
-	return hyp_vm->kvm.arch.pkvm.handle;
+	/* Must be called last since this publishes the VM. */
+	ret = insert_vm_table_entry(handle, hyp_vm);
+	if (ret)
+		goto err_remove_mappings;
 
-err_remove_vm_table_entry:
-	remove_vm_table_entry(hyp_vm->kvm.arch.pkvm.handle);
-err_unlock:
-	hyp_spin_unlock(&vm_table_lock);
+	return 0;
+
 err_remove_mappings:
 	unmap_donated_memory(hyp_vm, vm_size);
 	unmap_donated_memory(pgd, pgd_size);
···
 }
 
 /*
- * Initialize the hypervisor copy of the protected vCPU state using the
- * memory donated by the host.
+ * Initialize the hypervisor copy of the vCPU state using host-donated memory.
  *
- * handle: The handle for the protected vm.
+ * handle: The hypervisor handle for the vm.
  * host_vcpu: A pointer to the corresponding host vcpu.
  * vcpu_hva: The host va of the area being donated for the vcpu state.
  *	     Must be page aligned. The size of the area must be equal to
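The RESERVED_ENTRY sentinel above gives every vm_table slot three states: NULL (free), RESERVED_ENTRY (handle handed to the host, VM not yet initialized), and a pointer to a live pkvm_hyp_vm; get_vm_by_handle() treats a reserved slot as a miss. A compilable user-space sketch of that state machine (simplified: no vm_table_lock, plain int handles, and the sentinel value assumes 64-bit pointers):

	#include <stdio.h>

	#define MAX_VMS		4
	#define HANDLE_OFFSET	0x1000
	#define RESERVED_ENTRY	((void *)0xa110ca7ed)	/* "allocated" sentinel */

	static void *vm_table[MAX_VMS];

	/* Models allocate_vm_table_entry(): free -> reserved. */
	static int reserve(void)
	{
		for (int i = 0; i < MAX_VMS; i++) {
			if (!vm_table[i]) {
				vm_table[i] = RESERVED_ENTRY;
				return i + HANDLE_OFFSET;
			}
		}
		return -1;
	}

	/* Models __insert_vm_table_entry(): reserved -> initialized. */
	static int publish(int handle, void *vm)
	{
		int idx = handle - HANDLE_OFFSET;

		if (idx < 0 || idx >= MAX_VMS || vm_table[idx] != RESERVED_ENTRY)
			return -1;	/* only a reserved slot may be published */
		vm_table[idx] = vm;	/* lookups by handle now succeed */
		return 0;
	}

	int main(void)
	{
		int vm;			/* stand-in for a real struct pkvm_hyp_vm */
		int handle = reserve();

		printf("reserved handle %#x\n", handle);
		printf("publish: %d\n", publish(handle, &vm));
		printf("double publish: %d\n", publish(handle, &vm));	/* rejected */
		return 0;
	}
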
+52 -24
arch/arm64/kvm/pkvm.c
···
 					hyp_mem_base);
 }
 
-static void __pkvm_destroy_hyp_vm(struct kvm *host_kvm)
+static void __pkvm_destroy_hyp_vm(struct kvm *kvm)
 {
-	if (host_kvm->arch.pkvm.handle) {
+	if (pkvm_hyp_vm_is_created(kvm)) {
 		WARN_ON(kvm_call_hyp_nvhe(__pkvm_teardown_vm,
-					  host_kvm->arch.pkvm.handle));
+					  kvm->arch.pkvm.handle));
+	} else if (kvm->arch.pkvm.handle) {
+		/*
+		 * The VM could have been reserved but hyp initialization has
+		 * failed. Make sure to unreserve it.
+		 */
+		kvm_call_hyp_nvhe(__pkvm_unreserve_vm, kvm->arch.pkvm.handle);
 	}
 
-	host_kvm->arch.pkvm.handle = 0;
-	free_hyp_memcache(&host_kvm->arch.pkvm.teardown_mc);
-	free_hyp_memcache(&host_kvm->arch.pkvm.stage2_teardown_mc);
+	kvm->arch.pkvm.handle = 0;
+	kvm->arch.pkvm.is_created = false;
+	free_hyp_memcache(&kvm->arch.pkvm.teardown_mc);
+	free_hyp_memcache(&kvm->arch.pkvm.stage2_teardown_mc);
 }
···
  *
  * Return 0 on success, negative error code on failure.
  */
-static int __pkvm_create_hyp_vm(struct kvm *host_kvm)
+static int __pkvm_create_hyp_vm(struct kvm *kvm)
 {
 	size_t pgd_sz, hyp_vm_sz;
 	void *pgd, *hyp_vm;
 	int ret;
 
-	if (host_kvm->created_vcpus < 1)
+	if (kvm->created_vcpus < 1)
 		return -EINVAL;
 
-	pgd_sz = kvm_pgtable_stage2_pgd_size(host_kvm->arch.mmu.vtcr);
+	pgd_sz = kvm_pgtable_stage2_pgd_size(kvm->arch.mmu.vtcr);
 
 	/*
 	 * The PGD pages will be reclaimed using a hyp_memcache which implies
···
 	/* Allocate memory to donate to hyp for vm and vcpu pointers. */
 	hyp_vm_sz = PAGE_ALIGN(size_add(PKVM_HYP_VM_SIZE,
 					size_mul(sizeof(void *),
-						 host_kvm->created_vcpus)));
+						 kvm->created_vcpus)));
 	hyp_vm = alloc_pages_exact(hyp_vm_sz, GFP_KERNEL_ACCOUNT);
 	if (!hyp_vm) {
 		ret = -ENOMEM;
···
 	}
 
 	/* Donate the VM memory to hyp and let hyp initialize it. */
-	ret = kvm_call_hyp_nvhe(__pkvm_init_vm, host_kvm, hyp_vm, pgd);
-	if (ret < 0)
+	ret = kvm_call_hyp_nvhe(__pkvm_init_vm, kvm, hyp_vm, pgd);
+	if (ret)
 		goto free_vm;
 
-	host_kvm->arch.pkvm.handle = ret;
-	host_kvm->arch.pkvm.stage2_teardown_mc.flags |= HYP_MEMCACHE_ACCOUNT_STAGE2;
+	kvm->arch.pkvm.is_created = true;
+	kvm->arch.pkvm.stage2_teardown_mc.flags |= HYP_MEMCACHE_ACCOUNT_STAGE2;
 	kvm_account_pgtable_pages(pgd, pgd_sz / PAGE_SIZE);
 
 	return 0;
···
 	return ret;
 }
 
-int pkvm_create_hyp_vm(struct kvm *host_kvm)
+bool pkvm_hyp_vm_is_created(struct kvm *kvm)
+{
+	return READ_ONCE(kvm->arch.pkvm.is_created);
+}
+
+int pkvm_create_hyp_vm(struct kvm *kvm)
 {
 	int ret = 0;
 
-	mutex_lock(&host_kvm->arch.config_lock);
-	if (!host_kvm->arch.pkvm.handle)
-		ret = __pkvm_create_hyp_vm(host_kvm);
-	mutex_unlock(&host_kvm->arch.config_lock);
+	mutex_lock(&kvm->arch.config_lock);
+	if (!pkvm_hyp_vm_is_created(kvm))
+		ret = __pkvm_create_hyp_vm(kvm);
+	mutex_unlock(&kvm->arch.config_lock);
 
 	return ret;
 }
···
 	return ret;
 }
 
-void pkvm_destroy_hyp_vm(struct kvm *host_kvm)
+void pkvm_destroy_hyp_vm(struct kvm *kvm)
 {
-	mutex_lock(&host_kvm->arch.config_lock);
-	__pkvm_destroy_hyp_vm(host_kvm);
-	mutex_unlock(&host_kvm->arch.config_lock);
+	mutex_lock(&kvm->arch.config_lock);
+	__pkvm_destroy_hyp_vm(kvm);
+	mutex_unlock(&kvm->arch.config_lock);
 }
 
-int pkvm_init_host_vm(struct kvm *host_kvm)
+int pkvm_init_host_vm(struct kvm *kvm)
 {
+	int ret;
+
+	if (pkvm_hyp_vm_is_created(kvm))
+		return -EINVAL;
+
+	/* VM is already reserved, no need to proceed. */
+	if (kvm->arch.pkvm.handle)
+		return 0;
+
+	/* Reserve the VM in hyp and obtain a hyp handle for the VM. */
+	ret = kvm_call_hyp_nvhe(__pkvm_reserve_vm);
+	if (ret < 0)
+		return ret;
+
+	kvm->arch.pkvm.handle = ret;
+
 	return 0;
 }
 
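The result is two distinct teardown paths in __pkvm_destroy_hyp_vm() above: a VM that reached __pkvm_init_vm (is_created set) is torn down with __pkvm_teardown_vm, while one that was only reserved (handle set, is_created still false) is handed back with __pkvm_unreserve_vm, exactly the failure window the comment in kvm_arch_init_vm() warns about.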