Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
"Arm:

- Make sure we don't leak any S1POE state from guest to guest when
the feature is supported on the HW, but not enabled on the host

- Propagate the ID registers from the host into non-protected VMs
managed by pKVM, ensuring that the guest sees the intended feature
set

- Drop double kern_hyp_va() from unpin_host_sve_state(), which could
bite us if we were to change kern_hyp_va() to not being idempotent

- Don't leak stage-2 mappings in protected mode

- Correctly align the faulting address when dealing with single page
stage-2 mappings for PAGE_SIZE > 4kB

- Fix detection of virtualisation-capable GICv5 IRS, due to the
maintainer being obviously fat fingered... [his words, not mine]

- Remove duplication of code retrieving the ASID for the purpose of
S1 PT handling

- Fix slightly abusive const-ification in vgic_set_kvm_info()

Generic:

- Remove internal Kconfigs that are now set on all architectures

- Remove per-architecture code to enable KVM_CAP_SYNC_MMU, all
architectures finally enable it in Linux 7.0"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: always define KVM_CAP_SYNC_MMU
  KVM: remove CONFIG_KVM_GENERIC_MMU_NOTIFIER
  KVM: arm64: Deduplicate ASID retrieval code
  irqchip/gic-v5: Fix inversion of IRS_IDR0.virt flag
  KVM: arm64: Revert accidental drop of kvm_uninit_stage2_mmu() for non-NV VMs
  KVM: arm64: Fix protected mode handling of pages larger than 4kB
  KVM: arm64: vgic: Handle const qualifier from gic_kvm_info allocation type
  KVM: arm64: Remove redundant kern_hyp_va() in unpin_host_sve_state()
  KVM: arm64: Fix ID register initialization for non-protected pKVM guests
  KVM: arm64: Optimise away S1POE handling when not supported by host
  KVM: arm64: Hide S1POE from guests when not supported by the host

26 files changed, +87 -128
+4 -6
Documentation/virt/kvm/api.rst
···
 Memory for the region is taken starting at the address denoted by the
 field userspace_addr, which must point at user addressable memory for
 the entire memory slot size. Any object may back this memory, including
-anonymous memory, ordinary files, and hugetlbfs.
+anonymous memory, ordinary files, and hugetlbfs. Changes in the backing
+of the memory region are automatically reflected into the guest.
+For example, an mmap() that affects the region will be made visible
+immediately. Another example is madvise(MADV_DROP).

 On architectures that support a form of address tagging, userspace_addr must
 be an untagged address.
···
 use it. The latter can be set, if KVM_CAP_READONLY_MEM capability allows it,
 to make a new slot read-only. In this case, writes to this memory will be
 posted to userspace as KVM_EXIT_MMIO exits.
-
-When the KVM_CAP_SYNC_MMU capability is available, changes in the backing of
-the memory region are automatically reflected into the guest. For example, an
-mmap() that affects the region will be made visible immediately. Another
-example is madvise(MADV_DROP).

 For TDX guest, deleting/moving memory region loses guest memory contents.
 Read only region isn't supported. Only as-id 0 is supported.
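
With this series, KVM_CAP_SYNC_MMU is reported unconditionally by the generic code (see the virt/kvm/kvm_main.c hunk at the end of this diff), so the probe below is expected to return 1 on any architecture; older kernels may return 0. A minimal userspace sketch using the standard KVM_CHECK_EXTENSION ioctl on /dev/kvm, error handling trimmed to the essentials:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
        int kvm = open("/dev/kvm", O_RDWR);

        if (kvm < 0) {
            perror("open /dev/kvm");
            return 1;
        }

        /* KVM_CHECK_EXTENSION returns 1 if the capability is present;
         * after this series the generic handler always reports it. */
        printf("KVM_CAP_SYNC_MMU: %d\n",
               ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_SYNC_MMU));
        return 0;
    }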
+2 -1
arch/arm64/include/asm/kvm_host.h
···
         (kvm_has_feat((k), ID_AA64MMFR3_EL1, S1PIE, IMP))

 #define kvm_has_s1poe(k)                                        \
-        (kvm_has_feat((k), ID_AA64MMFR3_EL1, S1POE, IMP))
+        (system_supports_poe() &&                               \
+         kvm_has_feat((k), ID_AA64MMFR3_EL1, S1POE, IMP))

 #define kvm_has_ras(k)                                          \
         (kvm_has_feat((k), ID_AA64PFR0_EL1, RAS, IMP))
+2
arch/arm64/include/asm/kvm_nested.h
···
 int kvm_handle_vncr_abort(struct kvm_vcpu *vcpu);
 void kvm_handle_s1e2_tlbi(struct kvm_vcpu *vcpu, u32 inst, u64 val);

+u16 get_asid_by_regime(struct kvm_vcpu *vcpu, enum trans_regime regime);
+
 #define vncr_fixmap(c)                                          \
         ({                                                      \
                 u32 __c = (c);                                  \
-1
arch/arm64/kvm/Kconfig
···
         bool "Kernel-based Virtual Machine (KVM) support"
         select KVM_COMMON
         select KVM_GENERIC_HARDWARE_ENABLING
-        select KVM_GENERIC_MMU_NOTIFIER
         select HAVE_KVM_CPU_RELAX_INTERCEPT
         select KVM_MMIO
         select KVM_GENERIC_DIRTYLOG_READ_PROTECT
-1
arch/arm64/kvm/arm.c
···
                 break;
         case KVM_CAP_IOEVENTFD:
         case KVM_CAP_USER_MEMORY:
-        case KVM_CAP_SYNC_MMU:
         case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
         case KVM_CAP_ONE_REG:
         case KVM_CAP_ARM_PSCI:
+2 -25
arch/arm64/kvm/at.c
···
         wr->pa |= va & GENMASK_ULL(va_bottom - 1, 0);

         wr->nG = (wi->regime != TR_EL2) && (desc & PTE_NG);
-        if (wr->nG) {
-                u64 asid_ttbr, tcr;
-
-                switch (wi->regime) {
-                case TR_EL10:
-                        tcr = vcpu_read_sys_reg(vcpu, TCR_EL1);
-                        asid_ttbr = ((tcr & TCR_A1) ?
-                                     vcpu_read_sys_reg(vcpu, TTBR1_EL1) :
-                                     vcpu_read_sys_reg(vcpu, TTBR0_EL1));
-                        break;
-                case TR_EL20:
-                        tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
-                        asid_ttbr = ((tcr & TCR_A1) ?
-                                     vcpu_read_sys_reg(vcpu, TTBR1_EL2) :
-                                     vcpu_read_sys_reg(vcpu, TTBR0_EL2));
-                        break;
-                default:
-                        BUG();
-                }
-
-                wr->asid = FIELD_GET(TTBR_ASID_MASK, asid_ttbr);
-                if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
-                    !(tcr & TCR_ASID16))
-                        wr->asid &= GENMASK(7, 0);
-        }
+        if (wr->nG)
+                wr->asid = get_asid_by_regime(vcpu, wi->regime);

         return 0;
+34 -3
arch/arm64/kvm/hyp/nvhe/pkvm.c
···
         /* No restrictions for non-protected VMs. */
         if (!kvm_vm_is_protected(kvm)) {
                 hyp_vm->kvm.arch.flags = host_arch_flags;
+                hyp_vm->kvm.arch.flags &= ~BIT_ULL(KVM_ARCH_FLAG_ID_REGS_INITIALIZED);

                 bitmap_copy(kvm->arch.vcpu_features,
                             host_kvm->arch.vcpu_features,
···
         if (!vcpu_has_feature(&hyp_vcpu->vcpu, KVM_ARM_VCPU_SVE))
                 return;

-        sve_state = kern_hyp_va(hyp_vcpu->vcpu.arch.sve_state);
+        sve_state = hyp_vcpu->vcpu.arch.sve_state;
         hyp_unpin_shared_mem(sve_state,
                              sve_state + vcpu_sve_state_size(&hyp_vcpu->vcpu));
 }
···
         return ret;
 }

+static int vm_copy_id_regs(struct pkvm_hyp_vcpu *hyp_vcpu)
+{
+        struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);
+        const struct kvm *host_kvm = hyp_vm->host_kvm;
+        struct kvm *kvm = &hyp_vm->kvm;
+
+        if (!test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &host_kvm->arch.flags))
+                return -EINVAL;
+
+        if (test_and_set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags))
+                return 0;
+
+        memcpy(kvm->arch.id_regs, host_kvm->arch.id_regs, sizeof(kvm->arch.id_regs));
+
+        return 0;
+}
+
+static int pkvm_vcpu_init_sysregs(struct pkvm_hyp_vcpu *hyp_vcpu)
+{
+        int ret = 0;
+
+        if (pkvm_hyp_vcpu_is_protected(hyp_vcpu))
+                kvm_init_pvm_id_regs(&hyp_vcpu->vcpu);
+        else
+                ret = vm_copy_id_regs(hyp_vcpu);
+
+        return ret;
+}
+
 static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
                               struct pkvm_hyp_vm *hyp_vm,
                               struct kvm_vcpu *host_vcpu)
···
         hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags);
         hyp_vcpu->vcpu.arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;

-        if (pkvm_hyp_vcpu_is_protected(hyp_vcpu))
-                kvm_init_pvm_id_regs(&hyp_vcpu->vcpu);
+        ret = pkvm_vcpu_init_sysregs(hyp_vcpu);
+        if (ret)
+                goto done;

         ret = pkvm_vcpu_init_traps(hyp_vcpu);
         if (ret)
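
The new vm_copy_id_regs() uses test_and_set_bit() to make the ID-register copy idempotent: the first caller performs the memcpy(), any later caller sees the bit already set and returns early. A userspace sketch of the same pattern, using C11 atomic_flag as a stand-in for the kernel's test_and_set_bit(); the names here are hypothetical:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <string.h>

    static atomic_flag id_regs_initialized = ATOMIC_FLAG_INIT;
    static unsigned long id_regs[4];

    static void copy_id_regs(const unsigned long *host_id_regs)
    {
        /* Like test_and_set_bit(): returns the previous state, so only
         * the first caller gets past this point. */
        if (atomic_flag_test_and_set(&id_regs_initialized))
            return; /* already initialized, nothing to do */

        memcpy(id_regs, host_id_regs, sizeof(id_regs));
    }

    int main(void)
    {
        unsigned long host[4] = { 1, 2, 3, 4 };

        copy_id_regs(host);
        copy_id_regs(host); /* no-op: the flag is already set */
        printf("%lu %lu\n", id_regs[0], id_regs[3]); /* prints: 1 4 */
        return 0;
    }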
+5 -7
arch/arm64/kvm/mmu.c
···
         }

         /*
-         * Both the canonical IPA and fault IPA must be hugepage-aligned to
-         * ensure we find the right PFN and lay down the mapping in the right
-         * place.
+         * Both the canonical IPA and fault IPA must be aligned to the
+         * mapping size to ensure we find the right PFN and lay down the
+         * mapping in the right place.
          */
-        if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE) {
-                fault_ipa &= ~(vma_pagesize - 1);
-                ipa &= ~(vma_pagesize - 1);
-        }
+        fault_ipa = ALIGN_DOWN(fault_ipa, vma_pagesize);
+        ipa = ALIGN_DOWN(ipa, vma_pagesize);

         gfn = ipa >> PAGE_SHIFT;
         mte_allowed = kvm_vma_mte_allowed(vma);
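
ALIGN_DOWN() rounds an address down to a power-of-two boundary, so the single call reproduces the old PMD_SIZE/PUD_SIZE behaviour and additionally aligns single-page mappings when PAGE_SIZE > 4kB, the case the removed two-way test skipped. A standalone sketch with a local stand-in for the kernel's ALIGN_DOWN(); the example address is arbitrary:

    #include <stdint.h>
    #include <stdio.h>

    /* Local stand-in for the kernel's ALIGN_DOWN(); correct for the
     * power-of-two mapping sizes used here. */
    #define ALIGN_DOWN(x, a) ((x) & ~((uint64_t)(a) - 1))

    int main(void)
    {
        uint64_t fault_ipa = 0x40012345; /* arbitrary example address */

        /* 2MiB block: same result the old PMD_SIZE branch produced. */
        printf("%#llx\n", (unsigned long long)ALIGN_DOWN(fault_ipa, 2ULL << 20));

        /* 64kB single page (PAGE_SIZE > 4kB): now rounded down too,
         * where the old code left the address unaligned. */
        printf("%#llx\n", (unsigned long long)ALIGN_DOWN(fault_ipa, 64ULL << 10));
        return 0;
    }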
+31 -32
arch/arm64/kvm/nested.c
···
         return kvm_inject_nested_sync(vcpu, esr_el2);
 }

+u16 get_asid_by_regime(struct kvm_vcpu *vcpu, enum trans_regime regime)
+{
+        enum vcpu_sysreg ttbr_elx;
+        u64 tcr;
+        u16 asid;
+
+        switch (regime) {
+        case TR_EL10:
+                tcr = vcpu_read_sys_reg(vcpu, TCR_EL1);
+                ttbr_elx = (tcr & TCR_A1) ? TTBR1_EL1 : TTBR0_EL1;
+                break;
+        case TR_EL20:
+                tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
+                ttbr_elx = (tcr & TCR_A1) ? TTBR1_EL2 : TTBR0_EL2;
+                break;
+        default:
+                BUG();
+        }
+
+        asid = FIELD_GET(TTBRx_EL1_ASID, vcpu_read_sys_reg(vcpu, ttbr_elx));
+        if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
+            !(tcr & TCR_ASID16))
+                asid &= GENMASK(7, 0);
+
+        return asid;
+}
+
 static void invalidate_vncr(struct vncr_tlb *vt)
 {
         vt->valid = false;
···
 {
         int i;

-        if (!kvm->arch.nested_mmus_size)
-                return;
-
         for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
                 struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
···
         if (read_vncr_el2(vcpu) != vt->gva)
                 return false;

-        if (vt->wr.nG) {
-                u64 tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
-                u64 ttbr = ((tcr & TCR_A1) ?
-                            vcpu_read_sys_reg(vcpu, TTBR1_EL2) :
-                            vcpu_read_sys_reg(vcpu, TTBR0_EL2));
-                u16 asid;
-
-                asid = FIELD_GET(TTBR_ASID_MASK, ttbr);
-                if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
-                    !(tcr & TCR_ASID16))
-                        asid &= GENMASK(7, 0);
-
-                return asid == vt->wr.asid;
-        }
+        if (vt->wr.nG)
+                return get_asid_by_regime(vcpu, TR_EL20) == vt->wr.asid;

         return true;
 }
···
         if (read_vncr_el2(vcpu) != vt->gva)
                 return;

-        if (vt->wr.nG) {
-                u64 tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
-                u64 ttbr = ((tcr & TCR_A1) ?
-                            vcpu_read_sys_reg(vcpu, TTBR1_EL2) :
-                            vcpu_read_sys_reg(vcpu, TTBR0_EL2));
-                u16 asid;
-
-                asid = FIELD_GET(TTBR_ASID_MASK, ttbr);
-                if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
-                    !(tcr & TCR_ASID16))
-                        asid &= GENMASK(7, 0);
-
-                if (asid != vt->wr.asid)
-                        return;
-        }
+        if (vt->wr.nG && get_asid_by_regime(vcpu, TR_EL20) != vt->wr.asid)
+                return;

         vt->cpu = smp_processor_id();
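
The subtle part of get_asid_by_regime() is the final narrowing: the TTBR ASID field is 16 bits wide, but unless the VM has 16-bit ASIDs (ID_AA64MMFR0_EL1.ASIDBITS) and TCR.AS is set, only the low 8 bits are significant. A standalone sketch of that extraction, with local stand-ins for the kernel's GENMASK() and the field shift:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Local stand-in for the kernel's GENMASK(); the ASID lives in
     * bits [63:48] of TTBRx_EL1. */
    #define GENMASK(h, l)   ((~0ULL << (l)) & (~0ULL >> (63 - (h))))
    #define TTBR_ASID_SHIFT 48

    static uint16_t extract_asid(uint64_t ttbr, bool asid16)
    {
        uint16_t asid = (ttbr & GENMASK(63, 48)) >> TTBR_ASID_SHIFT;

        /* Without 16-bit ASID support (or with TCR.AS clear), only
         * the low 8 bits of the field are valid. */
        if (!asid16)
            asid &= GENMASK(7, 0);

        return asid;
    }

    int main(void)
    {
        uint64_t ttbr = 0xabcdULL << TTBR_ASID_SHIFT;

        printf("16-bit ASIDs: %#x\n", extract_asid(ttbr, true));  /* 0xabcd */
        printf(" 8-bit ASIDs: %#x\n", extract_asid(ttbr, false)); /* 0xcd */
        return 0;
    }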
+3
arch/arm64/kvm/sys_regs.c
···
                        ID_AA64MMFR3_EL1_SCTLRX |
                        ID_AA64MMFR3_EL1_S1POE |
                        ID_AA64MMFR3_EL1_S1PIE;
+
+                if (!system_supports_poe())
+                        val &= ~ID_AA64MMFR3_EL1_S1POE;
                 break;
         case SYS_ID_MMFR4_EL1:
                 val &= ~ID_MMFR4_EL1_CCIDX;
-1
arch/loongarch/kvm/Kconfig
···
         select KVM_COMMON
         select KVM_GENERIC_DIRTYLOG_READ_PROTECT
         select KVM_GENERIC_HARDWARE_ENABLING
-        select KVM_GENERIC_MMU_NOTIFIER
         select KVM_MMIO
         select VIRT_XFER_TO_GUEST_WORK
         select SCHED_INFO
-1
arch/loongarch/kvm/vm.c
···
         case KVM_CAP_ONE_REG:
         case KVM_CAP_ENABLE_CAP:
         case KVM_CAP_READONLY_MEM:
-        case KVM_CAP_SYNC_MMU:
         case KVM_CAP_IMMEDIATE_EXIT:
         case KVM_CAP_IOEVENTFD:
         case KVM_CAP_MP_STATE:
-1
arch/mips/kvm/Kconfig
···
         select KVM_COMMON
         select KVM_GENERIC_DIRTYLOG_READ_PROTECT
         select KVM_MMIO
-        select KVM_GENERIC_MMU_NOTIFIER
         select KVM_GENERIC_HARDWARE_ENABLING
         select HAVE_KVM_READONLY_MEM
         help
-1
arch/mips/kvm/mips.c
···
         case KVM_CAP_ONE_REG:
         case KVM_CAP_ENABLE_CAP:
         case KVM_CAP_READONLY_MEM:
-        case KVM_CAP_SYNC_MMU:
         case KVM_CAP_IMMEDIATE_EXIT:
                 r = 1;
                 break;
-4
arch/powerpc/kvm/Kconfig
···
 config KVM_BOOK3S_PR_POSSIBLE
         bool
         select KVM_MMIO
-        select KVM_GENERIC_MMU_NOTIFIER

 config KVM_BOOK3S_HV_POSSIBLE
         bool
···
         tristate "KVM for POWER7 and later using hypervisor mode in host"
         depends on KVM_BOOK3S_64 && PPC_POWERNV
         select KVM_BOOK3S_HV_POSSIBLE
-        select KVM_GENERIC_MMU_NOTIFIER
         select KVM_BOOK3S_HV_PMU
         select CMA
         help
···
         depends on !CONTEXT_TRACKING_USER
         select KVM
         select KVM_MMIO
-        select KVM_GENERIC_MMU_NOTIFIER
         help
           Support running unmodified E500 guest kernels in virtual machines on
           E500v2 host processors.
···
         select KVM
         select KVM_MMIO
         select KVM_BOOKE_HV
-        select KVM_GENERIC_MMU_NOTIFIER
         help
           Support running unmodified E500MC/E5500/E6500 guest kernels in
           virtual machines on E500MC/E5500/E6500 host processors.
-6
arch/powerpc/kvm/powerpc.c
···
                 r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
                        !kvmppc_hv_ops->enable_nested(NULL));
                 break;
-#endif
-        case KVM_CAP_SYNC_MMU:
-                BUILD_BUG_ON(!IS_ENABLED(CONFIG_KVM_GENERIC_MMU_NOTIFIER));
-                r = 1;
-                break;
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
         case KVM_CAP_PPC_HTAB_FD:
                 r = hv_enabled;
                 break;
-1
arch/riscv/kvm/Kconfig
···
         select KVM_GENERIC_HARDWARE_ENABLING
         select KVM_MMIO
         select VIRT_XFER_TO_GUEST_WORK
-        select KVM_GENERIC_MMU_NOTIFIER
         select SCHED_INFO
         select GUEST_PERF_EVENTS if PERF_EVENTS
         help
-1
arch/riscv/kvm/vm.c
···
                 break;
         case KVM_CAP_IOEVENTFD:
         case KVM_CAP_USER_MEMORY:
-        case KVM_CAP_SYNC_MMU:
         case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
         case KVM_CAP_ONE_REG:
         case KVM_CAP_READONLY_MEM:
-2
arch/s390/kvm/Kconfig
···
         select HAVE_KVM_INVALID_WAKEUPS
         select HAVE_KVM_NO_POLL
         select KVM_VFIO
-        select MMU_NOTIFIER
         select VIRT_XFER_TO_GUEST_WORK
-        select KVM_GENERIC_MMU_NOTIFIER
         select KVM_MMU_LOCKLESS_AGING
         help
           Support hosting paravirtualized guest machines using the SIE
-1
arch/s390/kvm/kvm-s390.c
···
         switch (ext) {
         case KVM_CAP_S390_PSW:
         case KVM_CAP_S390_GMAP:
-        case KVM_CAP_SYNC_MMU:
 #ifdef CONFIG_KVM_S390_UCONTROL
         case KVM_CAP_S390_UCONTROL:
 #endif
-1
arch/x86/kvm/Kconfig
···
 config KVM_X86
         def_tristate KVM if (KVM_INTEL != n || KVM_AMD != n)
         select KVM_COMMON
-        select KVM_GENERIC_MMU_NOTIFIER
         select KVM_ELIDE_TLB_FLUSH_IF_YOUNG
         select KVM_MMU_LOCKLESS_AGING
         select HAVE_KVM_IRQCHIP
-1
arch/x86/kvm/x86.c
···
 #endif
         case KVM_CAP_NOP_IO_DELAY:
         case KVM_CAP_MP_STATE:
-        case KVM_CAP_SYNC_MMU:
         case KVM_CAP_USER_NMI:
         case KVM_CAP_IRQ_INJECT_STATUS:
         case KVM_CAP_IOEVENTFD:
+1 -1
drivers/irqchip/irq-gic-v5-irs.c
···
          */
         if (list_empty(&irs_nodes)) {
                 idr = irs_readl_relaxed(irs_data, GICV5_IRS_IDR0);
-                gicv5_global_data.virt_capable = !FIELD_GET(GICV5_IRS_IDR0_VIRT, idr);
+                gicv5_global_data.virt_capable = !!FIELD_GET(GICV5_IRS_IDR0_VIRT, idr);

                 idr = irs_readl_relaxed(irs_data, GICV5_IRS_IDR1);
                 irs_setup_pri_bits(idr);
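
The one-character fix replaces a logical NOT with a double negation: !FIELD_GET(...) inverts the extracted bit, while !!FIELD_GET(...) merely normalises it to 0 or 1. A standalone sketch with a simplified stand-in for FIELD_GET(GICV5_IRS_IDR0_VIRT, idr); the bit position is made up for the illustration:

    #include <stdio.h>

    /* Simplified stand-in for FIELD_GET(GICV5_IRS_IDR0_VIRT, idr). */
    #define IDR0_VIRT           (1u << 5)
    #define FIELD_GET_VIRT(idr) (((idr) & IDR0_VIRT) >> 5)

    int main(void)
    {
        unsigned int idr = IDR0_VIRT; /* hardware reports: virt-capable */

        /* Buggy: the single NOT inverts the flag, recording a
         * virt-capable IRS as not capable (and vice versa). */
        printf("!FIELD_GET  -> %u\n", !FIELD_GET_VIRT(idr));  /* 0, wrong */

        /* Fixed: double negation only normalises the value. */
        printf("!!FIELD_GET -> %u\n", !!FIELD_GET_VIRT(idr)); /* 1, right */
        return 0;
    }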
+1 -6
include/linux/kvm_host.h
···
 int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
 #endif

-#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
 union kvm_mmu_notifier_arg {
         unsigned long attributes;
 };
···
 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
-#endif

 enum {
         OUTSIDE_GUEST_MODE,
···
         struct hlist_head irq_ack_notifier_list;
 #endif

-#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
         struct mmu_notifier mmu_notifier;
         unsigned long mmu_invalidate_seq;
         long mmu_invalidate_in_progress;
         gfn_t mmu_invalidate_range_start;
         gfn_t mmu_invalidate_range_end;
-#endif
+
         struct list_head devices;
         u64 manual_dirty_log_protect;
         struct dentry *debugfs_dentry;
···
 extern const struct kvm_stats_header kvm_vcpu_stats_header;
 extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[];

-#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
 static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq)
 {
         if (unlikely(kvm->mmu_invalidate_in_progress))
···
         return READ_ONCE(kvm->mmu_invalidate_seq) != mmu_seq;
 }
-#endif

 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
+1 -8
virt/kvm/Kconfig
···
         bool
         select EVENTFD
         select INTERVAL_TREE
+        select MMU_NOTIFIER
         select PREEMPT_NOTIFIERS

 config HAVE_KVM_PFNCACHE
···
 config KVM_GENERIC_HARDWARE_ENABLING
         bool

-config KVM_GENERIC_MMU_NOTIFIER
-        select MMU_NOTIFIER
-        bool
-
 config KVM_ELIDE_TLB_FLUSH_IF_YOUNG
-        depends on KVM_GENERIC_MMU_NOTIFIER
         bool

 config KVM_MMU_LOCKLESS_AGING
-        depends on KVM_GENERIC_MMU_NOTIFIER
         bool

 config KVM_GENERIC_MEMORY_ATTRIBUTES
-        depends on KVM_GENERIC_MMU_NOTIFIER
         bool

 config KVM_GUEST_MEMFD
-        depends on KVM_GENERIC_MMU_NOTIFIER
         select XARRAY_MULTI
         bool
+1 -16
virt/kvm/kvm_main.c
···
 }
 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_destroy_vcpus);

-#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
 {
         return container_of(mn, struct kvm, mmu_notifier);
···
         return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
 }

-#else  /* !CONFIG_KVM_GENERIC_MMU_NOTIFIER */
-
-static int kvm_init_mmu_notifier(struct kvm *kvm)
-{
-        return 0;
-}
-
-#endif /* CONFIG_KVM_GENERIC_MMU_NOTIFIER */
-
 #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
 static int kvm_pm_notifier_call(struct notifier_block *bl,
                                 unsigned long state,
···
 out_err_no_debugfs:
         kvm_coalesced_mmio_free(kvm);
 out_no_coalesced_mmio:
-#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
         if (kvm->mmu_notifier.ops)
                 mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
-#endif
 out_err_no_mmu_notifier:
         kvm_disable_virtualization();
 out_err_no_disable:
···
                 kvm->buses[i] = NULL;
         }
         kvm_coalesced_mmio_free(kvm);
-#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
         mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
         /*
          * At this point, pending calls to invalidate_range_start()
···
                 kvm->mn_active_invalidate_count = 0;
         else
                 WARN_ON(kvm->mmu_invalidate_in_progress);
-#else
-        kvm_flush_shadow_all(kvm);
-#endif
         kvm_arch_destroy_vm(kvm);
         kvm_destroy_devices(kvm);
         for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
···
 static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
 {
         switch (arg) {
+        case KVM_CAP_SYNC_MMU:
         case KVM_CAP_USER_MEMORY:
         case KVM_CAP_USER_MEMORY2:
         case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: