Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
"ARM:

- Take care of faults occurring between the PARange and IPA range by
injecting an exception

- Fix S2 faults taken from a host EL0 in protected mode

- Work around an Oops caused by a PMU access from a 32-bit guest when
no PMU has been created for it. This is a temporary bodge until we fix
it for good.

x86:

- Fix potential races when walking host page table

- Fix shadow page table leak when KVM runs nested

- Work around a bug in userspace when KVM synthesizes leaf 0x80000021
on older (pre-EPYC) AMD processors or on Intel processors

Generic (but affects only RISC-V):

- Fix bad user ABI for KVM_EXIT_SYSTEM_EVENT"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
KVM: x86: work around QEMU issue with synthetic CPUID leaves
Revert "x86/mm: Introduce lookup_address_in_mm()"
KVM: x86/mmu: fix potential races when walking host page table
KVM: fix bad user ABI for KVM_EXIT_SYSTEM_EVENT
KVM: x86/mmu: Do not create SPTEs for GFNs that exceed host.MAXPHYADDR
KVM: arm64: Inject exception on out-of-IPA-range translation fault
KVM/arm64: Don't emulate a PMU for 32-bit guests if feature not set
KVM: arm64: Handle host stage-2 faults from 32-bit EL0

18 files changed, +213 -61
Documentation/virt/kvm/api.rst (+17 -7)
···
 #define KVM_SYSTEM_EVENT_RESET 2
 #define KVM_SYSTEM_EVENT_CRASH 3
 __u32 type;
-__u64 flags;
+__u32 ndata;
+__u64 data[16];
 } system_event;

 If exit_reason is KVM_EXIT_SYSTEM_EVENT then the vcpu has triggered
 a system-level event using some architecture specific mechanism (hypercall
 or some special instruction). In case of ARM64, this is triggered using
-HVC instruction based PSCI call from the vcpu. The 'type' field describes
-the system-level event type. The 'flags' field describes architecture
-specific flags for the system-level event.
+HVC instruction based PSCI call from the vcpu.

+The 'type' field describes the system-level event type.
 Valid values for 'type' are:

 - KVM_SYSTEM_EVENT_SHUTDOWN -- the guest has requested a shutdown of the
···
 to ignore the request, or to gather VM memory core dump and/or
 reset/shutdown of the VM.

-Valid flags are:
+If KVM_CAP_SYSTEM_EVENT_DATA is present, the 'data' field can contain
+architecture specific information for the system-level event. Only
+the first `ndata` items (possibly zero) of the data array are valid.

- - KVM_SYSTEM_EVENT_RESET_FLAG_PSCI_RESET2 (arm64 only) -- the guest issued
-   a SYSTEM_RESET2 call according to v1.1 of the PSCI specification.
+ - for arm64, data[0] is set to KVM_SYSTEM_EVENT_RESET_FLAG_PSCI_RESET2 if
+   the guest issued a SYSTEM_RESET2 call according to v1.1 of the PSCI
+   specification.
+
+ - for RISC-V, data[0] is set to the value of the second argument of the
+   ``sbi_system_reset`` call.
+
+Previous versions of Linux defined a `flags` member in this struct. The
+field is now aliased to `data[0]`. Userspace can assume that it is only
+written if ndata is greater than 0.

 ::
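For orientation, here is a minimal sketch of how a userspace VMM might consume the new exit data. It is hypothetical (handle_system_event and has_event_data are illustrative names, not from the patch), but the field names and the capability gating follow the documentation hunk above:

#include <linux/kvm.h>
#include <stdio.h>

/* 'run' is the vcpu's mmap()ed kvm_run; 'has_event_data' caches a
 * KVM_CHECK_EXTENSION(KVM_CAP_SYSTEM_EVENT_DATA) probe done at setup. */
static void handle_system_event(struct kvm_run *run, int has_event_data)
{
        if (run->exit_reason != KVM_EXIT_SYSTEM_EVENT)
                return;

        switch (run->system_event.type) {
        case KVM_SYSTEM_EVENT_RESET:
                /* Only the first ndata entries of data[] are valid, and
                 * ndata itself is only meaningful when the capability is
                 * present; older kernels exposed a single 'flags' field. */
                if (has_event_data && run->system_event.ndata > 0)
                        printf("reset, data[0]=%llu\n",
                               (unsigned long long)run->system_event.data[0]);
                /* ...tear down and recreate the VM... */
                break;
        case KVM_SYSTEM_EVENT_SHUTDOWN:
        case KVM_SYSTEM_EVENT_CRASH:
                /* ...shut down or collect a dump... */
                break;
        }
}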
arch/arm64/include/asm/kvm_emulate.h (+1)
···
 void kvm_inject_vabt(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+void kvm_inject_size_fault(struct kvm_vcpu *vcpu);

 void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);
arch/arm64/kvm/hyp/nvhe/host.S (+8 -8)
···
 invalid_host_el2_vect        // FIQ EL2h
 invalid_host_el2_vect        // Error EL2h

-host_el1_sync_vect           // Synchronous 64-bit EL1
-invalid_host_el1_vect        // IRQ 64-bit EL1
-invalid_host_el1_vect        // FIQ 64-bit EL1
-invalid_host_el1_vect        // Error 64-bit EL1
+host_el1_sync_vect           // Synchronous 64-bit EL1/EL0
+invalid_host_el1_vect        // IRQ 64-bit EL1/EL0
+invalid_host_el1_vect        // FIQ 64-bit EL1/EL0
+invalid_host_el1_vect        // Error 64-bit EL1/EL0

-invalid_host_el1_vect        // Synchronous 32-bit EL1
-invalid_host_el1_vect        // IRQ 32-bit EL1
-invalid_host_el1_vect        // FIQ 32-bit EL1
-invalid_host_el1_vect        // Error 32-bit EL1
+host_el1_sync_vect           // Synchronous 32-bit EL1/EL0
+invalid_host_el1_vect        // IRQ 32-bit EL1/EL0
+invalid_host_el1_vect        // FIQ 32-bit EL1/EL0
+invalid_host_el1_vect        // Error 32-bit EL1/EL0
 SYM_CODE_END(__kvm_hyp_host_vector)

 /*
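This hunk is the whole of the "host stage-2 faults from 32-bit EL0" fix: in protected nVHE mode such a fault can be taken from host EL0 as well as EL1, and when that EL0 is AArch32 the exception arrives on the lower-level AArch32 vectors, which previously landed in invalid_host_el1_vect. Routing the synchronous AArch32 entry to host_el1_sync_vect (and relabeling the comments EL1/EL0) lets the ordinary host-fault path handle it.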
arch/arm64/kvm/inject_fault.c (+28)
···
         inject_abt64(vcpu, true, addr);
 }

+void kvm_inject_size_fault(struct kvm_vcpu *vcpu)
+{
+        unsigned long addr, esr;
+
+        addr = kvm_vcpu_get_fault_ipa(vcpu);
+        addr |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);
+
+        if (kvm_vcpu_trap_is_iabt(vcpu))
+                kvm_inject_pabt(vcpu, addr);
+        else
+                kvm_inject_dabt(vcpu, addr);
+
+        /*
+         * If AArch64 or LPAE, set FSC to 0 to indicate an Address
+         * Size Fault at level 0, as if exceeding PARange.
+         *
+         * Non-LPAE guests will only get the external abort, as there
+         * is no way to describe the ASF.
+         */
+        if (vcpu_el1_is_32bit(vcpu) &&
+            !(vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE))
+                return;
+
+        esr = vcpu_read_sys_reg(vcpu, ESR_EL1);
+        esr &= ~GENMASK_ULL(5, 0);
+        vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
+}
+
 /**
  * kvm_inject_undefined - inject an undefined instruction into the guest
  * @vcpu: The vCPU in which to inject the exception
arch/arm64/kvm/mmu.c (+19)
···
         fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
         is_iabt = kvm_vcpu_trap_is_iabt(vcpu);

+        if (fault_status == FSC_FAULT) {
+                /* Beyond sanitised PARange (which is the IPA limit) */
+                if (fault_ipa >= BIT_ULL(get_kvm_ipa_limit())) {
+                        kvm_inject_size_fault(vcpu);
+                        return 1;
+                }
+
+                /* Falls between the IPA range and the PARange? */
+                if (fault_ipa >= BIT_ULL(vcpu->arch.hw_mmu->pgt->ia_bits)) {
+                        fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);
+
+                        if (is_iabt)
+                                kvm_inject_pabt(vcpu, fault_ipa);
+                        else
+                                kvm_inject_dabt(vcpu, fault_ipa);
+                        return 1;
+                }
+        }
+
         /* Synchronous External Abort? */
         if (kvm_vcpu_abt_issea(vcpu)) {
                 /*
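To make the two new checks concrete, here is a small standalone program that replays the classification for an assumed configuration (a 40-bit IPA space on a host whose sanitised PARange is 48 bits; the numbers are illustrative, not from the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        const unsigned ia_bits = 40;   /* VM's IPA space (hw_mmu->pgt->ia_bits) */
        const unsigned ipa_limit = 48; /* sanitised PARange (get_kvm_ipa_limit()) */
        uint64_t ipas[] = { 1ULL << 39, 1ULL << 40, 1ULL << 48 };

        for (int i = 0; i < 3; i++) {
                uint64_t ipa = ipas[i];
                if (ipa >= (1ULL << ipa_limit))
                        printf("%#llx: address size fault injected\n",
                               (unsigned long long)ipa);
                else if (ipa >= (1ULL << ia_bits))
                        printf("%#llx: external abort injected\n",
                               (unsigned long long)ipa);
                else
                        printf("%#llx: normal stage-2 fault handling\n",
                               (unsigned long long)ipa);
        }
        return 0;
}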
arch/arm64/kvm/pmu-emul.c (+22 -1)
···
         struct kvm_pmu *pmu = &vcpu->arch.pmu;
         struct kvm_pmc *pmc = &pmu->pmc[select_idx];

+        if (!kvm_vcpu_has_pmu(vcpu))
+                return 0;
+
         counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

         if (kvm_pmu_pmc_is_chained(pmc) &&
···
 void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
 {
         u64 reg;
+
+        if (!kvm_vcpu_has_pmu(vcpu))
+                return;

         reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
               ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
···
         struct kvm_pmu *pmu = &vcpu->arch.pmu;
         struct kvm_pmc *pmc;

+        if (!kvm_vcpu_has_pmu(vcpu))
+                return;
+
         if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
                 return;
···
         struct kvm_pmu *pmu = &vcpu->arch.pmu;
         struct kvm_pmc *pmc;

-        if (!val)
+        if (!kvm_vcpu_has_pmu(vcpu) || !val)
                 return;

         for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
···
         struct kvm_pmu *pmu = &vcpu->arch.pmu;
         int i;

+        if (!kvm_vcpu_has_pmu(vcpu))
+                return;
+
         if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
                 return;
···
 void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 {
         int i;
+
+        if (!kvm_vcpu_has_pmu(vcpu))
+                return;

         if (val & ARMV8_PMU_PMCR_E) {
                 kvm_pmu_enable_counter_mask(vcpu,
···
 {
         u64 reg, mask;

+        if (!kvm_vcpu_has_pmu(vcpu))
+                return;
+
         mask = ARMV8_PMU_EVTYPE_MASK;
         mask &= ~ARMV8_PMU_EVTYPE_EVENT;
         mask |= kvm_pmu_event_mask(vcpu->kvm);
···
         unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
         u64 val, mask = 0;
         int base, i, nr_events;
+
+        if (!kvm_vcpu_has_pmu(vcpu))
+                return 0;

         if (!pmceid1) {
                 val = read_sysreg(pmceid0_el0);
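The shape of the pmu-emul.c change is uniform: each emulation entry point now bails out early via kvm_vcpu_has_pmu(), so a vCPU created without the PMU feature, which is the situation a 32-bit guest's PMU accesses could previously trip over, reads back zero or has the write ignored instead of KVM touching vPMU state that was never set up. As the merge message notes, this is a stopgap until the trap handling is fixed for good.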
arch/arm64/kvm/psci.c (+2 -1)
···

         memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
         vcpu->run->system_event.type = type;
-        vcpu->run->system_event.flags = flags;
+        vcpu->run->system_event.ndata = 1;
+        vcpu->run->system_event.data[0] = flags;
         vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
 }
arch/riscv/kvm/vcpu_sbi.c (+3 -2)
···

 void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run,
-                                     u32 type, u64 flags)
+                                     u32 type, u64 reason)
 {
         unsigned long i;
         struct kvm_vcpu *tmp;
···
         memset(&run->system_event, 0, sizeof(run->system_event));
         run->system_event.type = type;
-        run->system_event.flags = flags;
+        run->system_event.ndata = 1;
+        run->system_event.data[0] = reason;
         run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
 }
arch/x86/include/asm/pgtable_types.h (-4)
···
 extern pte_t *lookup_address(unsigned long address, unsigned int *level);
 extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
                                     unsigned int *level);
-
-struct mm_struct;
-extern pte_t *lookup_address_in_mm(struct mm_struct *mm, unsigned long address,
-                                   unsigned int *level);
 extern pmd_t *lookup_pmd_address(unsigned long address);
 extern phys_addr_t slow_virt_to_phys(void *__address);
 extern int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn,
arch/x86/kvm/cpuid.c (+14 -5)
···
         case 0x80000000:
                 entry->eax = min(entry->eax, 0x80000021);
                 /*
-                 * Serializing LFENCE is reported in a multitude of ways,
-                 * and NullSegClearsBase is not reported in CPUID on Zen2;
-                 * help userspace by providing the CPUID leaf ourselves.
+                 * Serializing LFENCE is reported in a multitude of ways, and
+                 * NullSegClearsBase is not reported in CPUID on Zen2; help
+                 * userspace by providing the CPUID leaf ourselves.
+                 *
+                 * However, only do it if the host has CPUID leaf 0x8000001d.
+                 * QEMU thinks that it can query the host blindly for that
+                 * CPUID leaf if KVM reports that it supports 0x8000001d or
+                 * above. The processor merrily returns values from the
+                 * highest Intel leaf which QEMU tries to use as the guest's
+                 * 0x8000001d. Even worse, this can result in an infinite
+                 * loop if said highest leaf has no subleaves indexed by ECX.
                  */
-                if (static_cpu_has(X86_FEATURE_LFENCE_RDTSC)
-                    || !static_cpu_has_bug(X86_BUG_NULL_SEG))
+                if (entry->eax >= 0x8000001d &&
+                    (static_cpu_has(X86_FEATURE_LFENCE_RDTSC)
+                     || !static_cpu_has_bug(X86_BUG_NULL_SEG)))
                         entry->eax = max(entry->eax, 0x80000021);
                 break;
         case 0x80000001:
arch/x86/kvm/mmu.h (+24)
···
         return ((2ULL << (e - s)) - 1) << s;
 }

+/*
+ * The number of non-reserved physical address bits irrespective of features
+ * that repurpose legal bits, e.g. MKTME.
+ */
+extern u8 __read_mostly shadow_phys_bits;
+
+static inline gfn_t kvm_mmu_max_gfn(void)
+{
+        /*
+         * Note that this uses the host MAXPHYADDR, not the guest's.
+         * EPT/NPT cannot support GPAs that would exceed host.MAXPHYADDR;
+         * assuming KVM is running on bare metal, guest accesses beyond
+         * host.MAXPHYADDR will hit a #PF(RSVD) and never cause a vmexit
+         * (either EPT Violation/Misconfig or #NPF), and so KVM will never
+         * install a SPTE for such addresses. If KVM is running as a VM
+         * itself, on the other hand, it might see a MAXPHYADDR that is less
+         * than hardware's real MAXPHYADDR. Using the host MAXPHYADDR
+         * disallows such SPTEs entirely and simplifies the TDP MMU.
+         */
+        int max_gpa_bits = likely(tdp_enabled) ? shadow_phys_bits : 52;
+
+        return (1ULL << (max_gpa_bits - PAGE_SHIFT)) - 1;
+}
+
 void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask);
 void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only);
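A quick worked example of the arithmetic (the 46-bit host MAXPHYADDR is an assumption for illustration); this re-derives kvm_mmu_max_gfn() outside the kernel for both the TDP and shadow-paging cases:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12   /* x86 4 KiB pages */

static uint64_t max_gfn(int max_gpa_bits)
{
        return (1ULL << (max_gpa_bits - PAGE_SHIFT)) - 1;
}

int main(void)
{
        /* TDP enabled: bound by host MAXPHYADDR (shadow_phys_bits), assumed 46 */
        printf("tdp    max gfn = %#llx\n", (unsigned long long)max_gfn(46));
        /* Shadow paging: architectural 52-bit GPA ceiling */
        printf("shadow max gfn = %#llx\n", (unsigned long long)max_gfn(52));
        return 0;
}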
arch/x86/kvm/mmu/mmu.c (+50 -7)
···
                           const struct kvm_memory_slot *slot)
 {
         unsigned long hva;
-        pte_t *pte;
-        int level;
+        unsigned long flags;
+        int level = PG_LEVEL_4K;
+        pgd_t pgd;
+        p4d_t p4d;
+        pud_t pud;
+        pmd_t pmd;

         if (!PageCompound(pfn_to_page(pfn)) && !kvm_is_zone_device_pfn(pfn))
                 return PG_LEVEL_4K;
···
          */
         hva = __gfn_to_hva_memslot(slot, gfn);

-        pte = lookup_address_in_mm(kvm->mm, hva, &level);
-        if (unlikely(!pte))
-                return PG_LEVEL_4K;
+        /*
+         * Lookup the mapping level in the current mm. The information
+         * may become stale soon, but it is safe to use as long as
+         * 1) mmu_notifier_retry was checked after taking mmu_lock, and
+         * 2) mmu_lock is taken now.
+         *
+         * We still need to disable IRQs to prevent concurrent tear down
+         * of page tables.
+         */
+        local_irq_save(flags);

+        pgd = READ_ONCE(*pgd_offset(kvm->mm, hva));
+        if (pgd_none(pgd))
+                goto out;
+
+        p4d = READ_ONCE(*p4d_offset(&pgd, hva));
+        if (p4d_none(p4d) || !p4d_present(p4d))
+                goto out;
+
+        pud = READ_ONCE(*pud_offset(&p4d, hva));
+        if (pud_none(pud) || !pud_present(pud))
+                goto out;
+
+        if (pud_large(pud)) {
+                level = PG_LEVEL_1G;
+                goto out;
+        }
+
+        pmd = READ_ONCE(*pmd_offset(&pud, hva));
+        if (pmd_none(pmd) || !pmd_present(pmd))
+                goto out;
+
+        if (pmd_large(pmd))
+                level = PG_LEVEL_2M;
+
+out:
+        local_irq_restore(flags);
         return level;
 }
···
         /*
          * If MMIO caching is disabled, emulate immediately without
          * touching the shadow page tables as attempting to install an
-         * MMIO SPTE will just be an expensive nop.
+         * MMIO SPTE will just be an expensive nop. Do not cache MMIO
+         * whose gfn is greater than host.MAXPHYADDR, any guest that
+         * generates such gfns is running nested and is being tricked
+         * by L0 userspace (you can observe gfn > L1.MAXPHYADDR if
+         * and only if L1's MAXPHYADDR is inaccurate with respect to
+         * the hardware's).
          */
-        if (unlikely(!shadow_mmio_value)) {
+        if (unlikely(!shadow_mmio_value) ||
+            unlikely(fault->gfn > kvm_mmu_max_gfn())) {
                 *ret_val = RET_PF_EMULATE;
                 return true;
         }
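The IRQs-off window is doing the same job it does for the kernel's fast GUP path: on x86, tearing down user page tables involves a TLB shootdown IPI that cannot complete while a CPU sits with interrupts disabled, so the tables this walk dereferences via READ_ONCE() cannot be freed underneath it. The result may still be stale, which is why it is only safe in combination with the mmu_notifier_retry/mmu_lock conditions spelled out in the comment.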
arch/x86/kvm/mmu/spte.h (-6)
···
  */
 extern u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;

-/*
- * The number of non-reserved physical address bits irrespective of features
- * that repurpose legal bits, e.g. MKTME.
- */
-extern u8 __read_mostly shadow_phys_bits;
-
 static inline bool is_mmio_spte(u64 spte)
 {
         return (spte & shadow_mmio_mask) == shadow_mmio_value &&
arch/x86/kvm/mmu/tdp_mmu.c (+8 -7)
···
         return iter->yielded;
 }

-static inline gfn_t tdp_mmu_max_gfn_host(void)
+static inline gfn_t tdp_mmu_max_gfn_exclusive(void)
 {
         /*
-         * Bound TDP MMU walks at host.MAXPHYADDR, guest accesses beyond that
-         * will hit a #PF(RSVD) and never hit an EPT Violation/Misconfig / #NPF,
-         * and so KVM will never install a SPTE for such addresses.
+         * Bound TDP MMU walks at host.MAXPHYADDR. KVM disallows memslots with
+         * a gpa range that would exceed the max gfn, and KVM does not create
+         * MMIO SPTEs for "impossible" gfns, instead sending such accesses down
+         * the slow emulation path every time.
          */
-        return 1ULL << (shadow_phys_bits - PAGE_SHIFT);
+        return kvm_mmu_max_gfn() + 1;
 }

 static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
···
 {
         struct tdp_iter iter;

-        gfn_t end = tdp_mmu_max_gfn_host();
+        gfn_t end = tdp_mmu_max_gfn_exclusive();
         gfn_t start = 0;

         for_each_tdp_pte_min_level(iter, root, zap_level, start, end) {
···
 {
         struct tdp_iter iter;

-        end = min(end, tdp_mmu_max_gfn_host());
+        end = min(end, tdp_mmu_max_gfn_exclusive());

         lockdep_assert_held_write(&kvm->mmu_lock);
arch/x86/kvm/x86.c (+7 -1)
···
         if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) {
                 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
                 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH;
+                vcpu->run->system_event.ndata = 0;
                 r = 0;
                 goto out;
         }
         if (kvm_check_request(KVM_REQ_HV_RESET, vcpu)) {
                 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
                 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET;
+                vcpu->run->system_event.ndata = 0;
                 r = 0;
                 goto out;
         }
···
                                    struct kvm_memory_slot *new,
                                    enum kvm_mr_change change)
 {
-        if (change == KVM_MR_CREATE || change == KVM_MR_MOVE)
+        if (change == KVM_MR_CREATE || change == KVM_MR_MOVE) {
+                if ((new->base_gfn + new->npages - 1) > kvm_mmu_max_gfn())
+                        return -EINVAL;
+
                 return kvm_alloc_memslot_metadata(kvm, new);
+        }

         if (change == KVM_MR_FLAGS_ONLY)
                 memcpy(&new->arch, &old->arch, sizeof(old->arch));
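Seen from userspace, the new bound in the memslot-preparation path turns an over-range slot into an immediate failure. A hypothetical sketch (add_out_of_range_slot, vm_fd, backing, and the 46-bit host MAXPHYADDR are assumptions; the ioctl and struct are the real KVM API):

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <errno.h>
#include <stdio.h>

/* With TDP on an assumed 46-bit host, a slot whose last gfn exceeds
 * kvm_mmu_max_gfn() is now rejected with EINVAL at creation time. */
static int add_out_of_range_slot(int vm_fd, void *backing)
{
        struct kvm_userspace_memory_region region = {
                .slot            = 0,
                .guest_phys_addr = 1ULL << 46, /* first gpa past host.MAXPHYADDR */
                .memory_size     = 0x1000,
                .userspace_addr  = (__u64)(unsigned long)backing,
        };

        if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region) < 0) {
                perror("KVM_SET_USER_MEMORY_REGION"); /* expect EINVAL */
                return -errno;
        }
        return 0;
}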
arch/x86/mm/pat/set_memory.c (-11)
···
 }
 EXPORT_SYMBOL_GPL(lookup_address);

-/*
- * Lookup the page table entry for a virtual address in a given mm. Return a
- * pointer to the entry and the level of the mapping.
- */
-pte_t *lookup_address_in_mm(struct mm_struct *mm, unsigned long address,
-                            unsigned int *level)
-{
-        return lookup_address_in_pgd(pgd_offset(mm, address), address, level);
-}
-EXPORT_SYMBOL_GPL(lookup_address_in_mm);
-
 static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
                                   unsigned int *level)
 {
include/uapi/linux/kvm.h (+9 -1)
···
 #define KVM_SYSTEM_EVENT_RESET 2
 #define KVM_SYSTEM_EVENT_CRASH 3
                         __u32 type;
-                        __u64 flags;
+                        __u32 ndata;
+                        union {
+#ifndef __KERNEL__
+                                __u64 flags;
+#endif
+                                __u64 data[16];
+                        };
                 } system_event;
                 /* KVM_EXIT_S390_STSI */
                 struct {
···
 #define KVM_CAP_S390_MEM_OP_EXTENSION 211
 #define KVM_CAP_PMU_CAPABILITY 212
 #define KVM_CAP_DISABLE_QUIRKS2 213
+/* #define KVM_CAP_VM_TSC_CONTROL 214 */
+#define KVM_CAP_SYSTEM_EVENT_DATA 215

 #ifdef KVM_CAP_IRQ_ROUTING
virt/kvm/kvm_main.c (+1)
···
                 return 0;
 #endif
         case KVM_CAP_BINARY_STATS_FD:
+        case KVM_CAP_SYSTEM_EVENT_DATA:
                 return 1;
         default:
                 break;
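This last hunk is what makes the ABI change discoverable: userspace should gate any use of system_event.ndata/data[] on this probe. A minimal sketch (has_system_event_data is an illustrative name; kvm_fd is an assumed open handle to /dev/kvm):

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Returns nonzero iff this kernel fills system_event.ndata/data[]. */
static int has_system_event_data(int kvm_fd)
{
        return ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_SYSTEM_EVENT_DATA) > 0;
}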