Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch kvm-arm64/misc-6.20 into kvmarm-master/next

* kvm-arm64/misc-6.20:
: .
: Misc KVM/arm64 changes for 6.20
:
: - Trivial FPSIMD cleanups
:
: - Calculate the hyp VA size only once, avoiding potential mapping issues
:   when the number of VA bits is smaller than expected
:
: - Silence sparse warning for the HYP stack base
:
: - Fix error checking when handling FFA_VERSION
:
: - Add missing trap configuration for DBGWCR15_EL1
:
: - Don't try to deal with nested S2 when NV isn't enabled for a guest
:
: - Various spelling fixes
: .
KVM: arm64: nv: Avoid NV stage-2 code when NV is not supported
KVM: arm64: Fix various comments
KVM: arm64: nv: Add trap config for DBGWCR<15>_EL1
KVM: arm64: Fix error checking for FFA_VERSION
KVM: arm64: Fix missing <asm/stacktrace/nvhe.h> include
KVM: arm64: Calculate hyp VA size only once
KVM: arm64: Remove ISB after writing FPEXC32_EL2
KVM: arm64: Shuffle KVM_HOST_DATA_FLAG_* indices
KVM: arm64: Fix comment in fpsimd_lazy_switch_to_host()

Signed-off-by: Marc Zyngier <maz@kernel.org>

+62 -46
+6 -6
arch/arm64/include/asm/kvm_host.h
 	 * host to parse the guest S2.
 	 * This either contains:
 	 * - the virtual VTTBR programmed by the guest hypervisor with
-	 * CnP cleared
+	 *   CnP cleared
 	 * - The value 1 (VMID=0, BADDR=0, CnP=1) if invalid
 	 *
 	 * We also cache the full VTCR which gets used for TLB invalidation,
···
 struct kvm_host_data {
 #define KVM_HOST_DATA_FLAG_HAS_SPE			0
 #define KVM_HOST_DATA_FLAG_HAS_TRBE			1
-#define KVM_HOST_DATA_FLAG_TRBE_ENABLED			4
-#define KVM_HOST_DATA_FLAG_EL1_TRACING_CONFIGURED	5
-#define KVM_HOST_DATA_FLAG_VCPU_IN_HYP_CONTEXT		6
-#define KVM_HOST_DATA_FLAG_L1_VNCR_MAPPED		7
-#define KVM_HOST_DATA_FLAG_HAS_BRBE			8
+#define KVM_HOST_DATA_FLAG_TRBE_ENABLED			2
+#define KVM_HOST_DATA_FLAG_EL1_TRACING_CONFIGURED	3
+#define KVM_HOST_DATA_FLAG_VCPU_IN_HYP_CONTEXT		4
+#define KVM_HOST_DATA_FLAG_L1_VNCR_MAPPED		5
+#define KVM_HOST_DATA_FLAG_HAS_BRBE			6
 	unsigned long flags;

 	struct kvm_cpu_context host_ctxt;
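The flag values above are bit positions within the flags word, not a user-visible ABI, so compacting them (the old numbering had gaps at 2 and 3) is safe as long as every access goes through bit-level accessors. A minimal standalone sketch of that pattern; the struct and the set_flag()/test_flag() helpers are illustrative stand-ins for the kernel's accessor macros, not its actual API:

#include <stdio.h>

/* Bit indices, mirroring the KVM_HOST_DATA_FLAG_* style (values assumed). */
#define FLAG_HAS_SPE       0
#define FLAG_HAS_TRBE      1
#define FLAG_TRBE_ENABLED  2

struct host_data {
	unsigned long flags;
};

/* The mask is derived from the bit index here, so the numeric value of a
 * flag never leaks outside these helpers. */
static void set_flag(struct host_data *d, unsigned int bit)
{
	d->flags |= 1UL << bit;
}

static int test_flag(const struct host_data *d, unsigned int bit)
{
	return !!(d->flags & (1UL << bit));
}

int main(void)
{
	struct host_data d = { 0 };

	set_flag(&d, FLAG_TRBE_ENABLED);
	printf("TRBE enabled: %d\n", test_flag(&d, FLAG_TRBE_ENABLED));
	printf("SPE present:  %d\n", test_flag(&d, FLAG_HAS_SPE));
	return 0;
}

Because the accessors build the mask from the index, renumbering a flag only requires a recompile; no stored state or interface is affected.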
+2 -1
arch/arm64/include/asm/kvm_mmu.h
 void kvm_update_va_mask(struct alt_instr *alt,
 			__le32 *origptr, __le32 *updptr, int nr_inst);
 void kvm_compute_layout(void);
+u32 kvm_hyp_va_bits(void);
 void kvm_apply_hyp_relocations(void);

 #define __hyp_pa(x) (((phys_addr_t)(x)) + hyp_physvirt_offset)
···
 phys_addr_t kvm_mmu_get_httbr(void);
 phys_addr_t kvm_get_idmap_vector(void);
-int __init kvm_mmu_init(u32 *hyp_va_bits);
+int __init kvm_mmu_init(u32 hyp_va_bits);

 static inline void *__kvm_vector_slot2addr(void *base,
 					   enum arm64_hyp_spectre_vector slot)
+3 -2
arch/arm64/kvm/arm.c
 #include <asm/kvm_pkvm.h>
 #include <asm/kvm_ptrauth.h>
 #include <asm/sections.h>
+#include <asm/stacktrace/nvhe.h>

 #include <kvm/arm_hypercalls.h>
 #include <kvm/arm_pmu.h>
···
 /* Inits Hyp-mode on all online CPUs */
 static int __init init_hyp_mode(void)
 {
-	u32 hyp_va_bits;
+	u32 hyp_va_bits = kvm_hyp_va_bits();
 	int cpu;
 	int err = -ENOMEM;
···
 	/*
 	 * Allocate Hyp PGD and setup Hyp identity mapping
 	 */
-	err = kvm_mmu_init(&hyp_va_bits);
+	err = kvm_mmu_init(hyp_va_bits);
 	if (err)
 		goto out_err;
+1
arch/arm64/kvm/emulate-nested.c
 	SR_TRAP(SYS_DBGWCRn_EL1(12),	CGT_MDCR_TDE_TDA),
 	SR_TRAP(SYS_DBGWCRn_EL1(13),	CGT_MDCR_TDE_TDA),
 	SR_TRAP(SYS_DBGWCRn_EL1(14),	CGT_MDCR_TDE_TDA),
+	SR_TRAP(SYS_DBGWCRn_EL1(15),	CGT_MDCR_TDE_TDA),
 	SR_TRAP(SYS_DBGCLAIMSET_EL1,	CGT_MDCR_TDE_TDA),
 	SR_TRAP(SYS_DBGCLAIMCLR_EL1,	CGT_MDCR_TDE_TDA),
 	SR_TRAP(SYS_DBGAUTHSTATUS_EL1,	CGT_MDCR_TDE_TDA),
+2 -4
arch/arm64/kvm/hyp/include/hyp/switch.h
 	 * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access to
 	 * it will cause an exception.
 	 */
-	if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd()) {
+	if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd())
 		write_sysreg(1 << 30, fpexc32_el2);
-		isb();
-	}
 }
···
 	/*
 	 * When the guest owns the FP regs, we know that guest+hyp traps for
 	 * any FPSIMD/SVE/SME features exposed to the guest have been disabled
-	 * by either fpsimd_lazy_switch_to_guest() or kvm_hyp_handle_fpsimd()
+	 * by either __activate_cptr_traps() or kvm_hyp_handle_fpsimd()
 	 * prior to __guest_entry(). As __guest_entry() guarantees a context
 	 * synchronization event, we don't need an ISB here to avoid taking
 	 * traps for anything that was exposed to the guest.
+2 -2
arch/arm64/kvm/hyp/nvhe/ffa.c
 			.a0 = FFA_VERSION,
 			.a1 = ffa_req_version,
 		}, res);
-	if (res->a0 == FFA_RET_NOT_SUPPORTED)
+	if ((s32)res->a0 == FFA_RET_NOT_SUPPORTED)
 		goto unlock;

 	hyp_ffa_version = ffa_req_version;
···
 			.a0 = FFA_VERSION,
 			.a1 = FFA_VERSION_1_2,
 		}, &res);
-	if (res.a0 == FFA_RET_NOT_SUPPORTED)
+	if ((s32)res.a0 == FFA_RET_NOT_SUPPORTED)
 		return 0;

 	/*
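The (s32) cast is the whole fix: FFA error codes such as FFA_RET_NOT_SUPPORTED are negative 32-bit values, while a0 holds a 64-bit register image, so a 32-bit error code arriving zero-extended never compares equal to the sign-extended 64-bit constant. A standalone sketch of the pitfall (the value of a0 is an assumed example of such a zero-extended return, not taken from the kernel):

#include <stdint.h>
#include <stdio.h>

#define FFA_RET_NOT_SUPPORTED (-1)

int main(void)
{
	/* A 32-bit error code of -1, zero-extended into a 64-bit register. */
	uint64_t a0 = 0xffffffffUL;

	/* Broken: -1 converts to 0xffffffffffffffff, which never matches
	 * the zero-extended value, so the error is silently missed. */
	printf("unsigned compare: %d\n", a0 == (uint64_t)FFA_RET_NOT_SUPPORTED);

	/* Fixed, as in the ffa.c hunks above: truncate to the low 32 bits
	 * and compare as signed. */
	printf("signed compare:   %d\n", (int32_t)a0 == FFA_RET_NOT_SUPPORTED);
	return 0;
}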
+1 -1
arch/arm64/kvm/hyp/vhe/sysreg-sr.c
 	/*
 	 * When running a normal EL1 guest, we only load a new vcpu
-	 * after a context switch, which imvolves a DSB, so all
+	 * after a context switch, which involves a DSB, so all
 	 * speculative EL1&0 walks will have already completed.
 	 * If running NV, the vcpu may transition between vEL1 and
 	 * vEL2 without a context switch, so make sure we complete
+4 -24
arch/arm64/kvm/mmu.c
 	.virt_to_phys		= kvm_host_pa,
 };

-int __init kvm_mmu_init(u32 *hyp_va_bits)
+int __init kvm_mmu_init(u32 hyp_va_bits)
 {
 	int err;
-	u32 idmap_bits;
-	u32 kernel_bits;

 	hyp_idmap_start = __pa_symbol(__hyp_idmap_text_start);
 	hyp_idmap_start = ALIGN_DOWN(hyp_idmap_start, PAGE_SIZE);
···
 	 */
 	BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);

-	/*
-	 * The ID map is always configured for 48 bits of translation, which
-	 * may be fewer than the number of VA bits used by the regular kernel
-	 * stage 1, when VA_BITS=52.
-	 *
-	 * At EL2, there is only one TTBR register, and we can't switch between
-	 * translation tables *and* update TCR_EL2.T0SZ at the same time. Bottom
-	 * line: we need to use the extended range with *both* our translation
-	 * tables.
-	 *
-	 * So use the maximum of the idmap VA bits and the regular kernel stage
-	 * 1 VA bits to assure that the hypervisor can both ID map its code page
-	 * and map any kernel memory.
-	 */
-	idmap_bits = IDMAP_VA_BITS;
-	kernel_bits = vabits_actual;
-	*hyp_va_bits = max(idmap_bits, kernel_bits);
-
-	kvm_debug("Using %u-bit virtual addresses at EL2\n", *hyp_va_bits);
+	kvm_debug("Using %u-bit virtual addresses at EL2\n", hyp_va_bits);
 	kvm_debug("IDMAP page: %lx\n", hyp_idmap_start);
 	kvm_debug("HYP VA range: %lx:%lx\n",
 		  kern_hyp_va(PAGE_OFFSET),
···
 		goto out;
 	}

-	err = kvm_pgtable_hyp_init(hyp_pgtable, *hyp_va_bits, &kvm_hyp_mm_ops);
+	err = kvm_pgtable_hyp_init(hyp_pgtable, hyp_va_bits, &kvm_hyp_mm_ops);
 	if (err)
 		goto out_free_pgtable;
···
 		goto out_destroy_pgtable;

 	io_map_base = hyp_idmap_start;
-	__hyp_va_bits = *hyp_va_bits;
+	__hyp_va_bits = hyp_va_bits;
 	return 0;

 out_destroy_pgtable:
+12
arch/arm64/kvm/nested.c
 	lockdep_assert_held_write(&kvm->mmu_lock);

+	if (!kvm->arch.nested_mmus_size)
+		return;
+
 	for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
 		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
···
 	int i;

 	lockdep_assert_held_write(&kvm->mmu_lock);
+
+	if (!kvm->arch.nested_mmus_size)
+		return;

 	for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
 		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
···
 	lockdep_assert_held_write(&kvm->mmu_lock);

+	if (!kvm->arch.nested_mmus_size)
+		return;
+
 	for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
 		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
···
 void kvm_arch_flush_shadow_all(struct kvm *kvm)
 {
 	int i;
+
+	if (!kvm->arch.nested_mmus_size)
+		return;

 	for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
 		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
+28 -5
arch/arm64/kvm/va_layout.c
 }

 /*
+ * Calculate the actual VA size used by the hypervisor
+ */
+__init u32 kvm_hyp_va_bits(void)
+{
+	/*
+	 * The ID map is always configured for 48 bits of translation, which may
+	 * be different from the number of VA bits used by the regular kernel
+	 * stage 1.
+	 *
+	 * At EL2, there is only one TTBR register, and we can't switch between
+	 * translation tables *and* update TCR_EL2.T0SZ at the same time. Bottom
+	 * line: we need to use the extended range with *both* our translation
+	 * tables.
+	 *
+	 * So use the maximum of the idmap VA bits and the regular kernel stage
+	 * 1 VA bits as the hypervisor VA size to assure that the hypervisor can
+	 * both ID map its code page and map any kernel memory.
+	 */
+	return max(IDMAP_VA_BITS, vabits_actual);
+}
+
+/*
  * We want to generate a hyp VA with the following format (with V ==
- * vabits_actual):
+ * hypervisor VA bits):
  *
  * 63 ... V | V-1 | V-2 .. tag_lsb | tag_lsb - 1 .. 0
  * ---------------------------------------------------------
···
 {
 	phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
 	u64 hyp_va_msb;
+	u32 hyp_va_bits = kvm_hyp_va_bits();

 	/* Where is my RAM region? */
-	hyp_va_msb  = idmap_addr & BIT(vabits_actual - 1);
-	hyp_va_msb ^= BIT(vabits_actual - 1);
+	hyp_va_msb  = idmap_addr & BIT(hyp_va_bits - 1);
+	hyp_va_msb ^= BIT(hyp_va_bits - 1);

 	tag_lsb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^
 			(u64)(high_memory - 1));
···
 	va_mask = GENMASK_ULL(tag_lsb - 1, 0);
 	tag_val = hyp_va_msb;

-	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && tag_lsb != (vabits_actual - 1)) {
+	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && tag_lsb != (hyp_va_bits - 1)) {
 		/* We have some free bits to insert a random tag. */
-		tag_val |= get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
+		tag_val |= get_random_long() & GENMASK_ULL(hyp_va_bits - 2, tag_lsb);
 	}
 	tag_val >>= tag_lsb;
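With this, the EL2 VA size is computed in exactly one place as max(IDMAP_VA_BITS, vabits_actual), and the tag masks above are sized from that value instead of from vabits_actual alone. A quick standalone check of the rule under two assumed configurations (48-bit ID map in both, as the comment describes; the helper is an illustrative model of kvm_hyp_va_bits(), not the kernel function):

#include <assert.h>

/* Model of the rule: the EL2 stage-1 must cover both the 48-bit ID map
 * and the kernel's actual VA range, so take the larger of the two. */
static unsigned int hyp_va_bits(unsigned int idmap_va_bits,
				unsigned int vabits_actual)
{
	return idmap_va_bits > vabits_actual ? idmap_va_bits : vabits_actual;
}

int main(void)
{
	assert(hyp_va_bits(48, 52) == 52);	/* VA_BITS=52: kernel range wins */
	assert(hyp_va_bits(48, 39) == 48);	/* VA_BITS=39: ID map wins */
	return 0;
}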
+1 -1
arch/arm64/kvm/vgic/vgic-v3-nested.c
  * as the L1 guest is in charge of provisioning the interrupts via its own
  * view of the ICH_LR*_EL2 registers, which conveniently live in the VNCR
  * page. This means that the flow described above does work (there is no
- * state to rebuild in the L0 hypervisor), and that most things happed on L2
+ * state to rebuild in the L0 hypervisor), and that most things happen on L2
  * load/put:
  *
  * - on L2 load: move the in-memory L1 vGIC configuration into a shadow,