Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'kvmarm-fixes-7.1-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/arm64 fixes for 7.1, take #1

- Allow tracing for non-pKVM, which was accidentally disabled when
the series was merged

- Rationalise the way the pKVM hypercall ranges are defined by using
the same mechanism as already used for the vcpu_sysreg enum

- Enforce that SMCCC function numbers relayed by the pKVM proxy are
actually compliant with the specification

- Fix a couple of feature-to-idreg mappings which resulted in the
wrong sanitisation being applied

- Fix the GICD_IIDR revision number field that could never be
written correctly by userspace

- Make kvm_vcpu_initialized() correctly use its parameter instead
of relying on the surrounding context

- Enforce correct ordering in __pkvm_init_vcpu(), plugging a
potential pin leak at the same time

- Move __pkvm_init_finalise() to a less dangerous spot, avoiding
future problems

- Restore functional userspace irqchip support after a four year
breakage (last functional kernel was 5.18...). This is obviously
ripe for garbage collection.

- ... and the usual lot of spelling fixes

+87 -53
+18 -10
arch/arm64/include/asm/kvm_asm.h
··· 50 50 51 51 #include <linux/mm.h> 52 52 53 + #define MARKER(m) \ 54 + m, __after_##m = m - 1 55 + 53 56 enum __kvm_host_smccc_func { 54 57 /* Hypercalls that are unavailable once pKVM has finalised. */ 55 58 /* __KVM_HOST_SMCCC_FUNC___kvm_hyp_init */ ··· 62 59 __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs, 63 60 __KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs, 64 61 __KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config, 62 + 63 + MARKER(__KVM_HOST_SMCCC_FUNC_MIN_PKVM), 64 + 65 65 __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize, 66 - __KVM_HOST_SMCCC_FUNC_MIN_PKVM = __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize, 67 66 68 67 /* Hypercalls that are always available and common to [nh]VHE/pKVM. */ 69 68 __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc, ··· 77 72 __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_range, 78 73 __KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context, 79 74 __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff, 75 + __KVM_HOST_SMCCC_FUNC___tracing_load, 76 + __KVM_HOST_SMCCC_FUNC___tracing_unload, 77 + __KVM_HOST_SMCCC_FUNC___tracing_enable, 78 + __KVM_HOST_SMCCC_FUNC___tracing_swap_reader, 79 + __KVM_HOST_SMCCC_FUNC___tracing_update_clock, 80 + __KVM_HOST_SMCCC_FUNC___tracing_reset, 81 + __KVM_HOST_SMCCC_FUNC___tracing_enable_event, 82 + __KVM_HOST_SMCCC_FUNC___tracing_write_event, 80 83 __KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs, 81 84 __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_vmcr_aprs, 82 85 __KVM_HOST_SMCCC_FUNC___vgic_v5_save_apr, 83 86 __KVM_HOST_SMCCC_FUNC___vgic_v5_restore_vmcr_apr, 84 - __KVM_HOST_SMCCC_FUNC_MAX_NO_PKVM = __KVM_HOST_SMCCC_FUNC___vgic_v5_restore_vmcr_apr, 87 + 88 + MARKER(__KVM_HOST_SMCCC_FUNC_PKVM_ONLY), 85 89 86 90 /* Hypercalls that are available only when pKVM has finalised. 
*/ 87 91 __KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp, ··· 114 100 __KVM_HOST_SMCCC_FUNC___pkvm_vcpu_load, 115 101 __KVM_HOST_SMCCC_FUNC___pkvm_vcpu_put, 116 102 __KVM_HOST_SMCCC_FUNC___pkvm_tlb_flush_vmid, 117 - __KVM_HOST_SMCCC_FUNC___tracing_load, 118 - __KVM_HOST_SMCCC_FUNC___tracing_unload, 119 - __KVM_HOST_SMCCC_FUNC___tracing_enable, 120 - __KVM_HOST_SMCCC_FUNC___tracing_swap_reader, 121 - __KVM_HOST_SMCCC_FUNC___tracing_update_clock, 122 - __KVM_HOST_SMCCC_FUNC___tracing_reset, 123 - __KVM_HOST_SMCCC_FUNC___tracing_enable_event, 124 - __KVM_HOST_SMCCC_FUNC___tracing_write_event, 103 + 104 + MARKER(__KVM_HOST_SMCCC_FUNC_MAX) 125 105 }; 126 106 127 107 #define DECLARE_KVM_VHE_SYM(sym) extern char sym[]
+1 -4
arch/arm64/include/asm/kvm_host.h
··· 450 450 r = __VNCR_START__ + ((VNCR_ ## r) / 8), \ 451 451 __after_##r = __MAX__(__before_##r - 1, r) 452 452 453 - #define MARKER(m) \ 454 - m, __after_##m = m - 1 455 - 456 453 enum vcpu_sysreg { 457 454 __INVALID_SYSREG__, /* 0 is reserved as an invalid value */ 458 455 MPIDR_EL1, /* MultiProcessor Affinity Register */ ··· 1545 1548 #define kvm_vcpu_has_feature(k, f) __vcpu_has_feature(&(k)->arch, (f)) 1546 1549 #define vcpu_has_feature(v, f) __vcpu_has_feature(&(v)->kvm->arch, (f)) 1547 1550 1548 - #define kvm_vcpu_initialized(v) vcpu_get_flag(vcpu, VCPU_INITIALIZED) 1551 + #define kvm_vcpu_initialized(v) vcpu_get_flag(v, VCPU_INITIALIZED) 1549 1552 1550 1553 int kvm_trng_call(struct kvm_vcpu *vcpu); 1551 1554 #ifdef CONFIG_KVM
+4
arch/arm64/kvm/arm.c
··· 824 824 { 825 825 bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF | HCR_VSE); 826 826 827 + irq_lines |= (!irqchip_in_kernel(v->kvm) && 828 + (kvm_timer_should_notify_user(v) || 829 + kvm_pmu_should_notify_user(v))); 830 + 827 831 return ((irq_lines || kvm_vgic_vcpu_pending_irq(v)) 828 832 && !kvm_arm_vcpu_stopped(v) && !v->arch.pause); 829 833 }
+16 -7
arch/arm64/kvm/config.c
··· 131 131 } 132 132 133 133 #define FEAT_SPE ID_AA64DFR0_EL1, PMSVer, IMP 134 - #define FEAT_SPE_FnE ID_AA64DFR0_EL1, PMSVer, V1P2 135 134 #define FEAT_BRBE ID_AA64DFR0_EL1, BRBE, IMP 136 135 #define FEAT_TRC_SR ID_AA64DFR0_EL1, TraceVer, IMP 137 136 #define FEAT_PMUv3 ID_AA64DFR0_EL1, PMUVer, IMP ··· 191 192 #define FEAT_SRMASK ID_AA64MMFR4_EL1, SRMASK, IMP 192 193 #define FEAT_PoPS ID_AA64MMFR4_EL1, PoPS, IMP 193 194 #define FEAT_PFAR ID_AA64PFR1_EL1, PFAR, IMP 194 - #define FEAT_Debugv8p9 ID_AA64DFR0_EL1, PMUVer, V3P9 195 + #define FEAT_Debugv8p9 ID_AA64DFR0_EL1, DebugVer, V8P9 195 196 #define FEAT_PMUv3_SS ID_AA64DFR0_EL1, PMSS, IMP 196 197 #define FEAT_SEBEP ID_AA64DFR0_EL1, SEBEP, IMP 197 198 #define FEAT_EBEP ID_AA64DFR1_EL1, EBEP, IMP ··· 282 283 static bool feat_sme_smps(struct kvm *kvm) 283 284 { 284 285 /* 285 - * Revists this if KVM ever supports SME -- this really should 286 + * Revisit this if KVM ever supports SME -- this really should 286 287 * look at the guest's view of SMIDR_EL1. Funnily enough, this 287 288 * is not captured in the JSON file, but only as a note in the 288 289 * ARM ARM. ··· 294 295 static bool feat_spe_fds(struct kvm *kvm) 295 296 { 296 297 /* 297 - * Revists this if KVM ever supports SPE -- this really should 298 + * Revisit this if KVM ever supports SPE -- this really should 298 299 * look at the guest's view of PMSIDR_EL1. 299 300 */ 300 301 return (kvm_has_feat(kvm, FEAT_SPEv1p4) && 301 302 (read_sysreg_s(SYS_PMSIDR_EL1) & PMSIDR_EL1_FDS)); 302 303 } 303 304 305 + static bool feat_spe_fne(struct kvm *kvm) 306 + { 307 + /* 308 + * Revisit this if KVM ever supports SPE -- this really should 309 + * look at the guest's view of PMSIDR_EL1. 
310 + */ 311 + return (kvm_has_feat(kvm, FEAT_SPEv1p2) && 312 + (read_sysreg_s(SYS_PMSIDR_EL1) & PMSIDR_EL1_FnE)); 313 + } 314 + 304 315 static bool feat_trbe_mpam(struct kvm *kvm) 305 316 { 306 317 /* 307 - * Revists this if KVM ever supports both MPAM and TRBE -- 318 + * Revisit this if KVM ever supports both MPAM and TRBE -- 308 319 * this really should look at the guest's view of TRBIDR_EL1. 309 320 */ 310 321 return (kvm_has_feat(kvm, FEAT_TRBE) && ··· 546 537 HDFGRTR_EL2_PMBPTR_EL1 | 547 538 HDFGRTR_EL2_PMBLIMITR_EL1, 548 539 FEAT_SPE), 549 - NEEDS_FEAT(HDFGRTR_EL2_nPMSNEVFR_EL1, FEAT_SPE_FnE), 540 + NEEDS_FEAT(HDFGRTR_EL2_nPMSNEVFR_EL1, feat_spe_fne), 550 541 NEEDS_FEAT(HDFGRTR_EL2_nBRBDATA | 551 542 HDFGRTR_EL2_nBRBCTL | 552 543 HDFGRTR_EL2_nBRBIDR, ··· 614 605 HDFGWTR_EL2_PMBPTR_EL1 | 615 606 HDFGWTR_EL2_PMBLIMITR_EL1, 616 607 FEAT_SPE), 617 - NEEDS_FEAT(HDFGWTR_EL2_nPMSNEVFR_EL1, FEAT_SPE_FnE), 608 + NEEDS_FEAT(HDFGWTR_EL2_nPMSNEVFR_EL1, feat_spe_fne), 618 609 NEEDS_FEAT(HDFGWTR_EL2_nBRBDATA | 619 610 HDFGWTR_EL2_nBRBCTL, 620 611 FEAT_BRBE),
+17 -13
arch/arm64/kvm/hyp/nvhe/hyp-main.c
··· 709 709 HANDLE_FUNC(__kvm_tlb_flush_vmid_range), 710 710 HANDLE_FUNC(__kvm_flush_cpu_context), 711 711 HANDLE_FUNC(__kvm_timer_set_cntvoff), 712 + HANDLE_FUNC(__tracing_load), 713 + HANDLE_FUNC(__tracing_unload), 714 + HANDLE_FUNC(__tracing_enable), 715 + HANDLE_FUNC(__tracing_swap_reader), 716 + HANDLE_FUNC(__tracing_update_clock), 717 + HANDLE_FUNC(__tracing_reset), 718 + HANDLE_FUNC(__tracing_enable_event), 719 + HANDLE_FUNC(__tracing_write_event), 712 720 HANDLE_FUNC(__vgic_v3_save_aprs), 713 721 HANDLE_FUNC(__vgic_v3_restore_vmcr_aprs), 714 722 HANDLE_FUNC(__vgic_v5_save_apr), ··· 743 735 HANDLE_FUNC(__pkvm_vcpu_load), 744 736 HANDLE_FUNC(__pkvm_vcpu_put), 745 737 HANDLE_FUNC(__pkvm_tlb_flush_vmid), 746 - HANDLE_FUNC(__tracing_load), 747 - HANDLE_FUNC(__tracing_unload), 748 - HANDLE_FUNC(__tracing_enable), 749 - HANDLE_FUNC(__tracing_swap_reader), 750 - HANDLE_FUNC(__tracing_update_clock), 751 - HANDLE_FUNC(__tracing_reset), 752 - HANDLE_FUNC(__tracing_enable_event), 753 - HANDLE_FUNC(__tracing_write_event), 754 738 }; 755 739 756 740 static void handle_host_hcall(struct kvm_cpu_context *host_ctxt) 757 741 { 758 742 DECLARE_REG(unsigned long, id, host_ctxt, 0); 759 - unsigned long hcall_min = 0, hcall_max = -1; 743 + unsigned long hcall_min = 0, hcall_max = __KVM_HOST_SMCCC_FUNC_MAX; 760 744 hcall_t hfn; 745 + 746 + BUILD_BUG_ON(ARRAY_SIZE(host_hcall) != __KVM_HOST_SMCCC_FUNC_MAX); 761 747 762 748 /* 763 749 * If pKVM has been initialised then reject any calls to the ··· 765 763 if (static_branch_unlikely(&kvm_protected_mode_initialized)) { 766 764 hcall_min = __KVM_HOST_SMCCC_FUNC_MIN_PKVM; 767 765 } else { 768 - hcall_max = __KVM_HOST_SMCCC_FUNC_MAX_NO_PKVM; 766 + hcall_max = __KVM_HOST_SMCCC_FUNC_PKVM_ONLY; 769 767 } 770 768 771 769 id &= ~ARM_SMCCC_CALL_HINTS; 772 770 id -= KVM_HOST_SMCCC_ID(0); 773 771 774 - if (unlikely(id < hcall_min || id > hcall_max || 775 - id >= ARRAY_SIZE(host_hcall))) { 772 + if (unlikely(id < hcall_min || id >= hcall_max)) 
776 773 goto inval; 777 - } 778 774 779 775 hfn = host_hcall[id]; 780 776 if (unlikely(!hfn)) ··· 805 805 } 806 806 807 807 func_id &= ~ARM_SMCCC_CALL_HINTS; 808 + if (upper_32_bits(func_id)) { 809 + cpu_reg(host_ctxt, 0) = SMCCC_RET_NOT_SUPPORTED; 810 + goto exit_skip_instr; 811 + } 808 812 809 813 handled = kvm_host_psci_handler(host_ctxt, func_id); 810 814 if (!handled)
+25 -13
arch/arm64/kvm/hyp/nvhe/pkvm.c
··· 266 266 if (hyp_vm->kvm.created_vcpus <= vcpu_idx) 267 267 goto unlock; 268 268 269 - hyp_vcpu = hyp_vm->vcpus[vcpu_idx]; 269 + /* Pairs with smp_store_release() in register_hyp_vcpu(). */ 270 + hyp_vcpu = smp_load_acquire(&hyp_vm->vcpus[vcpu_idx]); 270 271 if (!hyp_vcpu) 271 272 goto unlock; 272 273 ··· 861 860 * the page-aligned size of 'struct pkvm_hyp_vcpu'. 862 861 * Return 0 on success, negative error code on failure. 863 862 */ 863 + static int register_hyp_vcpu(struct pkvm_hyp_vm *hyp_vm, 864 + struct pkvm_hyp_vcpu *hyp_vcpu) 865 + { 866 + unsigned int idx = hyp_vcpu->vcpu.vcpu_idx; 867 + 868 + if (idx >= hyp_vm->kvm.created_vcpus) 869 + return -EINVAL; 870 + 871 + if (hyp_vm->vcpus[idx]) 872 + return -EINVAL; 873 + 874 + /* 875 + * Ensure the hyp_vcpu is initialised before publishing it to 876 + * the vCPU-load path via 'hyp_vm->vcpus[]'. 877 + */ 878 + smp_store_release(&hyp_vm->vcpus[idx], hyp_vcpu); 879 + return 0; 880 + } 881 + 864 882 int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu, 865 883 unsigned long vcpu_hva) 866 884 { 867 885 struct pkvm_hyp_vcpu *hyp_vcpu; 868 886 struct pkvm_hyp_vm *hyp_vm; 869 - unsigned int idx; 870 887 int ret; 871 888 872 889 hyp_vcpu = map_donated_memory(vcpu_hva, sizeof(*hyp_vcpu)); ··· 903 884 if (ret) 904 885 goto unlock; 905 886 906 - idx = hyp_vcpu->vcpu.vcpu_idx; 907 - if (idx >= hyp_vm->kvm.created_vcpus) { 908 - ret = -EINVAL; 909 - goto unlock; 887 + ret = register_hyp_vcpu(hyp_vm, hyp_vcpu); 888 + if (ret) { 889 + unpin_host_vcpu(host_vcpu); 890 + unpin_host_sve_state(hyp_vcpu); 910 891 } 911 - 912 - if (hyp_vm->vcpus[idx]) { 913 - ret = -EINVAL; 914 - goto unlock; 915 - } 916 - 917 - hyp_vm->vcpus[idx] = hyp_vcpu; 918 892 unlock: 919 893 hyp_spin_unlock(&vm_table_lock); 920 894
+4 -4
arch/arm64/kvm/hyp/nvhe/setup.c
··· 312 312 }; 313 313 pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops; 314 314 315 - ret = fix_host_ownership(); 316 - if (ret) 317 - goto out; 318 - 319 315 ret = fix_hyp_pgtable_refcnt(); 320 316 if (ret) 321 317 goto out; 322 318 323 319 ret = hyp_create_fixmap(); 320 + if (ret) 321 + goto out; 322 + 323 + ret = fix_host_ownership(); 324 324 if (ret) 325 325 goto out; 326 326
+1 -1
arch/arm64/kvm/vgic/vgic-mmio-v2.c
··· 91 91 * migration from old kernels to new kernels with legacy 92 92 * userspace. 93 93 */ 94 - reg = FIELD_GET(GICD_IIDR_REVISION_MASK, reg); 94 + reg = FIELD_GET(GICD_IIDR_REVISION_MASK, val); 95 95 switch (reg) { 96 96 case KVM_VGIC_IMP_REV_2: 97 97 case KVM_VGIC_IMP_REV_3:
+1 -1
arch/arm64/kvm/vgic/vgic-mmio-v3.c
··· 194 194 if ((reg ^ val) & ~GICD_IIDR_REVISION_MASK) 195 195 return -EINVAL; 196 196 197 - reg = FIELD_GET(GICD_IIDR_REVISION_MASK, reg); 197 + reg = FIELD_GET(GICD_IIDR_REVISION_MASK, val); 198 198 switch (reg) { 199 199 case KVM_VGIC_IMP_REV_2: 200 200 case KVM_VGIC_IMP_REV_3: