Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch kvm-arm64/pkvm-no-mte into kvmarm-master/next

* kvm-arm64/pkvm-no-mte:
: .
: pKVM updates preventing the host from using MTE-related system
: registers when the feature is disabled from the kernel
: command-line (arm64.nomte), courtesy of Fuad Tabba.
:
: From the cover letter:
:
: "If MTE is supported by the hardware (and is enabled at EL3), it remains
: available to lower exception levels by default. Disabling it in the host
: kernel (e.g., via 'arm64.nomte') only stops the kernel from advertising
: the feature; it does not physically disable MTE in the hardware.
:
: The ability to disable MTE in the host kernel is used by some systems,
: such as Android, so that the physical memory otherwise used as tag
: storage can be used for other things (i.e. treated just like the rest of
: memory). In this scenario, a malicious host could still access tags in
: pages donated to a guest using MTE instructions (e.g., STG and LDG),
: bypassing the kernel's configuration."
: .
KVM: arm64: Use kvm_has_mte() in pKVM trap initialization
KVM: arm64: Inject UNDEF when accessing MTE sysregs with MTE disabled
KVM: arm64: Trap MTE access and discovery when MTE is disabled
KVM: arm64: Remove dead code resetting HCR_EL2 for pKVM

Signed-off-by: Marc Zyngier <maz@kernel.org>

+76 -8
+1 -1
arch/arm64/include/asm/kvm_arm.h
··· 101 101 HCR_BSU_IS | HCR_FB | HCR_TACR | \ 102 102 HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \ 103 103 HCR_FMO | HCR_IMO | HCR_PTW | HCR_TID3 | HCR_TID1) 104 - #define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK | HCR_ATA) 104 + #define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK) 105 105 #define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC) 106 106 #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H | HCR_AMO | HCR_IMO | HCR_FMO) 107 107
+1 -1
arch/arm64/kernel/head.S
··· 299 299 isb 300 300 0: 301 301 302 - init_el2_hcr HCR_HOST_NVHE_FLAGS 302 + init_el2_hcr HCR_HOST_NVHE_FLAGS | HCR_ATA 303 303 init_el2_state 304 304 305 305 /* Hypervisor stub */
+6
arch/arm64/kvm/arm.c
··· 2093 2093 params->hcr_el2 = HCR_HOST_NVHE_PROTECTED_FLAGS; 2094 2094 else 2095 2095 params->hcr_el2 = HCR_HOST_NVHE_FLAGS; 2096 + 2097 + if (system_supports_mte()) 2098 + params->hcr_el2 |= HCR_ATA; 2099 + else 2100 + params->hcr_el2 |= HCR_TID5; 2101 + 2096 2102 if (cpus_have_final_cap(ARM64_KVM_HVHE)) 2097 2103 params->hcr_el2 |= HCR_E2H; 2098 2104 params->vttbr = params->vtcr = 0;
-5
arch/arm64/kvm/hyp/nvhe/hyp-init.S
··· 260 260 msr sctlr_el2, x5 261 261 isb 262 262 263 - alternative_if ARM64_KVM_PROTECTED_MODE 264 - mov_q x5, HCR_HOST_NVHE_FLAGS 265 - msr_hcr_el2 x5 266 - alternative_else_nop_endif 267 - 268 263 /* Install stub vectors */ 269 264 adr_l x5, __hyp_stub_vectors 270 265 msr vbar_el2, x5
+67
arch/arm64/kvm/hyp/nvhe/hyp-main.c
··· 690 690 kvm_skip_host_instr(); 691 691 } 692 692 693 + /* 694 + * Inject an Undefined Instruction exception into the host. 695 + * 696 + * This is open-coded to allow control over PSTATE construction without 697 + * complicating the generic exception entry helpers. 698 + */ 699 + static void inject_undef64(void) 700 + { 701 + u64 spsr_mask, vbar, sctlr, old_spsr, new_spsr, esr, offset; 702 + 703 + spsr_mask = PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT | PSR_DIT_BIT | PSR_PAN_BIT; 704 + 705 + vbar = read_sysreg_el1(SYS_VBAR); 706 + sctlr = read_sysreg_el1(SYS_SCTLR); 707 + old_spsr = read_sysreg_el2(SYS_SPSR); 708 + 709 + new_spsr = old_spsr & spsr_mask; 710 + new_spsr |= PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT; 711 + new_spsr |= PSR_MODE_EL1h; 712 + 713 + if (!(sctlr & SCTLR_EL1_SPAN)) 714 + new_spsr |= PSR_PAN_BIT; 715 + 716 + if (sctlr & SCTLR_ELx_DSSBS) 717 + new_spsr |= PSR_SSBS_BIT; 718 + 719 + if (system_supports_mte()) 720 + new_spsr |= PSR_TCO_BIT; 721 + 722 + esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT) | ESR_ELx_IL; 723 + offset = CURRENT_EL_SP_ELx_VECTOR + except_type_sync; 724 + 725 + write_sysreg_el1(esr, SYS_ESR); 726 + write_sysreg_el1(read_sysreg_el2(SYS_ELR), SYS_ELR); 727 + write_sysreg_el1(old_spsr, SYS_SPSR); 728 + write_sysreg_el2(vbar + offset, SYS_ELR); 729 + write_sysreg_el2(new_spsr, SYS_SPSR); 730 + } 731 + 732 + static bool handle_host_mte(u64 esr) 733 + { 734 + switch (esr_sys64_to_sysreg(esr)) { 735 + case SYS_RGSR_EL1: 736 + case SYS_GCR_EL1: 737 + case SYS_TFSR_EL1: 738 + case SYS_TFSRE0_EL1: 739 + /* If we're here for any reason other than MTE, it's a bug. */ 740 + if (read_sysreg(HCR_EL2) & HCR_ATA) 741 + return false; 742 + break; 743 + case SYS_GMID_EL1: 744 + /* If we're here for any reason other than MTE, it's a bug. */ 745 + if (!(read_sysreg(HCR_EL2) & HCR_TID5)) 746 + return false; 747 + break; 748 + default: 749 + return false; 750 + } 751 + 752 + inject_undef64(); 753 + return true; 754 + } 755 + 693 756 void handle_trap(struct kvm_cpu_context *host_ctxt) 694 757 { 695 758 u64 esr = read_sysreg_el2(SYS_ESR); ··· 768 705 case ESR_ELx_EC_DABT_LOW: 769 706 handle_host_mem_abort(host_ctxt); 770 707 break; 708 + case ESR_ELx_EC_SYS64: 709 + if (handle_host_mte(esr)) 710 + break; 711 + fallthrough; 771 712 default: 772 713 BUG(); 773 714 }
+1 -1
arch/arm64/kvm/hyp/nvhe/pkvm.c
··· 82 82 if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, IMP)) 83 83 val &= ~(HCR_AMVOFFEN); 84 84 85 - if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, MTE, IMP)) { 85 + if (!kvm_has_mte(kvm)) { 86 86 val |= HCR_TID5; 87 87 val &= ~(HCR_DCT | HCR_ATA); 88 88 }