Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

KVM: arm64: Inject UNDEF when accessing MTE sysregs with MTE disabled

When MTE hardware is present but disabled via software (`arm64.nomte` or
`CONFIG_ARM64_MTE=n`), the kernel clears `HCR_EL2.ATA` and sets
`HCR_EL2.TID5`, to prevent the use of MTE instructions.

Additionally, accesses to certain MTE system registers trap to EL2 with
exception class ESR_ELx_EC_SYS64. To emulate hardware without MTE (where
such accesses would cause an Undefined Instruction exception), inject
UNDEF into the host.

Signed-off-by: Fuad Tabba <tabba@google.com>
Link: https://patch.msgid.link/20260122112218.531948-4-tabba@google.com
Signed-off-by: Marc Zyngier <maz@kernel.org>

authored by

Fuad Tabba and committed by
Marc Zyngier
5ee8ad69 f35abcbb

+67
+67
arch/arm64/kvm/hyp/nvhe/hyp-main.c
	kvm_skip_host_instr();
}

/*
 * Inject an Undefined Instruction exception into the host.
 *
 * This is open-coded to allow control over PSTATE construction without
 * complicating the generic exception entry helpers.
 */
static void inject_undef64(void)
{
	u64 spsr_mask, vbar, sctlr, old_spsr, new_spsr, esr, offset;

	/* PSTATE bits carried over from the interrupted context: NZCV, DIT, PAN. */
	spsr_mask = PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT | PSR_DIT_BIT | PSR_PAN_BIT;

	vbar = read_sysreg_el1(SYS_VBAR);
	sctlr = read_sysreg_el1(SYS_SCTLR);
	old_spsr = read_sysreg_el2(SYS_SPSR);

	/*
	 * Build the PSTATE the host's EL1 handler will run with:
	 * all of DAIF masked, EL1 with SP_EL1 (EL1h).
	 */
	new_spsr = old_spsr & spsr_mask;
	new_spsr |= PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT;
	new_spsr |= PSR_MODE_EL1h;

	/* SCTLR_EL1.SPAN == 0 forces PAN set on exception entry. */
	if (!(sctlr & SCTLR_EL1_SPAN))
		new_spsr |= PSR_PAN_BIT;

	/* SCTLR_EL1.DSSBS gives the default SSBS value on exception entry. */
	if (sctlr & SCTLR_ELx_DSSBS)
		new_spsr |= PSR_SSBS_BIT;

	/* With MTE present, TCO is set on exception entry. */
	if (system_supports_mte())
		new_spsr |= PSR_TCO_BIT;

	/* UNDEF: EC == Unknown, with a 32-bit instruction length. */
	esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT) | ESR_ELx_IL;
	/* Synchronous exception taken from the current EL, using SP_ELx. */
	offset = CURRENT_EL_SP_ELx_VECTOR + except_type_sync;

	/*
	 * Present the exception to the host's EL1:
	 *  - ESR_EL1 describes the (synthetic) UNDEF,
	 *  - ELR_EL1 holds the faulting PC (taken from ELR_EL2),
	 *  - SPSR_EL1 holds the interrupted PSTATE,
	 *  - ELR_EL2/SPSR_EL2 make the eventual ERET land in the host's
	 *    sync vector with the PSTATE constructed above.
	 */
	write_sysreg_el1(esr, SYS_ESR);
	write_sysreg_el1(read_sysreg_el2(SYS_ELR), SYS_ELR);
	write_sysreg_el1(old_spsr, SYS_SPSR);
	write_sysreg_el2(vbar + offset, SYS_ELR);
	write_sysreg_el2(new_spsr, SYS_SPSR);
}

/*
 * Handle a host trap on an MTE system register when MTE is disabled by
 * software: emulate hardware without MTE by injecting an UNDEF.
 *
 * Returns true if the trap was an expected MTE sysreg access (UNDEF
 * injected), false otherwise so the caller can treat it as a bug.
 */
static bool handle_host_mte(u64 esr)
{
	switch (esr_sys64_to_sysreg(esr)) {
	case SYS_RGSR_EL1:
	case SYS_GCR_EL1:
	case SYS_TFSR_EL1:
	case SYS_TFSRE0_EL1:
		/* If we're here for any reason other than MTE, it's a bug. */
		if (read_sysreg(HCR_EL2) & HCR_ATA)
			return false;
		break;
	case SYS_GMID_EL1:
		/* If we're here for any reason other than MTE, it's a bug. */
		if (!(read_sysreg(HCR_EL2) & HCR_TID5))
			return false;
		break;
	default:
		return false;
	}

	inject_undef64();
	return true;
}

void handle_trap(struct kvm_cpu_context *host_ctxt)
{
	u64 esr = read_sysreg_el2(SYS_ESR);
	/* NOTE(review): intervening lines of handle_trap are elided in this
	 * (diff-rendered) view; the fragment below shows only the changed
	 * switch cases and cannot stand alone. */
	case ESR_ELx_EC_DABT_LOW:
		handle_host_mem_abort(host_ctxt);
		break;
	case ESR_ELx_EC_SYS64:
		/* Expected only for MTE sysreg traps; anything else is fatal. */
		if (handle_host_mte(esr))
			break;
		fallthrough;
	default:
		BUG();
	}