Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch kvm-arm64/feat_idst into kvmarm-master/next

* kvm-arm64/feat_idst:
: .
: Add support for FEAT_IDST, allowing ID registers that are not implemented
: to be reported as a normal trap rather than as an UNDEF exception.
: .
KVM: arm64: selftests: Add a test for FEAT_IDST
KVM: arm64: pkvm: Report optional ID register traps with a 0x18 syndrome
KVM: arm64: pkvm: Add a generic synchronous exception injection primitive
KVM: arm64: Force trap of GMID_EL1 when the guest doesn't have MTE
KVM: arm64: Handle CCSIDR2_EL1 and SMIDR_EL1 in a generic way
KVM: arm64: Handle FEAT_IDST for sysregs without specific handlers
KVM: arm64: Add a generic synchronous exception injection primitive
KVM: arm64: Add trap routing for GMID_EL1
arm64: Repaint ID_AA64MMFR2_EL1.IDS description

Signed-off-by: Marc Zyngier <maz@kernel.org>

+194 -16
+1
arch/arm64/include/asm/kvm_emulate.h
··· 45 45 void kvm_skip_instr32(struct kvm_vcpu *vcpu); 46 46 47 47 void kvm_inject_undefined(struct kvm_vcpu *vcpu); 48 + void kvm_inject_sync(struct kvm_vcpu *vcpu, u64 esr); 48 49 int kvm_inject_serror_esr(struct kvm_vcpu *vcpu, u64 esr); 49 50 int kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr); 50 51 int kvm_inject_dabt_excl_atomic(struct kvm_vcpu *vcpu, u64 addr);
+21
arch/arm64/kvm/emulate-nested.c
··· 70 70 CGT_HCR_ENSCXT, 71 71 CGT_HCR_TTLBIS, 72 72 CGT_HCR_TTLBOS, 73 + CGT_HCR_TID5, 73 74 74 75 CGT_MDCR_TPMCR, 75 76 CGT_MDCR_TPM, ··· 307 306 .index = HCR_EL2, 308 307 .value = HCR_TTLBOS, 309 308 .mask = HCR_TTLBOS, 309 + .behaviour = BEHAVE_FORWARD_RW, 310 + }, 311 + [CGT_HCR_TID5] = { 312 + .index = HCR_EL2, 313 + .value = HCR_TID5, 314 + .mask = HCR_TID5, 310 315 .behaviour = BEHAVE_FORWARD_RW, 311 316 }, 312 317 [CGT_MDCR_TPMCR] = { ··· 672 665 SR_TRAP(SYS_CCSIDR2_EL1, CGT_HCR_TID2_TID4), 673 666 SR_TRAP(SYS_CLIDR_EL1, CGT_HCR_TID2_TID4), 674 667 SR_TRAP(SYS_CSSELR_EL1, CGT_HCR_TID2_TID4), 668 + SR_TRAP(SYS_GMID_EL1, CGT_HCR_TID5), 675 669 SR_RANGE_TRAP(SYS_ID_PFR0_EL1, 676 670 sys_reg(3, 0, 0, 7, 7), CGT_HCR_TID3), 677 671 SR_TRAP(SYS_ICC_SGI0R_EL1, CGT_HCR_IMO_FMO_ICH_HCR_TC), ··· 2594 2586 struct sys_reg_params params; 2595 2587 2596 2588 params = esr_sys64_to_params(esr); 2589 + 2590 + /* 2591 + * This implements the pseudocode UnimplementedIDRegister() 2592 + * helper for the purpose of dealing with FEAT_IDST. 2593 + */ 2594 + if (in_feat_id_space(&params)) { 2595 + if (kvm_has_feat(vcpu->kvm, ID_AA64MMFR2_EL1, IDS, IMP)) 2596 + kvm_inject_sync(vcpu, kvm_vcpu_get_esr(vcpu)); 2597 + else 2598 + kvm_inject_undefined(vcpu); 2599 + 2600 + return true; 2601 + } 2597 2602 2598 2603 /* 2599 2604 * Check for the IMPDEF range, as per DDI0487 J.a,
+31 -8
arch/arm64/kvm/hyp/nvhe/sys_regs.c
··· 134 134 MAX_FEAT(ID_AA64MMFR2_EL1, UAO, IMP), 135 135 MAX_FEAT(ID_AA64MMFR2_EL1, IESB, IMP), 136 136 MAX_FEAT(ID_AA64MMFR2_EL1, AT, IMP), 137 - MAX_FEAT_ENUM(ID_AA64MMFR2_EL1, IDS, 0x18), 137 + MAX_FEAT(ID_AA64MMFR2_EL1, IDS, IMP), 138 138 MAX_FEAT(ID_AA64MMFR2_EL1, TTL, IMP), 139 139 MAX_FEAT(ID_AA64MMFR2_EL1, BBM, 2), 140 140 MAX_FEAT(ID_AA64MMFR2_EL1, E0PD, IMP), ··· 243 243 } 244 244 } 245 245 246 - /* 247 - * Inject an unknown/undefined exception to an AArch64 guest while most of its 248 - * sysregs are live. 249 - */ 250 - static void inject_undef64(struct kvm_vcpu *vcpu) 246 + static void inject_sync64(struct kvm_vcpu *vcpu, u64 esr) 251 247 { 252 - u64 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT); 253 - 254 248 *vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR); 255 249 *vcpu_cpsr(vcpu) = read_sysreg_el2(SYS_SPSR); 250 + 251 + /* 252 + * Make sure we have the latest update to VBAR_EL1, as pKVM 253 + * handles traps very early, before sysregs are resync'ed 254 + */ 256 255 __vcpu_assign_sys_reg(vcpu, VBAR_EL1, read_sysreg_el1(SYS_VBAR)); 257 256 258 257 kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC); ··· 262 263 write_sysreg_el1(read_sysreg_el2(SYS_ELR), SYS_ELR); 263 264 write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR); 264 265 write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR); 266 + } 267 + 268 + /* 269 + * Inject an unknown/undefined exception to an AArch64 guest while most of its 270 + * sysregs are live. 
271 + */ 272 + static void inject_undef64(struct kvm_vcpu *vcpu) 273 + { 274 + inject_sync64(vcpu, (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT)); 265 275 } 266 276 267 277 static u64 read_id_reg(const struct kvm_vcpu *vcpu, ··· 345 337 p->regval = ICC_SRE_EL1_DIB | ICC_SRE_EL1_DFB | ICC_SRE_EL1_SRE; 346 338 347 339 return true; 340 + } 341 + 342 + static bool pvm_idst_access(struct kvm_vcpu *vcpu, 343 + struct sys_reg_params *p, 344 + const struct sys_reg_desc *r) 345 + { 346 + if (kvm_has_feat(vcpu->kvm, ID_AA64MMFR2_EL1, IDS, IMP)) 347 + inject_sync64(vcpu, kvm_vcpu_get_esr(vcpu)); 348 + else 349 + inject_undef64(vcpu); 350 + 351 + return false; 348 352 } 349 353 350 354 /* Mark the specified system register as an AArch32 feature id register. */ ··· 489 469 490 470 HOST_HANDLED(SYS_CCSIDR_EL1), 491 471 HOST_HANDLED(SYS_CLIDR_EL1), 472 + { SYS_DESC(SYS_CCSIDR2_EL1), .access = pvm_idst_access }, 473 + { SYS_DESC(SYS_GMID_EL1), .access = pvm_idst_access }, 474 + { SYS_DESC(SYS_SMIDR_EL1), .access = pvm_idst_access }, 492 475 HOST_HANDLED(SYS_AIDR_EL1), 493 476 HOST_HANDLED(SYS_CSSELR_EL1), 494 477 HOST_HANDLED(SYS_CTR_EL0),
+7 -3
arch/arm64/kvm/inject_fault.c
··· 162 162 vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu)); 163 163 } 164 164 165 + void kvm_inject_sync(struct kvm_vcpu *vcpu, u64 esr) 166 + { 167 + pend_sync_exception(vcpu); 168 + vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu)); 169 + } 170 + 165 171 static void inject_undef64(struct kvm_vcpu *vcpu) 166 172 { 167 173 u64 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT); 168 - 169 - pend_sync_exception(vcpu); 170 174 171 175 /* 172 176 * Build an unknown exception, depending on the instruction ··· 179 175 if (kvm_vcpu_trap_il_is32bit(vcpu)) 180 176 esr |= ESR_ELx_IL; 181 177 182 - vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu)); 178 + kvm_inject_sync(vcpu, esr); 183 179 } 184 180 185 181 #define DFSR_FSC_EXTABT_LPAE 0x10
+2 -2
arch/arm64/kvm/sys_regs.c
··· 3414 3414 { SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr }, 3415 3415 { SYS_DESC(SYS_CLIDR_EL1), access_clidr, reset_clidr, CLIDR_EL1, 3416 3416 .set_user = set_clidr, .val = ~CLIDR_EL1_RES0 }, 3417 - { SYS_DESC(SYS_CCSIDR2_EL1), undef_access }, 3418 - { SYS_DESC(SYS_SMIDR_EL1), undef_access }, 3419 3417 IMPLEMENTATION_ID(AIDR_EL1, GENMASK_ULL(63, 0)), 3420 3418 { SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 }, 3421 3419 ID_FILTERED(CTR_EL0, ctr_el0, ··· 5579 5581 5580 5582 if (kvm_has_mte(vcpu->kvm)) 5581 5583 vcpu->arch.hcr_el2 |= HCR_ATA; 5584 + else 5585 + vcpu->arch.hcr_el2 |= HCR_TID5; 5582 5586 5583 5587 /* 5584 5588 * In the absence of FGT, we cannot independently trap TLBI
+10
arch/arm64/kvm/sys_regs.h
··· 49 49 .Op2 = ((esr) >> 17) & 0x7, \ 50 50 .is_write = !((esr) & 1) }) 51 51 52 + /* 53 + * The Feature ID space is defined as the System register space in AArch64 54 + * with op0==3, op1=={0, 1, 3}, CRn==0, CRm=={0-7}, op2=={0-7}. 55 + */ 56 + static inline bool in_feat_id_space(struct sys_reg_params *p) 57 + { 58 + return (p->Op0 == 3 && !(p->Op1 & 0b100) && p->Op1 != 2 && 59 + p->CRn == 0 && !(p->CRm & 0b1000)); 60 + } 61 + 52 62 struct sys_reg_desc { 53 63 /* Sysreg string for debug */ 54 64 const char *name;
+4 -3
arch/arm64/tools/sysreg
··· 2256 2256 0b0000 NI 2257 2257 0b0001 IMP 2258 2258 EndEnum 2259 - Enum 39:36 IDS 2260 - 0b0000 0x0 2261 - 0b0001 0x18 2259 + UnsignedEnum 39:36 IDS 2260 + 0b0000 NI 2261 + 0b0001 IMP 2262 + 0b0010 EL3 2262 2263 EndEnum 2263 2264 UnsignedEnum 35:32 AT 2264 2265 0b0000 NI
+1
tools/testing/selftests/kvm/Makefile.kvm
··· 175 175 TEST_GEN_PROGS_arm64 += arm64/vgic_lpi_stress 176 176 TEST_GEN_PROGS_arm64 += arm64/vpmu_counter_access 177 177 TEST_GEN_PROGS_arm64 += arm64/no-vgic-v3 178 + TEST_GEN_PROGS_arm64 += arm64/idreg-idst 178 179 TEST_GEN_PROGS_arm64 += arm64/kvm-uuid 179 180 TEST_GEN_PROGS_arm64 += access_tracking_perf_test 180 181 TEST_GEN_PROGS_arm64 += arch_timer
+117
tools/testing/selftests/kvm/arm64/idreg-idst.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + /* 4 + * Access all FEAT_IDST-handled registers that depend on more than 5 + * just FEAT_AA64, and fail if we don't get an a trap with an 0x18 EC. 6 + */ 7 + 8 + #include <test_util.h> 9 + #include <kvm_util.h> 10 + #include <processor.h> 11 + 12 + static volatile bool sys64, undef; 13 + 14 + #define __check_sr_read(r) \ 15 + ({ \ 16 + uint64_t val; \ 17 + \ 18 + sys64 = false; \ 19 + undef = false; \ 20 + dsb(sy); \ 21 + val = read_sysreg_s(SYS_ ## r); \ 22 + val; \ 23 + }) 24 + 25 + /* Fatal checks */ 26 + #define check_sr_read(r) \ 27 + do { \ 28 + __check_sr_read(r); \ 29 + __GUEST_ASSERT(!undef, #r " unexpected UNDEF"); \ 30 + __GUEST_ASSERT(sys64, #r " didn't trap"); \ 31 + } while(0) 32 + 33 + 34 + static void guest_code(void) 35 + { 36 + check_sr_read(CCSIDR2_EL1); 37 + check_sr_read(SMIDR_EL1); 38 + check_sr_read(GMID_EL1); 39 + 40 + GUEST_DONE(); 41 + } 42 + 43 + static void guest_sys64_handler(struct ex_regs *regs) 44 + { 45 + sys64 = true; 46 + undef = false; 47 + regs->pc += 4; 48 + } 49 + 50 + static void guest_undef_handler(struct ex_regs *regs) 51 + { 52 + sys64 = false; 53 + undef = true; 54 + regs->pc += 4; 55 + } 56 + 57 + static void test_run_vcpu(struct kvm_vcpu *vcpu) 58 + { 59 + struct ucall uc; 60 + 61 + do { 62 + vcpu_run(vcpu); 63 + 64 + switch (get_ucall(vcpu, &uc)) { 65 + case UCALL_ABORT: 66 + REPORT_GUEST_ASSERT(uc); 67 + break; 68 + case UCALL_PRINTF: 69 + printf("%s", uc.buffer); 70 + break; 71 + case UCALL_DONE: 72 + break; 73 + default: 74 + TEST_FAIL("Unknown ucall %lu", uc.cmd); 75 + } 76 + } while (uc.cmd != UCALL_DONE); 77 + } 78 + 79 + static void test_guest_feat_idst(void) 80 + { 81 + struct kvm_vcpu *vcpu; 82 + struct kvm_vm *vm; 83 + 84 + /* This VM has no MTE, no SME, no CCIDX */ 85 + vm = vm_create_with_one_vcpu(&vcpu, guest_code); 86 + 87 + vm_init_descriptor_tables(vm); 88 + vcpu_init_descriptor_tables(vcpu); 89 + 90 + vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT, 
91 + ESR_ELx_EC_SYS64, guest_sys64_handler); 92 + vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT, 93 + ESR_ELx_EC_UNKNOWN, guest_undef_handler); 94 + 95 + test_run_vcpu(vcpu); 96 + 97 + kvm_vm_free(vm); 98 + } 99 + 100 + int main(int argc, char *argv[]) 101 + { 102 + struct kvm_vcpu *vcpu; 103 + struct kvm_vm *vm; 104 + uint64_t mmfr2; 105 + 106 + test_disable_default_vgic(); 107 + 108 + vm = vm_create_with_one_vcpu(&vcpu, NULL); 109 + mmfr2 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64MMFR2_EL1)); 110 + __TEST_REQUIRE(FIELD_GET(ID_AA64MMFR2_EL1_IDS, mmfr2) > 0, 111 + "FEAT_IDST not supported"); 112 + kvm_vm_free(vm); 113 + 114 + test_guest_feat_idst(); 115 + 116 + return 0; 117 + }