Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
"Arm:

- Only adjust the ID registers when no irqchip has been created once
per VM run, instead of doing it once per vcpu, as this otherwise
triggers a pretty bad consistency check failure in the sysreg code

- Make sure the per-vcpu Fine-Grained Traps are computed before we load
the system registers on the HW, as we otherwise start running
without anything set until the first preemption of the vcpu

x86:

- Fix selftests failure on AMD, checking for an optimization that was
not happening anymore"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
KVM: SVM: Fix redundant updates of LBR MSR intercepts
KVM: arm64: VHE: Compute fgt traps before activating them
KVM: arm64: Finalize ID registers only once per VM

+15 -3
+1 -1
arch/arm64/kvm/arm.c
··· 624 624 kvm_timer_vcpu_load(vcpu); 625 625 kvm_vgic_load(vcpu); 626 626 kvm_vcpu_load_debug(vcpu); 627 + kvm_vcpu_load_fgt(vcpu); 627 628 if (has_vhe()) 628 629 kvm_vcpu_load_vhe(vcpu); 629 630 kvm_arch_vcpu_load_fp(vcpu); ··· 643 642 vcpu->arch.hcr_el2 |= HCR_TWI; 644 643 645 644 vcpu_set_pauth_traps(vcpu); 646 - kvm_vcpu_load_fgt(vcpu); 647 645 648 646 if (is_protected_kvm_enabled()) { 649 647 kvm_call_hyp_nvhe(__pkvm_vcpu_load,
+5 -1
arch/arm64/kvm/sys_regs.c
··· 5609 5609 5610 5610 guard(mutex)(&kvm->arch.config_lock); 5611 5611 5612 - if (!irqchip_in_kernel(kvm)) { 5612 + /* 5613 + * This hacks into the ID registers, so only perform it when the 5614 + * first vcpu runs, or the kvm_set_vm_id_reg() helper will scream. 5615 + */ 5616 + if (!irqchip_in_kernel(kvm) && !kvm_vm_has_ran_once(kvm)) { 5613 5617 u64 val; 5614 5618 5615 5619 val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1) & ~ID_AA64PFR0_EL1_GIC;
+8 -1
arch/x86/kvm/svm/svm.c
··· 705 705 706 706 static void svm_recalc_lbr_msr_intercepts(struct kvm_vcpu *vcpu) 707 707 { 708 - bool intercept = !(to_svm(vcpu)->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK); 708 + struct vcpu_svm *svm = to_svm(vcpu); 709 + bool intercept = !(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK); 710 + 711 + if (intercept == svm->lbr_msrs_intercepted) 712 + return; 709 713 710 714 svm_set_intercept_for_msr(vcpu, MSR_IA32_LASTBRANCHFROMIP, MSR_TYPE_RW, intercept); 711 715 svm_set_intercept_for_msr(vcpu, MSR_IA32_LASTBRANCHTOIP, MSR_TYPE_RW, intercept); ··· 718 714 719 715 if (sev_es_guest(vcpu->kvm)) 720 716 svm_set_intercept_for_msr(vcpu, MSR_IA32_DEBUGCTLMSR, MSR_TYPE_RW, intercept); 717 + 718 + svm->lbr_msrs_intercepted = intercept; 721 719 } 722 720 723 721 void svm_vcpu_free_msrpm(void *msrpm) ··· 1227 1221 } 1228 1222 1229 1223 svm->x2avic_msrs_intercepted = true; 1224 + svm->lbr_msrs_intercepted = true; 1230 1225 1231 1226 svm->vmcb01.ptr = page_address(vmcb01_page); 1232 1227 svm->vmcb01.pa = __sme_set(page_to_pfn(vmcb01_page) << PAGE_SHIFT);
+1
arch/x86/kvm/svm/svm.h
··· 336 336 bool guest_state_loaded; 337 337 338 338 bool x2avic_msrs_intercepted; 339 + bool lbr_msrs_intercepted; 339 340 340 341 /* Guest GIF value, used when vGIF is not enabled */ 341 342 bool guest_gif;