Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2026, Google, Inc.
4 */
5
6#include "test_util.h"
7#include "kvm_util.h"
8#include "processor.h"
9#include "svm_util.h"
10
11
/* Stack size (in unsigned longs) allocated on L1's stack for the L2 guest. */
#define L2_GUEST_STACK_SIZE 64

/* Execute a single taken branch so the CPU records it in the LBR MSRs. */
#define DO_BRANCH() do { asm volatile("jmp 1f\n 1: nop"); } while (0)

/* One Last Branch Record: source ("from") and destination ("to") IPs. */
struct lbr_branch {
	u64 from, to;
};

/* LBR snapshot taken by L2; also read by L1 after L2 exits. */
volatile struct lbr_branch l2_branch;

/*
 * Enable LBR via DEBUGCTL, execute a branch, and snapshot the FROM/TO MSRs
 * into @b.  LBR is disabled again immediately so that later branches don't
 * clobber the recorded IPs.  Asserts that non-zero IPs were captured.
 */
#define RECORD_AND_CHECK_BRANCH(b)					\
do {									\
	wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR);			\
	DO_BRANCH();							\
	(b)->from = rdmsr(MSR_IA32_LASTBRANCHFROMIP);			\
	(b)->to = rdmsr(MSR_IA32_LASTBRANCHTOIP);			\
	/* Disable LBR right after to avoid overriding the IPs */	\
	wrmsr(MSR_IA32_DEBUGCTLMSR, 0);					\
									\
	GUEST_ASSERT_NE((b)->from, 0);					\
	GUEST_ASSERT_NE((b)->to, 0);					\
} while (0)

/* Assert that the current LBR MSR values still match the snapshot in @b. */
#define CHECK_BRANCH_MSRS(b)						\
do {									\
	GUEST_ASSERT_EQ((b)->from, rdmsr(MSR_IA32_LASTBRANCHFROMIP));	\
	GUEST_ASSERT_EQ((b)->to, rdmsr(MSR_IA32_LASTBRANCHTOIP));	\
} while (0)

/* Assert that the LBR values in @vmcb's save area match the snapshot @b. */
#define CHECK_BRANCH_VMCB(b, vmcb)					\
do {									\
	GUEST_ASSERT_EQ((b)->from, vmcb->save.br_from);			\
	GUEST_ASSERT_EQ((b)->to, vmcb->save.br_to);			\
} while (0)
46
/*
 * L2 guest: record a branch into the LBR MSRs (publishing the snapshot in
 * the global l2_branch so L1 can check it later), force a host-side vCPU
 * save/restore via GUEST_SYNC, verify the LBRs survived, then exit to L1
 * with VMMCALL.  @svm is unused but kept to match the guest-code signature.
 */
static void l2_guest_code(struct svm_test_data *svm)
{
	/* Record a branch, trigger save/restore, and make sure LBRs are intact */
	RECORD_AND_CHECK_BRANCH(&l2_branch);
	GUEST_SYNC(true);
	CHECK_BRANCH_MSRS(&l2_branch);
	vmmcall();
}
55
56static void l1_guest_code(struct svm_test_data *svm, bool nested_lbrv)
57{
58 unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
59 struct vmcb *vmcb = svm->vmcb;
60 struct lbr_branch l1_branch;
61
62 /* Record a branch, trigger save/restore, and make sure LBRs are intact */
63 RECORD_AND_CHECK_BRANCH(&l1_branch);
64 GUEST_SYNC(true);
65 CHECK_BRANCH_MSRS(&l1_branch);
66
67 /* Run L2, which will also do the same */
68 generic_svm_setup(svm, l2_guest_code,
69 &l2_guest_stack[L2_GUEST_STACK_SIZE]);
70
71 if (nested_lbrv)
72 vmcb->control.misc_ctl2 = SVM_MISC2_ENABLE_V_LBR;
73 else
74 vmcb->control.misc_ctl2 &= ~SVM_MISC2_ENABLE_V_LBR;
75
76 run_guest(vmcb, svm->vmcb_gpa);
77 GUEST_ASSERT(svm->vmcb->control.exit_code == SVM_EXIT_VMMCALL);
78
79 /* Trigger save/restore one more time before checking, just for kicks */
80 GUEST_SYNC(true);
81
82 /*
83 * If LBR_CTL_ENABLE is set, L1 and L2 should have separate LBR MSRs, so
84 * expect L1's LBRs to remain intact and L2 LBRs to be in the VMCB.
85 * Otherwise, the MSRs are shared between L1 & L2 so expect L2's LBRs.
86 */
87 if (nested_lbrv) {
88 CHECK_BRANCH_MSRS(&l1_branch);
89 CHECK_BRANCH_VMCB(&l2_branch, vmcb);
90 } else {
91 CHECK_BRANCH_MSRS(&l2_branch);
92 }
93 GUEST_DONE();
94}
95
96void test_lbrv_nested_state(bool nested_lbrv)
97{
98 struct kvm_x86_state *state = NULL;
99 struct kvm_vcpu *vcpu;
100 struct kvm_vm *vm;
101 struct ucall uc;
102 gva_t svm_gva;
103
104 pr_info("Testing with nested LBRV %s\n", nested_lbrv ? "enabled" : "disabled");
105
106 vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
107 vcpu_alloc_svm(vm, &svm_gva);
108 vcpu_args_set(vcpu, 2, svm_gva, nested_lbrv);
109
110 for (;;) {
111 vcpu_run(vcpu);
112 TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
113 switch (get_ucall(vcpu, &uc)) {
114 case UCALL_SYNC:
115 /* Save the vCPU state and restore it in a new VM on sync */
116 pr_info("Guest triggered save/restore.\n");
117 state = vcpu_save_state(vcpu);
118 kvm_vm_release(vm);
119 vcpu = vm_recreate_with_one_vcpu(vm);
120 vcpu_load_state(vcpu, state);
121 kvm_x86_state_cleanup(state);
122 break;
123 case UCALL_ABORT:
124 REPORT_GUEST_ASSERT(uc);
125 /* NOT REACHED */
126 case UCALL_DONE:
127 goto done;
128 default:
129 TEST_FAIL("Unknown ucall %lu", uc.cmd);
130 }
131 }
132done:
133 kvm_vm_free(vm);
134}
135
136int main(int argc, char *argv[])
137{
138 TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));
139 TEST_REQUIRE(kvm_is_lbrv_enabled());
140
141 test_lbrv_nested_state(/*nested_lbrv=*/false);
142 test_lbrv_nested_state(/*nested_lbrv=*/true);
143
144 return 0;
145}