Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0
/*
 * x86-specific extensions to memstress.c.
 *
 * Copyright (C) 2022, Google, Inc.
 */
#include <stdio.h>
#include <stdlib.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>

#include "test_util.h"
#include "kvm_util.h"
#include "memstress.h"
#include "processor.h"
#include "svm_util.h"
#include "vmx.h"

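/*
 * L2 runs the normal memstress workload and then hypercalls back to L1,
 * which reports completion. This is the code each vCPU executes once L1
 * has bounced it into L2 (see memstress_l1_guest_code() below).
 */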
void memstress_l2_guest_code(u64 vcpu_id)
{
	memstress_guest_code(vcpu_id);
	vmcall();
}

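/*
 * Entry stub for L2: L1 places vcpu_id at the top of the L2 stack, so load
 * it into %rdi (the first integer argument in the System V x86-64 ABI) and
 * call into C. The ud2 traps if memstress_l2_guest_code() ever returns,
 * which it should not, since it ends with a hypercall back to L1.
 */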
extern char memstress_l2_guest_entry[];
__asm__(
"memstress_l2_guest_entry:"
"	mov (%rsp), %rdi;"
"	call memstress_l2_guest_code;"
"	ud2;"
);

#define L2_GUEST_STACK_SIZE 64

static void l1_vmx_code(struct vmx_pages *vmx, u64 vcpu_id)
{
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	unsigned long *rsp;

	GUEST_ASSERT(vmx->vmcs_gpa);
	GUEST_ASSERT(prepare_for_vmx_operation(vmx));
	GUEST_ASSERT(load_vmcs(vmx));

	/* The identity map is built with 1G pages, see memstress_setup_ept_mappings(). */
	GUEST_ASSERT(ept_1g_pages_supported());

	/* Pass vcpu_id to L2 via the top of its stack, where the entry stub expects it. */
	rsp = &l2_guest_stack[L2_GUEST_STACK_SIZE - 1];
	*rsp = vcpu_id;
	prepare_vmcs(vmx, memstress_l2_guest_entry, rsp);

	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT_EQ(vmreadz(VM_EXIT_REASON), EXIT_REASON_VMCALL);
	GUEST_DONE();
}

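/*
 * SVM counterpart to l1_vmx_code(): set up a VMCB instead of a VMCS, and
 * expect the L2 hypercall to surface as a VMMCALL exit code rather than a
 * VM_EXIT_REASON of VMCALL.
 */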
static void l1_svm_code(struct svm_test_data *svm, u64 vcpu_id)
{
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	unsigned long *rsp;

	/* As in the VMX path, hand vcpu_id to L2 on the top of its stack. */
	rsp = &l2_guest_stack[L2_GUEST_STACK_SIZE - 1];
	*rsp = vcpu_id;
	generic_svm_setup(svm, memstress_l2_guest_entry, rsp);

	run_guest(svm->vmcb, svm->vmcb_gpa);
	GUEST_ASSERT_EQ(svm->vmcb->control.exit_code, SVM_EXIT_VMMCALL);
	GUEST_DONE();
}

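/*
 * Each vCPU starts here in L1. Dispatch on the vCPU's own capabilities:
 * this_cpu_has() queries CPUID from within the guest, so the same binary
 * handles both Intel (VMX) and AMD (SVM) hosts.
 */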
static void memstress_l1_guest_code(void *data, u64 vcpu_id)
{
	if (this_cpu_has(X86_FEATURE_VMX))
		l1_vmx_code(data, vcpu_id);
	else
		l1_svm_code(data, vcpu_id);
}

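/*
 * Sketch of the arithmetic behind the 513 figure below, assuming 4-level
 * paging with 1G mappings: each bottom-level table holds 512 1G entries
 * and so covers 512 GiB, thus 512 such tables cover 512 * 512 GiB = 256 TiB,
 * plus one top-level table pointing at them: 512 + 1 = 513.
 */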
u64 memstress_nested_pages(int nr_vcpus)
{
	/*
	 * 513 page tables is enough to identity-map 256 TiB of L2 with 1G
	 * pages and 4-level paging, plus a few pages per-vCPU for data
	 * structures such as the VMCS.
	 */
	return 513 + 10 * nr_vcpus;
}

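/*
 * Note that "EPT12" below refers to the stage-2 page tables L1 uses for L2,
 * which L0 shadows; mapping them with 1G pages lets KVM use the largest page
 * size the backing memory supports.
 */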
static void memstress_setup_ept_mappings(struct kvm_vm *vm)
{
	u64 start, end;

	/*
	 * Identity map the first 4G and the test region with 1G pages so that
	 * KVM can shadow the EPT12 with the maximum huge page size supported
	 * by the backing source.
	 */
	tdp_identity_map_1g(vm, 0, 0x100000000ULL);

	/* Round the test region out to 1G boundaries so it is fully covered. */
	start = align_down(memstress_args.gpa, PG_SIZE_1G);
	end = align_up(memstress_args.gpa + memstress_args.size, PG_SIZE_1G);
	tdp_identity_map_1g(vm, start, end - start);
}

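/*
 * Set up every vCPU to run the memstress workload in L2. This presumably
 * runs during test setup, after the vCPUs have been created but before they
 * are first run, since it rewrites each vCPU's RIP.
 */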
void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[])
{
	struct kvm_regs regs;
	gva_t nested_gva;
	int vcpu_id;

	TEST_REQUIRE(kvm_cpu_has_tdp());

	vm_enable_tdp(vm);
	memstress_setup_ept_mappings(vm);
	for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
		if (kvm_cpu_has(X86_FEATURE_VMX))
			vcpu_alloc_vmx(vm, &nested_gva);
		else
			vcpu_alloc_svm(vm, &nested_gva);

		/*
		 * Override the vCPU to run memstress_l1_guest_code(), which
		 * will bounce it into L2 before calling memstress_guest_code().
		 */
		vcpu_regs_get(vcpus[vcpu_id], &regs);
		regs.rip = (unsigned long) memstress_l1_guest_code;
		vcpu_regs_set(vcpus[vcpu_id], &regs);
		vcpu_args_set(vcpus[vcpu_id], 2, nested_gva, vcpu_id);
	}
}
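
/*
 * Usage sketch (hypothetical caller, not part of this file): a memstress
 * test that wants its guest workload to run in L2 would do roughly the
 * following during setup, with the VM and vcpus[] array coming from the
 * generic memstress harness:
 *
 *	struct kvm_vcpu *vcpus[NR_VCPUS];
 *	struct kvm_vm *vm;
 *
 *	vm = ...;	// create the VM and vCPUs via the harness
 *	memstress_setup_nested(vm, NR_VCPUS, vcpus);
 *	// ... run the vCPUs; each enters L1, which launches L2.
 */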