// SPDX-License-Identifier: GPL-2.0-only
/*
 * tools/testing/selftests/kvm/lib/kvm_util.c
 *
 * Copyright (C) 2018, Google LLC.
 */
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "ucall_common.h"

#include <assert.h>
#include <sched.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <linux/kernel.h>

#define KVM_UTIL_MIN_PFN	2

u32 guest_random_seed;
struct guest_random_state guest_rng;
static u32 last_guest_seed;

static size_t vcpu_mmap_sz(void);

int __open_path_or_exit(const char *path, int flags, const char *enoent_help)
{
	int fd;

	fd = open(path, flags);
	if (fd < 0)
		goto error;

	return fd;

error:
	if (errno == EACCES || errno == ENOENT)
		ksft_exit_skip("- Cannot open '%s': %s. %s\n",
			       path, strerror(errno),
			       errno == EACCES ? "Root required?" : enoent_help);
	TEST_FAIL("Failed to open '%s'", path);
}

int open_path_or_exit(const char *path, int flags)
{
	return __open_path_or_exit(path, flags, "");
}

/*
 * Open KVM_DEV_PATH if available, otherwise exit the entire program.
 *
 * Input Args:
 *   flags - The flags to pass when opening KVM_DEV_PATH.
 *
 * Return:
 *   The opened file descriptor of /dev/kvm.
 */
static int _open_kvm_dev_path_or_exit(int flags)
{
	return __open_path_or_exit(KVM_DEV_PATH, flags, "Is KVM loaded and enabled?");
}

int open_kvm_dev_path_or_exit(void)
{
	return _open_kvm_dev_path_or_exit(O_RDONLY);
}

static ssize_t get_module_param(const char *module_name, const char *param,
				void *buffer, size_t buffer_size)
{
	const int path_size = 128;
	char path[path_size];
	ssize_t bytes_read;
	int fd, r;

	/* Verify KVM is loaded, to provide a more helpful SKIP message. */
	close(open_kvm_dev_path_or_exit());

	r = snprintf(path, path_size, "/sys/module/%s/parameters/%s",
		     module_name, param);
	TEST_ASSERT(r < path_size,
		    "Failed to construct sysfs path in %d bytes.", path_size);

	fd = open_path_or_exit(path, O_RDONLY);

	bytes_read = read(fd, buffer, buffer_size);
	TEST_ASSERT(bytes_read > 0, "read(%s) returned %ld, wanted %ld bytes",
		    path, bytes_read, buffer_size);

	r = close(fd);
	TEST_ASSERT(!r, "close(%s) failed", path);
	return bytes_read;
}

int kvm_get_module_param_integer(const char *module_name, const char *param)
{
	/*
	 * 16 bytes to hold a 64-bit value (one byte per hex char), 1 byte
	 * for the NUL terminator, and 1 byte for the trailing newline the
	 * kernel appends to sysfs output.
	 */
	char value[16 + 1 + 1];
	ssize_t r;

	memset(value, '\0', sizeof(value));

	r = get_module_param(module_name, param, value, sizeof(value));
	TEST_ASSERT(value[r - 1] == '\n',
		    "Expected trailing newline, got char '%c'", value[r - 1]);

	/*
	 * Squash the newline, otherwise atoi_paranoid() will complain about
	 * trailing non-NUL characters in the string.
	 */
	value[r - 1] = '\0';
	return atoi_paranoid(value);
}

bool kvm_get_module_param_bool(const char *module_name, const char *param)
{
	char value;
	ssize_t r;

	r = get_module_param(module_name, param, &value, sizeof(value));
	TEST_ASSERT_EQ(r, 1);

	if (value == 'Y')
		return true;
	else if (value == 'N')
		return false;

	TEST_FAIL("Unrecognized value '%c' for boolean module param", value);
}
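
/*
 * Example usage (illustrative sketch, not part of the library): query KVM
 * module parameters from sysfs. The parameter names below are assumptions
 * for demonstration purposes and may not exist on every kernel.
 *
 *	int halt_poll_ns = kvm_get_module_param_integer("kvm", "halt_poll_ns");
 *	bool backdoor = kvm_get_module_param_bool("kvm", "enable_vmware_backdoor");
 */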

/*
 * Capability
 *
 * Input Args:
 *   cap - Capability (KVM_CAP_*) to query
 *
 * Output Args: None
 *
 * Return:
 *   The value KVM reports for the capability given by cap. A TEST_ASSERT
 *   failure is produced if the KVM_CHECK_EXTENSION ioctl fails.
 */
unsigned int kvm_check_cap(long cap)
{
	int ret;
	int kvm_fd;

	kvm_fd = open_kvm_dev_path_or_exit();
	ret = __kvm_ioctl(kvm_fd, KVM_CHECK_EXTENSION, (void *)cap);
	TEST_ASSERT(ret >= 0, KVM_IOCTL_ERROR(KVM_CHECK_EXTENSION, ret));

	close(kvm_fd);

	return (unsigned int)ret;
}
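
/*
 * Example usage (illustrative sketch): KVM_CHECK_EXTENSION returns 0 for an
 * unsupported capability and a positive, capability-specific value for a
 * supported one, e.g. the host's vCPU limit for KVM_CAP_MAX_VCPUS.
 *
 *	unsigned int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
 *
 *	if (!kvm_check_cap(KVM_CAP_DIRTY_LOG_RING))
 *		ksft_exit_skip("Dirty ring not supported\n");
 */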

void vm_enable_dirty_ring(struct kvm_vm *vm, u32 ring_size)
{
	if (vm_check_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL))
		vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL, ring_size);
	else
		vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING, ring_size);
	vm->dirty_ring_size = ring_size;
}

static void vm_open(struct kvm_vm *vm)
{
	vm->kvm_fd = _open_kvm_dev_path_or_exit(O_RDWR);

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_IMMEDIATE_EXIT));

	vm->fd = __kvm_ioctl(vm->kvm_fd, KVM_CREATE_VM, (void *)vm->type);
	TEST_ASSERT(vm->fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm->fd));

	if (kvm_has_cap(KVM_CAP_BINARY_STATS_FD))
		vm->stats.fd = vm_get_stats_fd(vm);
	else
		vm->stats.fd = -1;
}

const char *vm_guest_mode_string(u32 i)
{
	static const char * const strings[] = {
		[VM_MODE_P52V48_4K]	= "PA-bits:52, VA-bits:48, 4K pages",
		[VM_MODE_P52V48_16K]	= "PA-bits:52, VA-bits:48, 16K pages",
		[VM_MODE_P52V48_64K]	= "PA-bits:52, VA-bits:48, 64K pages",
		[VM_MODE_P48V48_4K]	= "PA-bits:48, VA-bits:48, 4K pages",
		[VM_MODE_P48V48_16K]	= "PA-bits:48, VA-bits:48, 16K pages",
		[VM_MODE_P48V48_64K]	= "PA-bits:48, VA-bits:48, 64K pages",
		[VM_MODE_P40V48_4K]	= "PA-bits:40, VA-bits:48, 4K pages",
		[VM_MODE_P40V48_16K]	= "PA-bits:40, VA-bits:48, 16K pages",
		[VM_MODE_P40V48_64K]	= "PA-bits:40, VA-bits:48, 64K pages",
		[VM_MODE_PXXVYY_4K]	= "PA-bits:ANY, VA-bits:48 or 57, 4K pages",
		[VM_MODE_P47V64_4K]	= "PA-bits:47, VA-bits:64, 4K pages",
		[VM_MODE_P44V64_4K]	= "PA-bits:44, VA-bits:64, 4K pages",
		[VM_MODE_P36V48_4K]	= "PA-bits:36, VA-bits:48, 4K pages",
		[VM_MODE_P36V48_16K]	= "PA-bits:36, VA-bits:48, 16K pages",
		[VM_MODE_P36V48_64K]	= "PA-bits:36, VA-bits:48, 64K pages",
		[VM_MODE_P47V47_16K]	= "PA-bits:47, VA-bits:47, 16K pages",
		[VM_MODE_P36V47_16K]	= "PA-bits:36, VA-bits:47, 16K pages",
		[VM_MODE_P56V57_4K]	= "PA-bits:56, VA-bits:57, 4K pages",
		[VM_MODE_P56V48_4K]	= "PA-bits:56, VA-bits:48, 4K pages",
		[VM_MODE_P56V39_4K]	= "PA-bits:56, VA-bits:39, 4K pages",
		[VM_MODE_P50V57_4K]	= "PA-bits:50, VA-bits:57, 4K pages",
		[VM_MODE_P50V48_4K]	= "PA-bits:50, VA-bits:48, 4K pages",
		[VM_MODE_P50V39_4K]	= "PA-bits:50, VA-bits:39, 4K pages",
		[VM_MODE_P41V57_4K]	= "PA-bits:41, VA-bits:57, 4K pages",
		[VM_MODE_P41V48_4K]	= "PA-bits:41, VA-bits:48, 4K pages",
		[VM_MODE_P41V39_4K]	= "PA-bits:41, VA-bits:39, 4K pages",
	};
	_Static_assert(sizeof(strings)/sizeof(char *) == NUM_VM_MODES,
		       "Missing new mode strings?");

	TEST_ASSERT(i < NUM_VM_MODES, "Guest mode ID %d too big", i);

	return strings[i];
}

const struct vm_guest_mode_params vm_guest_mode_params[] = {
	[VM_MODE_P52V48_4K]	= { 52, 48, 0x1000, 12 },
	[VM_MODE_P52V48_16K]	= { 52, 48, 0x4000, 14 },
	[VM_MODE_P52V48_64K]	= { 52, 48, 0x10000, 16 },
	[VM_MODE_P48V48_4K]	= { 48, 48, 0x1000, 12 },
	[VM_MODE_P48V48_16K]	= { 48, 48, 0x4000, 14 },
	[VM_MODE_P48V48_64K]	= { 48, 48, 0x10000, 16 },
	[VM_MODE_P40V48_4K]	= { 40, 48, 0x1000, 12 },
	[VM_MODE_P40V48_16K]	= { 40, 48, 0x4000, 14 },
	[VM_MODE_P40V48_64K]	= { 40, 48, 0x10000, 16 },
	[VM_MODE_PXXVYY_4K]	= { 0, 0, 0x1000, 12 },
	[VM_MODE_P47V64_4K]	= { 47, 64, 0x1000, 12 },
	[VM_MODE_P44V64_4K]	= { 44, 64, 0x1000, 12 },
	[VM_MODE_P36V48_4K]	= { 36, 48, 0x1000, 12 },
	[VM_MODE_P36V48_16K]	= { 36, 48, 0x4000, 14 },
	[VM_MODE_P36V48_64K]	= { 36, 48, 0x10000, 16 },
	[VM_MODE_P47V47_16K]	= { 47, 47, 0x4000, 14 },
	[VM_MODE_P36V47_16K]	= { 36, 47, 0x4000, 14 },
	[VM_MODE_P56V57_4K]	= { 56, 57, 0x1000, 12 },
	[VM_MODE_P56V48_4K]	= { 56, 48, 0x1000, 12 },
	[VM_MODE_P56V39_4K]	= { 56, 39, 0x1000, 12 },
	[VM_MODE_P50V57_4K]	= { 50, 57, 0x1000, 12 },
	[VM_MODE_P50V48_4K]	= { 50, 48, 0x1000, 12 },
	[VM_MODE_P50V39_4K]	= { 50, 39, 0x1000, 12 },
	[VM_MODE_P41V57_4K]	= { 41, 57, 0x1000, 12 },
	[VM_MODE_P41V48_4K]	= { 41, 48, 0x1000, 12 },
	[VM_MODE_P41V39_4K]	= { 41, 39, 0x1000, 12 },
};
_Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
	       "Missing new mode params?");

/*
 * Initializes vm->vpages_valid to match the canonical VA space of the
 * architecture.
 *
 * The default implementation is valid for architectures which split the
 * range addressed by a single page table into a low and a high region
 * based on the MSB of the VA. On architectures with this behavior the
 * valid VA ranges are [0, 2^(va_bits - 1)) and [-2^(va_bits - 1), -1].
 */
__weak void vm_populate_gva_bitmap(struct kvm_vm *vm)
{
	sparsebit_set_num(vm->vpages_valid,
		0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
	sparsebit_set_num(vm->vpages_valid,
		(~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift,
		(1ULL << (vm->va_bits - 1)) >> vm->page_shift);
}
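
/*
 * Worked example (illustrative): with va_bits = 48 and 4K pages, the low
 * region is [0, 1ULL << 47) and the high region starts at
 * ~((1ULL << 47) - 1) = 0xffff800000000000. Each region spans 2^47 bytes,
 * i.e. 2^35 4K virtual pages, which is the number of bits set per region
 * above.
 */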

struct kvm_vm *____vm_create(struct vm_shape shape)
{
	struct kvm_vm *vm;

	vm = calloc(1, sizeof(*vm));
	TEST_ASSERT(vm != NULL, "Insufficient Memory");

	INIT_LIST_HEAD(&vm->vcpus);
	vm->regions.gpa_tree = RB_ROOT;
	vm->regions.hva_tree = RB_ROOT;
	hash_init(vm->regions.slot_hash);

	vm->mode = shape.mode;
	vm->type = shape.type;

	vm->pa_bits = vm_guest_mode_params[vm->mode].pa_bits;
	vm->va_bits = vm_guest_mode_params[vm->mode].va_bits;
	vm->page_size = vm_guest_mode_params[vm->mode].page_size;
	vm->page_shift = vm_guest_mode_params[vm->mode].page_shift;

	/* Setup mode specific traits. */
	switch (vm->mode) {
	case VM_MODE_P52V48_4K:
		vm->mmu.pgtable_levels = 4;
		break;
	case VM_MODE_P52V48_64K:
		vm->mmu.pgtable_levels = 3;
		break;
	case VM_MODE_P48V48_4K:
		vm->mmu.pgtable_levels = 4;
		break;
	case VM_MODE_P48V48_64K:
		vm->mmu.pgtable_levels = 3;
		break;
	case VM_MODE_P40V48_4K:
	case VM_MODE_P36V48_4K:
		vm->mmu.pgtable_levels = 4;
		break;
	case VM_MODE_P40V48_64K:
	case VM_MODE_P36V48_64K:
		vm->mmu.pgtable_levels = 3;
		break;
	case VM_MODE_P52V48_16K:
	case VM_MODE_P48V48_16K:
	case VM_MODE_P40V48_16K:
	case VM_MODE_P36V48_16K:
		vm->mmu.pgtable_levels = 4;
		break;
	case VM_MODE_P47V47_16K:
	case VM_MODE_P36V47_16K:
		vm->mmu.pgtable_levels = 3;
		break;
	case VM_MODE_PXXVYY_4K:
#ifdef __x86_64__
		kvm_get_cpu_address_width(&vm->pa_bits, &vm->va_bits);
		kvm_init_vm_address_properties(vm);

		pr_debug("Guest physical address width detected: %d\n",
			 vm->pa_bits);
		pr_debug("Guest virtual address width detected: %d\n",
			 vm->va_bits);

		if (vm->va_bits == 57) {
			vm->mmu.pgtable_levels = 5;
		} else {
			TEST_ASSERT(vm->va_bits == 48,
				    "Unexpected guest virtual address width: %d",
				    vm->va_bits);
			vm->mmu.pgtable_levels = 4;
		}
#else
		TEST_FAIL("VM_MODE_PXXVYY_4K not supported on non-x86 platforms");
#endif
		break;
	case VM_MODE_P47V64_4K:
		vm->mmu.pgtable_levels = 5;
		break;
	case VM_MODE_P44V64_4K:
		vm->mmu.pgtable_levels = 5;
		break;
	case VM_MODE_P56V57_4K:
	case VM_MODE_P50V57_4K:
	case VM_MODE_P41V57_4K:
		vm->mmu.pgtable_levels = 5;
		break;
	case VM_MODE_P56V48_4K:
	case VM_MODE_P50V48_4K:
	case VM_MODE_P41V48_4K:
		vm->mmu.pgtable_levels = 4;
		break;
	case VM_MODE_P56V39_4K:
	case VM_MODE_P50V39_4K:
	case VM_MODE_P41V39_4K:
		vm->mmu.pgtable_levels = 3;
		break;
	default:
		TEST_FAIL("Unknown guest mode: 0x%x", vm->mode);
	}

#ifdef __aarch64__
	TEST_ASSERT(!vm->type, "ARM doesn't support test-provided types");
	if (vm->pa_bits != 40)
		vm->type = KVM_VM_TYPE_ARM_IPA_SIZE(vm->pa_bits);
#endif

	vm_open(vm);

	/* Limit to VA-bit canonical virtual addresses. */
	vm->vpages_valid = sparsebit_alloc();
	vm_populate_gva_bitmap(vm);

	/* Limit physical addresses to PA-bits. */
	vm->max_gfn = vm_compute_max_gfn(vm);

	/* Allocate and setup memory for guest. */
	vm->vpages_mapped = sparsebit_alloc();

	return vm;
}

static u64 vm_nr_pages_required(enum vm_guest_mode mode,
				u32 nr_runnable_vcpus,
				u64 extra_mem_pages)
{
	u64 page_size = vm_guest_mode_params[mode].page_size;
	u64 nr_pages;

	TEST_ASSERT(nr_runnable_vcpus,
		    "Use vm_create_barebones() for VMs that _never_ have vCPUs");

	TEST_ASSERT(nr_runnable_vcpus <= kvm_check_cap(KVM_CAP_MAX_VCPUS),
		    "nr_vcpus = %d too large for host, max-vcpus = %d",
		    nr_runnable_vcpus, kvm_check_cap(KVM_CAP_MAX_VCPUS));

	/*
	 * Arbitrarily allocate 512 pages (2MB with 4KB pages) for the test
	 * code and other per-VM assets that will be loaded into memslot0.
	 */
	nr_pages = 512;

	/* Account for the per-vCPU stacks on behalf of the test. */
	nr_pages += nr_runnable_vcpus * DEFAULT_STACK_PGS;

	/*
	 * Account for the number of pages needed for the page tables. The
	 * maximum page table size for a memory region will be when the
	 * smallest page size is used. Considering each page contains x page
	 * table descriptors, the total extra size for page tables (for extra
	 * N pages) will be: N/x + N/x^2 + N/x^3 + ..., which is definitely
	 * smaller than N/x * 2.
	 */
	nr_pages += (nr_pages + extra_mem_pages) / PTES_PER_MIN_PAGE * 2;

	/* Account for the number of pages needed by ucall. */
	nr_pages += ucall_nr_pages_required(page_size);

	return vm_adjust_num_guest_pages(mode, nr_pages);
}
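
/*
 * Worked example (illustrative, assuming 4K minimum pages with 8-byte
 * descriptors, i.e. PTES_PER_MIN_PAGE = 512): the page table cost of
 * mapping N pages is the geometric series N/512 + N/512^2 + ..., which the
 * N/512 * 2 bound above over-approximates at well under 1% of N.
 */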

void kvm_set_files_rlimit(u32 nr_vcpus)
{
	/*
	 * Each vCPU will open two file descriptors: the vCPU itself and the
	 * vCPU's binary stats file descriptor. Add an arbitrary amount of
	 * buffer for all other files a test may open.
	 */
	int nr_fds_wanted = nr_vcpus * 2 + 100;
	struct rlimit rl;

	/*
	 * Check that we're allowed to open nr_fds_wanted file descriptors and
	 * try raising the limits if needed.
	 */
	TEST_ASSERT(!getrlimit(RLIMIT_NOFILE, &rl), "getrlimit() failed!");

	if (rl.rlim_cur < nr_fds_wanted) {
		rl.rlim_cur = nr_fds_wanted;
		if (rl.rlim_max < nr_fds_wanted) {
			int old_rlim_max = rl.rlim_max;

			rl.rlim_max = nr_fds_wanted;
			__TEST_REQUIRE(setrlimit(RLIMIT_NOFILE, &rl) >= 0,
				       "RLIMIT_NOFILE hard limit is too low (%d, wanted %d)",
				       old_rlim_max, nr_fds_wanted);
		} else {
			TEST_ASSERT(!setrlimit(RLIMIT_NOFILE, &rl), "setrlimit() failed!");
		}
	}
}

static bool is_guest_memfd_required(struct vm_shape shape)
{
#ifdef __x86_64__
	return shape.type == KVM_X86_SNP_VM;
#else
	return false;
#endif
}

struct kvm_vm *__vm_create(struct vm_shape shape, u32 nr_runnable_vcpus,
			   u64 nr_extra_pages)
{
	u64 nr_pages = vm_nr_pages_required(shape.mode, nr_runnable_vcpus,
					    nr_extra_pages);
	struct userspace_mem_region *slot0;
	struct kvm_vm *vm;
	int i, flags;

	kvm_set_files_rlimit(nr_runnable_vcpus);

	pr_debug("%s: mode='%s' type='%d', pages='%ld'\n", __func__,
		 vm_guest_mode_string(shape.mode), shape.type, nr_pages);

	vm = ____vm_create(shape);

	/*
	 * Force GUEST_MEMFD for the primary memory region if necessary, e.g.
	 * for CoCo VMs that require GUEST_MEMFD backed private memory.
	 */
	flags = 0;
	if (is_guest_memfd_required(shape))
		flags |= KVM_MEM_GUEST_MEMFD;

	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 0, 0, nr_pages, flags);
	for (i = 0; i < NR_MEM_REGIONS; i++)
		vm->memslots[i] = 0;

	kvm_vm_elf_load(vm, program_invocation_name);

	/*
	 * TODO: Add proper defines to protect the library's memslots, and then
	 * carve out memslot1 for the ucall MMIO address. KVM treats writes to
	 * read-only memslots as MMIO, and creating a read-only memslot for the
	 * MMIO region would prevent silently clobbering the MMIO region.
	 */
	slot0 = memslot2region(vm, 0);
	ucall_init(vm, slot0->region.guest_phys_addr + slot0->region.memory_size);

	if (guest_random_seed != last_guest_seed) {
		pr_info("Random seed: 0x%x\n", guest_random_seed);
		last_guest_seed = guest_random_seed;
	}
	guest_rng = new_guest_random_state(guest_random_seed);
	sync_global_to_guest(vm, guest_rng);

	kvm_arch_vm_post_create(vm, nr_runnable_vcpus);

	return vm;
}

/*
 * VM Create with customized parameters
 *
 * Input Args:
 *   shape - VM shape (mode, e.g. VM_MODE_P52V48_4K, and type)
 *   nr_vcpus - vCPU count
 *   extra_mem_pages - Non-slot0 physical memory total size
 *   guest_code - Guest entry point
 *   vcpus - Array to populate with the created vCPUs
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to opaque structure that describes the created VM.
 *
 * Creates a VM with the mode and type specified by shape. extra_mem_pages
 * is only used to calculate the maximum page table size; no real memory
 * allocation for non-slot0 memory is done in this function.
 */
struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, u32 nr_vcpus,
				      u64 extra_mem_pages,
				      void *guest_code, struct kvm_vcpu *vcpus[])
{
	struct kvm_vm *vm;
	int i;

	TEST_ASSERT(!nr_vcpus || vcpus, "Must provide vCPU array");

	vm = __vm_create(shape, nr_vcpus, extra_mem_pages);

	for (i = 0; i < nr_vcpus; ++i)
		vcpus[i] = vm_vcpu_add(vm, i, guest_code);

	kvm_arch_vm_finalize_vcpus(vm);
	return vm;
}
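
/*
 * Example usage (illustrative sketch): create a VM with two vCPUs that run
 * guest_main(), using the default shape. VM_SHAPE_DEFAULT and guest_main
 * are assumptions standing in for a test's actual shape and entry point.
 *
 *	struct kvm_vcpu *vcpus[2];
 *	struct kvm_vm *vm;
 *
 *	vm = __vm_create_with_vcpus(VM_SHAPE_DEFAULT, 2, 0, guest_main, vcpus);
 *	...
 *	kvm_vm_free(vm);
 */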

struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape,
					       struct kvm_vcpu **vcpu,
					       u64 extra_mem_pages,
					       void *guest_code)
{
	struct kvm_vcpu *vcpus[1];
	struct kvm_vm *vm;

	vm = __vm_create_with_vcpus(shape, 1, extra_mem_pages, guest_code, vcpus);

	*vcpu = vcpus[0];
	return vm;
}

/*
 * VM Restart
 *
 * Input Args:
 *   vm - VM that has been released before
 *
 * Output Args: None
 *
 * Reopens the file descriptors associated with the VM and reinstates the
 * global state, such as the irqchip and the memory regions that are mapped
 * into the guest.
 */
void kvm_vm_restart(struct kvm_vm *vmp)
{
	int ctr;
	struct userspace_mem_region *region;

	vm_open(vmp);
	if (vmp->has_irqchip)
		vm_create_irqchip(vmp);

	hash_for_each(vmp->regions.slot_hash, ctr, region, slot_node) {
		int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION2, &region->region);

		TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION2 IOCTL failed,\n"
			    "  rc: %i errno: %i\n"
			    "  slot: %u flags: 0x%x\n"
			    "  guest_phys_addr: 0x%llx size: 0x%llx",
			    ret, errno, region->region.slot,
			    region->region.flags,
			    region->region.guest_phys_addr,
			    region->region.memory_size);
	}
}

__weak struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm,
					      u32 vcpu_id)
{
	return __vm_vcpu_add(vm, vcpu_id);
}

struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm)
{
	kvm_vm_restart(vm);

	return vm_vcpu_recreate(vm, 0);
}

int __pin_task_to_cpu(pthread_t task, int cpu)
{
	cpu_set_t cpuset;

	CPU_ZERO(&cpuset);
	CPU_SET(cpu, &cpuset);

	return pthread_setaffinity_np(task, sizeof(cpuset), &cpuset);
}

static u32 parse_pcpu(const char *cpu_str, const cpu_set_t *allowed_mask)
{
	u32 pcpu = atoi_non_negative("CPU number", cpu_str);

	TEST_ASSERT(CPU_ISSET(pcpu, allowed_mask),
		    "Not allowed to run on pCPU '%d', check cgroups?", pcpu);
	return pcpu;
}

void kvm_print_vcpu_pinning_help(void)
{
	const char *name = program_invocation_name;

	printf(" -c: Pin tasks to physical CPUs. Takes a list of comma separated\n"
	       "     values (target pCPU), one for each vCPU, plus an optional\n"
	       "     entry for the main application task (specified via entry\n"
	       "     <nr_vcpus + 1>). If used, entries must be provided for all\n"
	       "     vCPUs, i.e. pinning vCPUs is all or nothing.\n\n"
	       "     E.g. to create 3 vCPUs, pin vCPU0=>pCPU22, vCPU1=>pCPU23,\n"
	       "     vCPU2=>pCPU24, and pin the application task to pCPU50:\n\n"
	       "         %s -v 3 -c 22,23,24,50\n\n"
	       "     To leave the application task unpinned, drop the final entry:\n\n"
	       "         %s -v 3 -c 22,23,24\n\n"
	       "     (default: no pinning)\n", name, name);
}

void kvm_parse_vcpu_pinning(const char *pcpus_string, u32 vcpu_to_pcpu[],
			    int nr_vcpus)
{
	cpu_set_t allowed_mask;
	char *cpu, *cpu_list;
	char delim[2] = ",";
	int i, r;

	cpu_list = strdup(pcpus_string);
	TEST_ASSERT(cpu_list, "strdup() allocation failed.");

	r = sched_getaffinity(0, sizeof(allowed_mask), &allowed_mask);
	TEST_ASSERT(!r, "sched_getaffinity() failed");

	cpu = strtok(cpu_list, delim);

	/* 1. Get all pcpus for vcpus. */
	for (i = 0; i < nr_vcpus; i++) {
		TEST_ASSERT(cpu, "pCPU not provided for vCPU '%d'", i);
		vcpu_to_pcpu[i] = parse_pcpu(cpu, &allowed_mask);
		cpu = strtok(NULL, delim);
	}

	/* 2. Check if the main worker needs to be pinned. */
	if (cpu) {
		pin_self_to_cpu(parse_pcpu(cpu, &allowed_mask));
		cpu = strtok(NULL, delim);
	}

	TEST_ASSERT(!cpu, "pCPU list contains trailing garbage characters '%s'", cpu);
	free(cpu_list);
}

/*
 * Userspace Memory Region Find
 *
 * Input Args:
 *   vm - Virtual Machine
 *   start - Starting VM physical address
 *   end - Ending VM physical address, inclusive.
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to overlapping region, NULL if no such region.
 *
 * Searches for a region with any physical memory that overlaps with
 * any portion of the guest physical addresses from start to end
 * inclusive. If multiple overlapping regions exist, a pointer to any
 * of the regions is returned. NULL is returned only when no overlapping
 * region exists.
 */
static struct userspace_mem_region *
userspace_mem_region_find(struct kvm_vm *vm, u64 start, u64 end)
{
	struct rb_node *node;

	for (node = vm->regions.gpa_tree.rb_node; node; ) {
		struct userspace_mem_region *region =
			container_of(node, struct userspace_mem_region, gpa_node);
		u64 existing_start = region->region.guest_phys_addr;
		u64 existing_end = region->region.guest_phys_addr
			+ region->region.memory_size - 1;
		if (start <= existing_end && end >= existing_start)
			return region;

		if (start < existing_start)
			node = node->rb_left;
		else
			node = node->rb_right;
	}

	return NULL;
}
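
/*
 * Example (illustrative): given a region covering GPAs [0x10000, 0x1ffff],
 * userspace_mem_region_find(vm, 0x1f000, 0x2ffff) returns that region
 * because the ranges overlap, whereas a query for [0x20000, 0x2ffff]
 * returns NULL. Callers use start == end for point queries, e.g.
 * userspace_mem_region_find(vm, gpa, gpa).
 */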

static void kvm_stats_release(struct kvm_binary_stats *stats)
{
	if (stats->fd < 0)
		return;

	if (stats->desc) {
		free(stats->desc);
		stats->desc = NULL;
	}

	kvm_close(stats->fd);
	stats->fd = -1;
}

__weak void vcpu_arch_free(struct kvm_vcpu *vcpu)
{

}

/*
 * VM VCPU Remove
 *
 * Input Args:
 *   vcpu - VCPU to remove
 *
 * Output Args: None
 *
 * Return: None, TEST_ASSERT failures for all error conditions
 *
 * Removes a vCPU from a VM and frees its resources.
 */
static void vm_vcpu_rm(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
	if (vcpu->dirty_gfns) {
		kvm_munmap(vcpu->dirty_gfns, vm->dirty_ring_size);
		vcpu->dirty_gfns = NULL;
	}

	kvm_munmap(vcpu->run, vcpu_mmap_sz());

	kvm_close(vcpu->fd);
	kvm_stats_release(&vcpu->stats);

	list_del(&vcpu->list);

	vcpu_arch_free(vcpu);
	free(vcpu);
}

void kvm_vm_release(struct kvm_vm *vmp)
{
	struct kvm_vcpu *vcpu, *tmp;

	list_for_each_entry_safe(vcpu, tmp, &vmp->vcpus, list)
		vm_vcpu_rm(vmp, vcpu);

	kvm_close(vmp->fd);
	kvm_close(vmp->kvm_fd);

	/* Free cached stats metadata and close FD */
	kvm_stats_release(&vmp->stats);

	kvm_arch_vm_release(vmp);
}

static void __vm_mem_region_delete(struct kvm_vm *vm,
				   struct userspace_mem_region *region)
{
	rb_erase(&region->gpa_node, &vm->regions.gpa_tree);
	rb_erase(&region->hva_node, &vm->regions.hva_tree);
	hash_del(&region->slot_node);

	sparsebit_free(&region->unused_phy_pages);
	sparsebit_free(&region->protected_phy_pages);
	kvm_munmap(region->mmap_start, region->mmap_size);
	if (region->fd >= 0) {
		/* There's an extra map when using shared memory. */
		kvm_munmap(region->mmap_alias, region->mmap_size);
		close(region->fd);
	}
	if (region->region.guest_memfd >= 0)
		close(region->region.guest_memfd);

	free(region);
}

/*
 * Destroys and frees the VM pointed to by vmp.
 */
void kvm_vm_free(struct kvm_vm *vmp)
{
	int ctr;
	struct hlist_node *node;
	struct userspace_mem_region *region;

	if (vmp == NULL)
		return;

	/* Free userspace_mem_regions. */
	hash_for_each_safe(vmp->regions.slot_hash, ctr, node, region, slot_node)
		__vm_mem_region_delete(vmp, region);

	/* Free sparsebit arrays. */
	sparsebit_free(&vmp->vpages_valid);
	sparsebit_free(&vmp->vpages_mapped);

	kvm_vm_release(vmp);

	/* Free the structure describing the VM. */
	free(vmp);
}

int kvm_memfd_alloc(size_t size, bool hugepages)
{
	int memfd_flags = MFD_CLOEXEC;
	int fd;

	if (hugepages)
		memfd_flags |= MFD_HUGETLB;

	fd = memfd_create("kvm_selftest", memfd_flags);
	TEST_ASSERT(fd != -1, __KVM_SYSCALL_ERROR("memfd_create()", fd));

	kvm_ftruncate(fd, size);
	kvm_fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, size);

	return fd;
}

static void vm_userspace_mem_region_gpa_insert(struct rb_root *gpa_tree,
					       struct userspace_mem_region *region)
{
	struct rb_node **cur, *parent;

	for (cur = &gpa_tree->rb_node, parent = NULL; *cur; ) {
		struct userspace_mem_region *cregion;

		cregion = container_of(*cur, typeof(*cregion), gpa_node);
		parent = *cur;
		if (region->region.guest_phys_addr <
		    cregion->region.guest_phys_addr)
			cur = &(*cur)->rb_left;
		else {
			TEST_ASSERT(region->region.guest_phys_addr !=
				    cregion->region.guest_phys_addr,
				    "Duplicate GPA in region tree");

			cur = &(*cur)->rb_right;
		}
	}

	rb_link_node(&region->gpa_node, parent, cur);
	rb_insert_color(&region->gpa_node, gpa_tree);
}

static void vm_userspace_mem_region_hva_insert(struct rb_root *hva_tree,
					       struct userspace_mem_region *region)
{
	struct rb_node **cur, *parent;

	for (cur = &hva_tree->rb_node, parent = NULL; *cur; ) {
		struct userspace_mem_region *cregion;

		cregion = container_of(*cur, typeof(*cregion), hva_node);
		parent = *cur;
		if (region->host_mem < cregion->host_mem)
			cur = &(*cur)->rb_left;
		else {
			TEST_ASSERT(region->host_mem !=
				    cregion->host_mem,
				    "Duplicate HVA in region tree");

			cur = &(*cur)->rb_right;
		}
	}

	rb_link_node(&region->hva_node, parent, cur);
	rb_insert_color(&region->hva_node, hva_tree);
}

int __vm_set_user_memory_region(struct kvm_vm *vm, u32 slot, u32 flags,
				gpa_t gpa, u64 size, void *hva)
{
	struct kvm_userspace_memory_region region = {
		.slot = slot,
		.flags = flags,
		.guest_phys_addr = gpa,
		.memory_size = size,
		.userspace_addr = (uintptr_t)hva,
	};

	return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region);
}

void vm_set_user_memory_region(struct kvm_vm *vm, u32 slot, u32 flags,
			       gpa_t gpa, u64 size, void *hva)
{
	int ret = __vm_set_user_memory_region(vm, slot, flags, gpa, size, hva);

	TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION failed, errno = %d (%s)",
		    errno, strerror(errno));
}

#define TEST_REQUIRE_SET_USER_MEMORY_REGION2()			\
	__TEST_REQUIRE(kvm_has_cap(KVM_CAP_USER_MEMORY2),	\
		       "KVM selftests now require KVM_SET_USER_MEMORY_REGION2 (introduced in v6.8)")

int __vm_set_user_memory_region2(struct kvm_vm *vm, u32 slot, u32 flags,
				 gpa_t gpa, u64 size, void *hva,
				 u32 guest_memfd, u64 guest_memfd_offset)
{
	struct kvm_userspace_memory_region2 region = {
		.slot = slot,
		.flags = flags,
		.guest_phys_addr = gpa,
		.memory_size = size,
		.userspace_addr = (uintptr_t)hva,
		.guest_memfd = guest_memfd,
		.guest_memfd_offset = guest_memfd_offset,
	};

	TEST_REQUIRE_SET_USER_MEMORY_REGION2();

	return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION2, &region);
}

void vm_set_user_memory_region2(struct kvm_vm *vm, u32 slot, u32 flags,
				gpa_t gpa, u64 size, void *hva,
				u32 guest_memfd, u64 guest_memfd_offset)
{
	int ret = __vm_set_user_memory_region2(vm, slot, flags, gpa, size, hva,
					       guest_memfd, guest_memfd_offset);

	TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION2 failed, errno = %d (%s)",
		    errno, strerror(errno));
}
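
/*
 * Example usage (illustrative sketch): register 16 pages of test-allocated
 * memory as memslot 1 at GPA 0x10000000. The slot, GPA, and size are
 * arbitrary placeholder values; passing size = 0 for an existing slot
 * deletes the memslot instead.
 *
 *	void *hva = mmap(NULL, 16 * getpagesize(), PROT_READ | PROT_WRITE,
 *			 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
 *
 *	vm_set_user_memory_region(vm, 1, 0, 0x10000000, 16 * getpagesize(), hva);
 */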

/* FIXME: This thing needs to be ripped apart and rewritten. */
void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
		gpa_t gpa, u32 slot, u64 npages, u32 flags,
		int guest_memfd, u64 guest_memfd_offset)
{
	int ret;
	struct userspace_mem_region *region;
	size_t backing_src_pagesz = get_backing_src_pagesz(src_type);
	size_t mem_size = npages * vm->page_size;
	size_t alignment = 1;

	TEST_REQUIRE_SET_USER_MEMORY_REGION2();

	TEST_ASSERT(vm_adjust_num_guest_pages(vm->mode, npages) == npages,
		    "Number of guest pages is not compatible with the host. "
		    "Try npages=%d", vm_adjust_num_guest_pages(vm->mode, npages));

	TEST_ASSERT((gpa % vm->page_size) == 0, "Guest physical "
		    "address not on a page boundary.\n"
		    "  gpa: 0x%lx vm->page_size: 0x%x",
		    gpa, vm->page_size);
	TEST_ASSERT((((gpa >> vm->page_shift) + npages) - 1)
		    <= vm->max_gfn, "Physical range beyond maximum "
		    "supported physical address,\n"
		    "  gpa: 0x%lx npages: 0x%lx\n"
		    "  vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		    gpa, npages, vm->max_gfn, vm->page_size);

	/*
	 * Confirm a mem region with an overlapping address doesn't
	 * already exist.
	 */
	region = (struct userspace_mem_region *) userspace_mem_region_find(
		vm, gpa, (gpa + npages * vm->page_size) - 1);
	if (region != NULL)
		TEST_FAIL("overlapping userspace_mem_region already "
			  "exists\n"
			  "  requested gpa: 0x%lx npages: 0x%lx page_size: 0x%x\n"
			  "  existing gpa: 0x%lx size: 0x%lx",
			  gpa, npages, vm->page_size,
			  (u64)region->region.guest_phys_addr,
			  (u64)region->region.memory_size);

	/* Confirm no region with the requested slot already exists. */
	hash_for_each_possible(vm->regions.slot_hash, region, slot_node,
			       slot) {
		if (region->region.slot != slot)
			continue;

		TEST_FAIL("A mem region with the requested slot "
			  "already exists.\n"
			  "  requested slot: %u gpa: 0x%lx npages: 0x%lx\n"
			  "  existing slot: %u gpa: 0x%lx size: 0x%lx",
			  slot, gpa, npages, region->region.slot,
			  (u64)region->region.guest_phys_addr,
			  (u64)region->region.memory_size);
	}

	/* Allocate and initialize new mem region structure. */
	region = calloc(1, sizeof(*region));
	TEST_ASSERT(region != NULL, "Insufficient Memory");
	region->mmap_size = mem_size;

	/*
	 * When using THP, mmap is not guaranteed to return a hugepage-aligned
	 * address, so the mmap must be padded. Padding is not needed for
	 * HugeTLB because mmap will always return an address aligned to the
	 * HugeTLB page size.
	 */
	if (src_type == VM_MEM_SRC_ANONYMOUS_THP)
		alignment = max(backing_src_pagesz, alignment);

	TEST_ASSERT_EQ(gpa, align_up(gpa, backing_src_pagesz));

	/* Add enough memory to align up if necessary */
	if (alignment > 1)
		region->mmap_size += alignment;

	region->fd = -1;
	if (backing_src_is_shared(src_type))
		region->fd = kvm_memfd_alloc(region->mmap_size,
					     src_type == VM_MEM_SRC_SHARED_HUGETLB);

	region->mmap_start = kvm_mmap(region->mmap_size, PROT_READ | PROT_WRITE,
				      vm_mem_backing_src_alias(src_type)->flag,
				      region->fd);

	TEST_ASSERT(!is_backing_src_hugetlb(src_type) ||
		    region->mmap_start == align_ptr_up(region->mmap_start, backing_src_pagesz),
		    "mmap_start %p is not aligned to HugeTLB page size 0x%lx",
		    region->mmap_start, backing_src_pagesz);

	/* Align host address */
	region->host_mem = align_ptr_up(region->mmap_start, alignment);

	/* As needed perform madvise */
	if ((src_type == VM_MEM_SRC_ANONYMOUS ||
	     src_type == VM_MEM_SRC_ANONYMOUS_THP) && thp_configured()) {
		ret = madvise(region->host_mem, mem_size,
			      src_type == VM_MEM_SRC_ANONYMOUS ? MADV_NOHUGEPAGE : MADV_HUGEPAGE);
		TEST_ASSERT(ret == 0, "madvise failed, addr: %p length: 0x%lx src_type: %s",
			    region->host_mem, mem_size,
			    vm_mem_backing_src_alias(src_type)->name);
	}

	region->backing_src_type = src_type;

	if (flags & KVM_MEM_GUEST_MEMFD) {
		if (guest_memfd < 0) {
			u32 guest_memfd_flags = 0;
			TEST_ASSERT(!guest_memfd_offset,
				    "Offset must be zero when creating new guest_memfd");
			guest_memfd = vm_create_guest_memfd(vm, mem_size, guest_memfd_flags);
		} else {
			/*
			 * Install a unique fd for each memslot so that the fd
			 * can be closed when the region is deleted without
			 * needing to track if the fd is owned by the framework
			 * or by the caller.
			 */
			guest_memfd = kvm_dup(guest_memfd);
		}

		region->region.guest_memfd = guest_memfd;
		region->region.guest_memfd_offset = guest_memfd_offset;
	} else {
		region->region.guest_memfd = -1;
	}

	region->unused_phy_pages = sparsebit_alloc();
	if (vm_arch_has_protected_memory(vm))
		region->protected_phy_pages = sparsebit_alloc();
	sparsebit_set_num(region->unused_phy_pages, gpa >> vm->page_shift, npages);
	region->region.slot = slot;
	region->region.flags = flags;
	region->region.guest_phys_addr = gpa;
	region->region.memory_size = npages * vm->page_size;
	region->region.userspace_addr = (uintptr_t) region->host_mem;
	ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);
	TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION2 IOCTL failed,\n"
		    "  rc: %i errno: %i\n"
		    "  slot: %u flags: 0x%x\n"
		    "  guest_phys_addr: 0x%lx size: 0x%llx guest_memfd: %d",
		    ret, errno, slot, flags, gpa, region->region.memory_size,
		    region->region.guest_memfd);

	/* Add to quick lookup data structures */
	vm_userspace_mem_region_gpa_insert(&vm->regions.gpa_tree, region);
	vm_userspace_mem_region_hva_insert(&vm->regions.hva_tree, region);
	hash_add(vm->regions.slot_hash, &region->slot_node, slot);

	/* If shared memory, create an alias. */
	if (region->fd >= 0) {
		region->mmap_alias = kvm_mmap(region->mmap_size,
					      PROT_READ | PROT_WRITE,
					      vm_mem_backing_src_alias(src_type)->flag,
					      region->fd);

		/* Align host alias address */
		region->host_alias = align_ptr_up(region->mmap_alias, alignment);
	}
}

void vm_userspace_mem_region_add(struct kvm_vm *vm,
				 enum vm_mem_backing_src_type src_type,
				 gpa_t gpa, u32 slot, u64 npages, u32 flags)
{
	vm_mem_add(vm, src_type, gpa, slot, npages, flags, -1, 0);
}
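
/*
 * Example usage (illustrative sketch): add 512 pages of anonymous memory at
 * GPA 0x10000000 in memslot 1. The GPA and slot are arbitrary placeholders;
 * tests pick values that don't collide with memslot0, which __vm_create()
 * places at GPA 0.
 *
 *	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
 *				    0x10000000, 1, 512, 0);
 */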

/*
 * Memslot to region
 *
 * Input Args:
 *   vm - Virtual Machine
 *   memslot - KVM memory slot ID
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to the memory region structure that describes the memory
 *   region using the KVM memory slot ID given by memslot. TEST_ASSERT
 *   failure on error (e.g. currently no memory region uses memslot as a
 *   KVM memory slot ID).
 */
struct userspace_mem_region *
memslot2region(struct kvm_vm *vm, u32 memslot)
{
	struct userspace_mem_region *region;

	hash_for_each_possible(vm->regions.slot_hash, region, slot_node,
			       memslot)
		if (region->region.slot == memslot)
			return region;

	fprintf(stderr, "No mem region with the requested slot found,\n"
		"  requested slot: %u\n", memslot);
	fputs("---- vm dump ----\n", stderr);
	vm_dump(stderr, vm, 2);
	TEST_FAIL("Mem region not found");
	return NULL;
}

/*
 * VM Memory Region Flags Set
 *
 * Input Args:
 *   vm - Virtual Machine
 *   slot - Slot of the memory region to update
 *   flags - Flags to set for the memory region
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the flags of the memory region specified by the value of slot,
 * to the values given by flags.
 */
void vm_mem_region_set_flags(struct kvm_vm *vm, u32 slot, u32 flags)
{
	int ret;
	struct userspace_mem_region *region;

	region = memslot2region(vm, slot);

	region->region.flags = flags;

	ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);

	TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION2 IOCTL failed,\n"
		    "  rc: %i errno: %i slot: %u flags: 0x%x",
		    ret, errno, slot, flags);
}

void vm_mem_region_reload(struct kvm_vm *vm, u32 slot)
{
	struct userspace_mem_region *region = memslot2region(vm, slot);
	struct kvm_userspace_memory_region2 tmp = region->region;

	tmp.memory_size = 0;
	vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &tmp);
	vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);
}

/*
 * VM Memory Region Move
 *
 * Input Args:
 *   vm - Virtual Machine
 *   slot - Slot of the memory region to move
 *   new_gpa - Starting guest physical address
 *
 * Output Args: None
 *
 * Return: None
 *
 * Change the gpa of a memory region.
 */
void vm_mem_region_move(struct kvm_vm *vm, u32 slot, u64 new_gpa)
{
	struct userspace_mem_region *region;
	int ret;

	region = memslot2region(vm, slot);

	region->region.guest_phys_addr = new_gpa;

	ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);

	TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION2 failed\n"
		    "ret: %i errno: %i slot: %u new_gpa: 0x%lx",
		    ret, errno, slot, new_gpa);
}

/*
 * VM Memory Region Delete
 *
 * Input Args:
 *   vm - Virtual Machine
 *   slot - Slot of the memory region to delete
 *
 * Output Args: None
 *
 * Return: None
 *
 * Delete a memory region.
 */
void vm_mem_region_delete(struct kvm_vm *vm, u32 slot)
{
	struct userspace_mem_region *region = memslot2region(vm, slot);

	region->region.memory_size = 0;
	vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);

	__vm_mem_region_delete(vm, region);
}

void vm_guest_mem_fallocate(struct kvm_vm *vm, u64 base, u64 size,
			    bool punch_hole)
{
	const int mode = FALLOC_FL_KEEP_SIZE | (punch_hole ? FALLOC_FL_PUNCH_HOLE : 0);
	struct userspace_mem_region *region;
	u64 end = base + size;
	gpa_t gpa, len;
	off_t fd_offset;
	int ret;

	for (gpa = base; gpa < end; gpa += len) {
		u64 offset;

		region = userspace_mem_region_find(vm, gpa, gpa);
		TEST_ASSERT(region && region->region.flags & KVM_MEM_GUEST_MEMFD,
			    "Private memory region not found for GPA 0x%lx", gpa);

		offset = gpa - region->region.guest_phys_addr;
		fd_offset = region->region.guest_memfd_offset + offset;
		len = min_t(u64, end - gpa, region->region.memory_size - offset);

		ret = fallocate(region->region.guest_memfd, mode, fd_offset, len);
		TEST_ASSERT(!ret, "fallocate() failed to %s at %lx (len = %lu), fd = %d, mode = %x, offset = %lx",
			    punch_hole ? "punch hole" : "allocate", gpa, len,
			    region->region.guest_memfd, mode, fd_offset);
	}
}

/* Returns the size of a vCPU's kvm_run structure. */
static size_t vcpu_mmap_sz(void)
{
	int dev_fd, ret;

	dev_fd = open_kvm_dev_path_or_exit();

	ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
	TEST_ASSERT(ret >= 0 && ret >= sizeof(struct kvm_run),
		    KVM_IOCTL_ERROR(KVM_GET_VCPU_MMAP_SIZE, ret));

	close(dev_fd);

	return ret;
}

static bool vcpu_exists(struct kvm_vm *vm, u32 vcpu_id)
{
	struct kvm_vcpu *vcpu;

	list_for_each_entry(vcpu, &vm->vcpus, list) {
		if (vcpu->id == vcpu_id)
			return true;
	}

	return false;
}

/*
 * Adds a virtual CPU to the VM specified by vm with the ID given by vcpu_id.
 * No additional vCPU setup is done. Returns the vCPU.
 */
struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, u32 vcpu_id)
{
	struct kvm_vcpu *vcpu;

	/* Confirm a vcpu with the specified id doesn't already exist. */
	TEST_ASSERT(!vcpu_exists(vm, vcpu_id), "vCPU%d already exists", vcpu_id);

	/* Allocate and initialize new vcpu structure. */
	vcpu = calloc(1, sizeof(*vcpu));
	TEST_ASSERT(vcpu != NULL, "Insufficient Memory");

	vcpu->vm = vm;
	vcpu->id = vcpu_id;
	vcpu->fd = __vm_ioctl(vm, KVM_CREATE_VCPU, (void *)(unsigned long)vcpu_id);
	TEST_ASSERT_VM_VCPU_IOCTL(vcpu->fd >= 0, KVM_CREATE_VCPU, vcpu->fd, vm);

	TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->run), "vcpu mmap size "
		    "smaller than expected, vcpu_mmap_sz: %zi expected_min: %zi",
		    vcpu_mmap_sz(), sizeof(*vcpu->run));
	vcpu->run = kvm_mmap(vcpu_mmap_sz(), PROT_READ | PROT_WRITE,
			     MAP_SHARED, vcpu->fd);

	if (kvm_has_cap(KVM_CAP_BINARY_STATS_FD))
		vcpu->stats.fd = vcpu_get_stats_fd(vcpu);
	else
		vcpu->stats.fd = -1;

	/* Add to linked-list of VCPUs. */
	list_add(&vcpu->list, &vm->vcpus);

	return vcpu;
}

/*
 * Within the VM specified by @vm, locates the lowest starting guest virtual
 * address >= @min_gva that has at least @sz unallocated bytes. A
 * TEST_ASSERT failure occurs for invalid input, or if no area of at least
 * @sz unallocated bytes at an address >= @min_gva is available.
 */
gva_t vm_unused_gva_gap(struct kvm_vm *vm, size_t sz, gva_t min_gva)
{
	u64 pages = (sz + vm->page_size - 1) >> vm->page_shift;

	/* Determine lowest permitted virtual page index. */
	u64 pgidx_start = (min_gva + vm->page_size - 1) >> vm->page_shift;
	if ((pgidx_start * vm->page_size) < min_gva)
		goto no_va_found;

	/* Loop over section with enough valid virtual page indexes. */
	if (!sparsebit_is_set_num(vm->vpages_valid,
				  pgidx_start, pages))
		pgidx_start = sparsebit_next_set_num(vm->vpages_valid,
						     pgidx_start, pages);
	do {
		/*
		 * Are there enough unused virtual pages available at
		 * the currently proposed starting virtual page index.
		 * If not, adjust proposed starting index to next
		 * possible.
		 */
		if (sparsebit_is_clear_num(vm->vpages_mapped,
					   pgidx_start, pages))
			goto va_found;
		pgidx_start = sparsebit_next_clear_num(vm->vpages_mapped,
						       pgidx_start, pages);
		if (pgidx_start == 0)
			goto no_va_found;

		/*
		 * If needed, adjust proposed starting virtual address,
		 * to next range of valid virtual addresses.
		 */
		if (!sparsebit_is_set_num(vm->vpages_valid,
					  pgidx_start, pages)) {
			pgidx_start = sparsebit_next_set_num(
				vm->vpages_valid, pgidx_start, pages);
			if (pgidx_start == 0)
				goto no_va_found;
		}
	} while (pgidx_start != 0);

no_va_found:
	TEST_FAIL("No gva of specified pages available, pages: 0x%lx", pages);

	/* NOT REACHED */
	return -1;

va_found:
	TEST_ASSERT(sparsebit_is_set_num(vm->vpages_valid,
					 pgidx_start, pages),
		    "Unexpected, invalid virtual page index range,\n"
		    "  pgidx_start: 0x%lx\n"
		    "  pages: 0x%lx",
		    pgidx_start, pages);
	TEST_ASSERT(sparsebit_is_clear_num(vm->vpages_mapped,
					   pgidx_start, pages),
		    "Unexpected, pages already mapped,\n"
		    "  pgidx_start: 0x%lx\n"
		    "  pages: 0x%lx",
		    pgidx_start, pages);

	return pgidx_start * vm->page_size;
}

static gva_t ____vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva,
			  enum kvm_mem_region_type type, bool protected)
{
	u64 pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);

	virt_pgd_alloc(vm);
	gpa_t gpa = __vm_phy_pages_alloc(vm, pages,
					 KVM_UTIL_MIN_PFN * vm->page_size,
					 vm->memslots[type], protected);

	/*
	 * Find an unused range of virtual page addresses of at least
	 * pages in length.
	 */
	gva_t gva_start = vm_unused_gva_gap(vm, sz, min_gva);

	/* Map the virtual pages. */
	for (gva_t gva = gva_start; pages > 0;
	     pages--, gva += vm->page_size, gpa += vm->page_size) {

		virt_pg_map(vm, gva, gpa);
	}

	return gva_start;
}

gva_t __vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva,
		 enum kvm_mem_region_type type)
{
	return ____vm_alloc(vm, sz, min_gva, type,
			    vm_arch_has_protected_memory(vm));
}

gva_t vm_alloc_shared(struct kvm_vm *vm, size_t sz, gva_t min_gva,
		      enum kvm_mem_region_type type)
{
	return ____vm_alloc(vm, sz, min_gva, type, false);
}

/*
 * Allocates at least sz bytes within the virtual address space of the VM
 * given by @vm. The allocated bytes are mapped to a virtual address >= the
 * address given by @min_gva. Note that each allocation uses a unique set
 * of pages, with the minimum real allocation being at least a page. The
 * allocated physical space comes from the TEST_DATA memory region.
 */
gva_t vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva)
{
	return __vm_alloc(vm, sz, min_gva, MEM_REGION_TEST_DATA);
}
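
/*
 * Example usage (illustrative sketch): allocate a page-sized scratch buffer
 * in the guest and zero it from the host. Assumes the library's
 * addr_gva2hva() helper for the GVA-to-HVA translation.
 *
 *	gva_t gva = vm_alloc(vm, getpagesize(), KVM_UTIL_MIN_VADDR);
 *
 *	memset(addr_gva2hva(vm, gva), 0, getpagesize());
 */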

gva_t vm_alloc_pages(struct kvm_vm *vm, int nr_pages)
{
	return vm_alloc(vm, nr_pages * getpagesize(), KVM_UTIL_MIN_VADDR);
}

gva_t __vm_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type)
{
	return __vm_alloc(vm, getpagesize(), KVM_UTIL_MIN_VADDR, type);
}

gva_t vm_alloc_page(struct kvm_vm *vm)
{
	return vm_alloc_pages(vm, 1);
}

/*
 * Map a range of VM virtual address to the VM's physical address.
 *
 * Within the VM given by @vm, creates a virtual translation for @npages
 * starting at @gva to the page range starting at @gpa.
 */
void virt_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa, unsigned int npages)
{
	size_t page_size = vm->page_size;
	size_t size = npages * page_size;

	TEST_ASSERT(gva + size > gva, "Vaddr overflow");
	TEST_ASSERT(gpa + size > gpa, "Paddr overflow");

	while (npages--) {
		virt_pg_map(vm, gva, gpa);

		gva += page_size;
		gpa += page_size;
	}
}

/*
 * Address VM Physical to Host Virtual
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gpa - VM physical address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent host virtual address
 *
 * Locates the memory region containing the VM physical address given
 * by gpa, within the VM given by vm. When found, the host virtual
 * address providing the memory to the vm physical address is returned.
 * A TEST_ASSERT failure occurs if no region containing gpa exists.
 */
void *addr_gpa2hva(struct kvm_vm *vm, gpa_t gpa)
{
	struct userspace_mem_region *region;

	gpa = vm_untag_gpa(vm, gpa);

	region = userspace_mem_region_find(vm, gpa, gpa);
	if (!region) {
		TEST_FAIL("No vm physical memory at 0x%lx", gpa);
		return NULL;
	}

	return (void *)((uintptr_t)region->host_mem
		+ (gpa - region->region.guest_phys_addr));
}

/*
 * Address Host Virtual to VM Physical
 *
 * Input Args:
 *   vm - Virtual Machine
 *   hva - Host virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent VM physical address
 *
 * Locates the memory region containing the host virtual address given
 * by hva, within the VM given by vm. When found, the equivalent
 * VM physical address is returned. A TEST_ASSERT failure occurs if no
 * region containing hva exists.
 */
gpa_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
{
	struct rb_node *node;

	for (node = vm->regions.hva_tree.rb_node; node; ) {
		struct userspace_mem_region *region =
			container_of(node, struct userspace_mem_region, hva_node);

		if (hva >= region->host_mem) {
			if (hva <= (region->host_mem
				+ region->region.memory_size - 1))
				return (gpa_t)((uintptr_t)
					region->region.guest_phys_addr
					+ (hva - (uintptr_t)region->host_mem));

			node = node->rb_right;
		} else
			node = node->rb_left;
	}

	TEST_FAIL("No mapping to a guest physical address, hva: %p", hva);
	return -1;
}

/*
 * Address VM physical to Host Virtual *alias*.
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gpa - VM physical address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent address within the host virtual *alias* area, or NULL
 *   (without failing the test) if the guest memory is not shared (so
 *   no alias exists).
 *
 * Create a writable, shared virtual=>physical alias for the specific GPA.
 * The primary use case is to allow the host selftest to manipulate guest
 * memory without mapping said memory in the guest's address space. And, for
 * userfaultfd-based demand paging, to do so without triggering userfaults.
 */
void *addr_gpa2alias(struct kvm_vm *vm, gpa_t gpa)
{
	struct userspace_mem_region *region;
	uintptr_t offset;

	region = userspace_mem_region_find(vm, gpa, gpa);
	if (!region)
		return NULL;

	if (!region->host_alias)
		return NULL;

	offset = gpa - region->region.guest_phys_addr;
	return (void *) ((uintptr_t) region->host_alias + offset);
}

/* Create an interrupt controller chip for the specified VM. */
void vm_create_irqchip(struct kvm_vm *vm)
{
	int r;

	/*
	 * Allocate a fully in-kernel IRQ chip by default, but fall back to a
	 * split model (x86 only) if that fails (KVM x86 allows compiling out
	 * support for KVM_CREATE_IRQCHIP).
	 */
	r = __vm_ioctl(vm, KVM_CREATE_IRQCHIP, NULL);
	if (r && errno == ENOTTY && kvm_has_cap(KVM_CAP_SPLIT_IRQCHIP))
		vm_enable_cap(vm, KVM_CAP_SPLIT_IRQCHIP, 24);
	else
		TEST_ASSERT_VM_VCPU_IOCTL(!r, KVM_CREATE_IRQCHIP, r, vm);

	vm->has_irqchip = true;
}

int _vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc;

	do {
		rc = __vcpu_run(vcpu);
	} while (rc == -1 && errno == EINTR);

	if (!rc)
		assert_on_unhandled_exception(vcpu);

	return rc;
}

/*
 * Invoke KVM_RUN on a vCPU until KVM returns something other than -EINTR.
 * Assert if KVM returns an error (other than -EINTR).
 */
void vcpu_run(struct kvm_vcpu *vcpu)
{
	int ret = _vcpu_run(vcpu);

	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_RUN, ret));
}
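
/*
 * Example usage (illustrative sketch): the typical test run loop, driving
 * the vCPU until the guest signals completion. get_ucall() and UCALL_DONE
 * come from ucall_common.h; the dispatch on other ucalls is test-specific.
 *
 *	struct ucall uc;
 *
 *	for (;;) {
 *		vcpu_run(vcpu);
 *		if (get_ucall(vcpu, &uc) == UCALL_DONE)
 *			break;
 *	}
 */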

void vcpu_run_complete_io(struct kvm_vcpu *vcpu)
{
	int ret;

	vcpu->run->immediate_exit = 1;
	ret = __vcpu_run(vcpu);
	vcpu->run->immediate_exit = 0;

	TEST_ASSERT(ret == -1 && errno == EINTR,
		    "KVM_RUN IOCTL didn't exit immediately, rc: %i, errno: %i",
		    ret, errno);
}

/*
 * Get the list of guest registers which are supported for the
 * KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls. Returns a kvm_reg_list pointer;
 * it is the caller's responsibility to free the list.
 */
struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu)
{
	struct kvm_reg_list reg_list_n = { .n = 0 }, *reg_list;
	int ret;

	ret = __vcpu_ioctl(vcpu, KVM_GET_REG_LIST, &reg_list_n);
	TEST_ASSERT(ret == -1 && errno == E2BIG, "KVM_GET_REG_LIST n=0");

	reg_list = calloc(1, sizeof(*reg_list) + reg_list_n.n * sizeof(__u64));
	reg_list->n = reg_list_n.n;
	vcpu_ioctl(vcpu, KVM_GET_REG_LIST, reg_list);
	return reg_list;
}
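
/*
 * Example usage (illustrative sketch): enumerate every register ID the vCPU
 * supports. The caller owns the returned list and must free it.
 *
 *	struct kvm_reg_list *list = vcpu_get_reg_list(vcpu);
 *	__u64 i;
 *
 *	for (i = 0; i < list->n; i++)
 *		pr_debug("reg[%llu] = 0x%llx\n", i, list->reg[i]);
 *	free(list);
 */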

void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu)
{
	u32 page_size = getpagesize();
	u32 size = vcpu->vm->dirty_ring_size;

	TEST_ASSERT(size > 0, "Should enable dirty ring first");

	if (!vcpu->dirty_gfns) {
		void *addr;

		addr = mmap(NULL, size, PROT_READ, MAP_PRIVATE, vcpu->fd,
			    page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
		TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped private");

		addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, vcpu->fd,
			    page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
		TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped exec");

		addr = __kvm_mmap(size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd,
				  page_size * KVM_DIRTY_LOG_PAGE_OFFSET);

		vcpu->dirty_gfns = addr;
		vcpu->dirty_gfns_count = size / sizeof(struct kvm_dirty_gfn);
	}

	return vcpu->dirty_gfns;
}

/*
 * Device Ioctl
 */

int __kvm_has_device_attr(int dev_fd, u32 group, u64 attr)
{
	struct kvm_device_attr attribute = {
		.group = group,
		.attr = attr,
		.flags = 0,
	};

	return ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute);
}

int __kvm_test_create_device(struct kvm_vm *vm, u64 type)
{
	struct kvm_create_device create_dev = {
		.type = type,
		.flags = KVM_CREATE_DEVICE_TEST,
	};

	return __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev);
}

int __kvm_create_device(struct kvm_vm *vm, u64 type)
{
	struct kvm_create_device create_dev = {
		.type = type,
		.fd = -1,
		.flags = 0,
	};
	int err;

	err = __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev);
	TEST_ASSERT(err <= 0, "KVM_CREATE_DEVICE shouldn't return a positive value");
	return err ? : create_dev.fd;
}

int __kvm_device_attr_get(int dev_fd, u32 group, u64 attr, void *val)
{
	struct kvm_device_attr kvmattr = {
		.group = group,
		.attr = attr,
		.flags = 0,
		.addr = (uintptr_t)val,
	};

	return __kvm_ioctl(dev_fd, KVM_GET_DEVICE_ATTR, &kvmattr);
}

int __kvm_device_attr_set(int dev_fd, u32 group, u64 attr, void *val)
{
	struct kvm_device_attr kvmattr = {
		.group = group,
		.attr = attr,
		.flags = 0,
		.addr = (uintptr_t)val,
	};

	return __kvm_ioctl(dev_fd, KVM_SET_DEVICE_ATTR, &kvmattr);
}

/*
 * IRQ related functions.
 */

int _kvm_irq_line(struct kvm_vm *vm, u32 irq, int level)
{
	struct kvm_irq_level irq_level = {
		.irq = irq,
		.level = level,
	};

	return __vm_ioctl(vm, KVM_IRQ_LINE, &irq_level);
}

void kvm_irq_line(struct kvm_vm *vm, u32 irq, int level)
{
	int ret = _kvm_irq_line(vm, irq, level);

	TEST_ASSERT(ret >= 0, KVM_IOCTL_ERROR(KVM_IRQ_LINE, ret));
}

struct kvm_irq_routing *kvm_gsi_routing_create(void)
{
	struct kvm_irq_routing *routing;
	size_t size;

	size = sizeof(struct kvm_irq_routing);
	/* Allocate space for the max number of entries: this wastes 196 KBs. */
	size += KVM_MAX_IRQ_ROUTES * sizeof(struct kvm_irq_routing_entry);
	routing = calloc(1, size);
	assert(routing);

	return routing;
}

void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
				 u32 gsi, u32 pin)
{
	int i;

	assert(routing);
	assert(routing->nr < KVM_MAX_IRQ_ROUTES);

	i = routing->nr;
	routing->entries[i].gsi = gsi;
	routing->entries[i].type = KVM_IRQ_ROUTING_IRQCHIP;
	routing->entries[i].flags = 0;
	routing->entries[i].u.irqchip.irqchip = 0;
	routing->entries[i].u.irqchip.pin = pin;
	routing->nr++;
}

int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)
{
	int ret;

	assert(routing);
	ret = __vm_ioctl(vm, KVM_SET_GSI_ROUTING, routing);
	free(routing);

	return ret;
}

void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)
{
	int ret;

	ret = _kvm_gsi_routing_write(vm, routing);
	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_GSI_ROUTING, ret));
}
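
/*
 * Example usage (illustrative sketch): route GSIs 0..31 1:1 to irqchip pins
 * and commit the table. Note that kvm_gsi_routing_write() frees the routing
 * table, so it must not be reused afterwards.
 *
 *	struct kvm_irq_routing *routing = kvm_gsi_routing_create();
 *	int i;
 *
 *	for (i = 0; i < 32; i++)
 *		kvm_gsi_routing_irqchip_add(routing, i, i);
 *	kvm_gsi_routing_write(vm, routing);
 */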
1873
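/*
 * Illustrative sketch (not used by this library): build and commit a 1:1
 * GSI-to-pin routing table for the first 24 GSIs (a placeholder count;
 * e.g. an x86 IOAPIC exposes 24 pins). Note that kvm_gsi_routing_write()
 * frees the table, so 'routing' must not be touched afterwards.
 */
static inline void example_identity_gsi_routing(struct kvm_vm *vm)
{
	struct kvm_irq_routing *routing = kvm_gsi_routing_create();
	u32 gsi;

	for (gsi = 0; gsi < 24; gsi++)
		kvm_gsi_routing_irqchip_add(routing, gsi, gsi);

	kvm_gsi_routing_write(vm, routing);
}
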
/*
 * VM Dump
 *
 * Input Args:
 * vm - Virtual Machine
 * indent - Left margin indent amount
 *
 * Output Args:
 * stream - Output FILE stream
 *
 * Return: None
 *
 * Dumps the current state of the VM given by vm to the FILE stream
 * given by stream.
 */
void vm_dump(FILE *stream, struct kvm_vm *vm, u8 indent)
{
	int ctr;
	struct userspace_mem_region *region;
	struct kvm_vcpu *vcpu;

	fprintf(stream, "%*smode: 0x%x\n", indent, "", vm->mode);
	fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd);
	fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size);
	fprintf(stream, "%*sMem Regions:\n", indent, "");
	hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) {
		fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx "
			"host_virt: %p\n", indent + 2, "",
			(u64)region->region.guest_phys_addr,
			(u64)region->region.memory_size,
			region->host_mem);
		fprintf(stream, "%*sunused_phy_pages: ", indent + 2, "");
		sparsebit_dump(stream, region->unused_phy_pages, 0);
		if (region->protected_phy_pages) {
			fprintf(stream, "%*sprotected_phy_pages: ", indent + 2, "");
			sparsebit_dump(stream, region->protected_phy_pages, 0);
		}
	}
	fprintf(stream, "%*sMapped Virtual Pages:\n", indent, "");
	sparsebit_dump(stream, vm->vpages_mapped, indent + 2);
	fprintf(stream, "%*spgd_created: %u\n", indent, "",
		vm->mmu.pgd_created);
	if (vm->mmu.pgd_created) {
		fprintf(stream, "%*sVirtual Translation Tables:\n",
			indent + 2, "");
		virt_dump(stream, vm, indent + 4);
	}
	fprintf(stream, "%*sVCPUs:\n", indent, "");

	list_for_each_entry(vcpu, &vm->vcpus, list)
		vcpu_dump(stream, vcpu, indent + 2);
}

#define KVM_EXIT_STRING(x) {KVM_EXIT_##x, #x}

/* Known KVM exit reasons */
static struct exit_reason {
	unsigned int reason;
	const char *name;
} exit_reasons_known[] = {
	KVM_EXIT_STRING(UNKNOWN),
	KVM_EXIT_STRING(EXCEPTION),
	KVM_EXIT_STRING(IO),
	KVM_EXIT_STRING(HYPERCALL),
	KVM_EXIT_STRING(DEBUG),
	KVM_EXIT_STRING(HLT),
	KVM_EXIT_STRING(MMIO),
	KVM_EXIT_STRING(IRQ_WINDOW_OPEN),
	KVM_EXIT_STRING(SHUTDOWN),
	KVM_EXIT_STRING(FAIL_ENTRY),
	KVM_EXIT_STRING(INTR),
	KVM_EXIT_STRING(SET_TPR),
	KVM_EXIT_STRING(TPR_ACCESS),
	KVM_EXIT_STRING(S390_SIEIC),
	KVM_EXIT_STRING(S390_RESET),
	KVM_EXIT_STRING(DCR),
	KVM_EXIT_STRING(NMI),
	KVM_EXIT_STRING(INTERNAL_ERROR),
	KVM_EXIT_STRING(OSI),
	KVM_EXIT_STRING(PAPR_HCALL),
	KVM_EXIT_STRING(S390_UCONTROL),
	KVM_EXIT_STRING(WATCHDOG),
	KVM_EXIT_STRING(S390_TSCH),
	KVM_EXIT_STRING(EPR),
	KVM_EXIT_STRING(SYSTEM_EVENT),
	KVM_EXIT_STRING(S390_STSI),
	KVM_EXIT_STRING(IOAPIC_EOI),
	KVM_EXIT_STRING(HYPERV),
	KVM_EXIT_STRING(ARM_NISV),
	KVM_EXIT_STRING(X86_RDMSR),
	KVM_EXIT_STRING(X86_WRMSR),
	KVM_EXIT_STRING(DIRTY_RING_FULL),
	KVM_EXIT_STRING(AP_RESET_HOLD),
	KVM_EXIT_STRING(X86_BUS_LOCK),
	KVM_EXIT_STRING(XEN),
	KVM_EXIT_STRING(RISCV_SBI),
	KVM_EXIT_STRING(RISCV_CSR),
	KVM_EXIT_STRING(NOTIFY),
	KVM_EXIT_STRING(LOONGARCH_IOCSR),
	KVM_EXIT_STRING(MEMORY_FAULT),
	KVM_EXIT_STRING(ARM_SEA),
};

/*
 * Exit Reason String
 *
 * Input Args:
 * exit_reason - Exit reason
 *
 * Output Args: None
 *
 * Return:
 * Constant string pointer describing the exit reason.
 *
 * Locates and returns a constant string that describes the KVM exit
 * reason given by exit_reason. If no such string is found, a constant
 * string of "Unknown" is returned.
 */
const char *exit_reason_str(unsigned int exit_reason)
{
	unsigned int n1;

	for (n1 = 0; n1 < ARRAY_SIZE(exit_reasons_known); n1++) {
		if (exit_reason == exit_reasons_known[n1].reason)
			return exit_reasons_known[n1].name;
	}

	return "Unknown";
}

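/*
 * Illustrative sketch (not used by this library): exit_reason_str() mostly
 * shows up in assertion messages when a vCPU exits for a reason the test
 * did not anticipate. 'expected' would be one of the KVM_EXIT_* values.
 */
static inline void example_assert_exit_reason(struct kvm_vcpu *vcpu,
					      unsigned int expected)
{
	TEST_ASSERT(vcpu->run->exit_reason == expected,
		    "Unexpected exit reason: %u (%s)",
		    vcpu->run->exit_reason,
		    exit_reason_str(vcpu->run->exit_reason));
}
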
/*
 * Physical Contiguous Page Allocator
 *
 * Input Args:
 * vm - Virtual Machine
 * num - number of pages
 * min_gpa - Physical address minimum
 * memslot - Memory region to allocate page from
 * protected - True if the pages will be used as protected/private memory
 *
 * Output Args: None
 *
 * Return:
 * Starting physical address
 *
 * Within the VM specified by vm, locates a range of available physical
 * pages at or above min_gpa. If found, the pages are marked as in use
 * and their base address is returned. If not enough pages are available
 * at or above min_gpa, the VM state is dumped to stderr and the test
 * aborts.
 */
gpa_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
			   gpa_t min_gpa, u32 memslot,
			   bool protected)
{
	struct userspace_mem_region *region;
	sparsebit_idx_t pg, base;

	TEST_ASSERT(num > 0, "Must allocate at least one page");

	TEST_ASSERT((min_gpa % vm->page_size) == 0, "Min physical address "
		    "not divisible by page size.\n"
		    " min_gpa: 0x%lx page_size: 0x%x",
		    min_gpa, vm->page_size);

	region = memslot2region(vm, memslot);
	TEST_ASSERT(!protected || region->protected_phy_pages,
		    "Region doesn't support protected memory");

	base = pg = min_gpa >> vm->page_shift;
	do {
		for (; pg < base + num; ++pg) {
			if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
				base = pg = sparsebit_next_set(region->unused_phy_pages, pg);
				break;
			}
		}
	} while (pg && pg != base + num);

	if (pg == 0) {
		fprintf(stderr, "No guest physical page available, "
			"min_gpa: 0x%lx page_size: 0x%x memslot: %u\n",
			min_gpa, vm->page_size, memslot);
		fputs("---- vm dump ----\n", stderr);
		vm_dump(stderr, vm, 2);
		abort();
	}

	for (pg = base; pg < base + num; ++pg) {
		sparsebit_clear(region->unused_phy_pages, pg);
		if (protected)
			sparsebit_set(region->protected_phy_pages, pg);
	}

	return base * vm->page_size;
}

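/*
 * Illustrative sketch (not used by this library): allocate four contiguous
 * pages of ordinary (non-protected) guest memory from the test-data
 * memslot. The choice of memslot and the KVM_UTIL_MIN_PFN-based minimum
 * address are illustrative assumptions, not requirements of the API.
 */
static inline gpa_t example_alloc_scratch_pages(struct kvm_vm *vm)
{
	return vm_phy_pages_alloc(vm, 4, KVM_UTIL_MIN_PFN * vm->page_size,
				  vm->memslots[MEM_REGION_TEST_DATA]);
}
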
gpa_t vm_phy_page_alloc(struct kvm_vm *vm, gpa_t min_gpa, u32 memslot)
{
	return vm_phy_pages_alloc(vm, 1, min_gpa, memslot);
}

gpa_t vm_alloc_page_table(struct kvm_vm *vm)
{
	return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR,
				 vm->memslots[MEM_REGION_PT]);
}

/*
 * Address Guest Virtual to Host Virtual
 *
 * Input Args:
 * vm - Virtual Machine
 * gva - VM virtual address
 *
 * Output Args: None
 *
 * Return:
 * Equivalent host virtual address
 */
void *addr_gva2hva(struct kvm_vm *vm, gva_t gva)
{
	return addr_gpa2hva(vm, addr_gva2gpa(vm, gva));
}

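/*
 * Illustrative sketch (not used by this library): seed a guest-visible
 * value from the host by writing through the host alias of a guest
 * virtual address. The guest must already have a mapping for 'gva'.
 */
static inline void example_poke_guest_u64(struct kvm_vm *vm, gva_t gva, u64 val)
{
	u64 *hva = addr_gva2hva(vm, gva);

	*hva = val;
}
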
unsigned long __weak vm_compute_max_gfn(struct kvm_vm *vm)
{
	return ((1ULL << vm->pa_bits) >> vm->page_shift) - 1;
}

static unsigned int vm_calc_num_pages(unsigned int num_pages,
				      unsigned int page_shift,
				      unsigned int new_page_shift,
				      bool ceil)
{
	unsigned int n;

	if (page_shift >= new_page_shift)
		return num_pages * (1 << (page_shift - new_page_shift));

	/*
	 * Compute the divisor only after the early return: the shift counts
	 * are unsigned, so evaluating 1 << (new_page_shift - page_shift)
	 * when page_shift > new_page_shift would shift by a huge
	 * wrapped-around amount, which is undefined behavior.
	 */
	n = 1 << (new_page_shift - page_shift);
	return num_pages / n + !!(ceil && num_pages % n);
}

static inline int getpageshift(void)
{
	return __builtin_ffs(getpagesize()) - 1;
}

unsigned int
vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
{
	return vm_calc_num_pages(num_guest_pages,
				 vm_guest_mode_params[mode].page_shift,
				 getpageshift(), true);
}

unsigned int
vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages)
{
	return vm_calc_num_pages(num_host_pages, getpageshift(),
				 vm_guest_mode_params[mode].page_shift, false);
}

unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size)
{
	unsigned int n = DIV_ROUND_UP(size, vm_guest_mode_params[mode].page_size);

	return vm_adjust_num_guest_pages(mode, n);
}

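/*
 * Worked example (illustrative, not used by this library): converting
 * between 64 KiB pages (shift 16) and 4 KiB pages (shift 12). One 64 KiB
 * page expands to exactly sixteen 4 KiB pages; in the other direction,
 * 33 x 4 KiB pages hold two whole 64 KiB pages when rounding down and
 * three when rounding up.
 */
static inline void example_page_count_conversions(void)
{
	TEST_ASSERT_EQ(vm_calc_num_pages(1, 16, 12, false), 16);
	TEST_ASSERT_EQ(vm_calc_num_pages(33, 12, 16, false), 2);
	TEST_ASSERT_EQ(vm_calc_num_pages(33, 12, 16, true), 3);
}
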
/*
 * Read binary stats descriptors
 *
 * Input Args:
 * stats_fd - the file descriptor for the binary stats file from which to read
 * header - the binary stats metadata header corresponding to the given FD
 *
 * Output Args: None
 *
 * Return:
 * A pointer to a newly allocated series of stat descriptors.
 * Caller is responsible for freeing the returned kvm_stats_desc.
 *
 * Read the stats descriptors from the binary stats interface.
 */
struct kvm_stats_desc *read_stats_descriptors(int stats_fd,
					      struct kvm_stats_header *header)
{
	struct kvm_stats_desc *stats_desc;
	ssize_t desc_size, total_size, ret;

	desc_size = get_stats_descriptor_size(header);
	total_size = header->num_desc * desc_size;

	stats_desc = calloc(header->num_desc, desc_size);
	TEST_ASSERT(stats_desc, "Allocate memory for stats descriptors");

	ret = pread(stats_fd, stats_desc, total_size, header->desc_offset);
	TEST_ASSERT(ret == total_size, "Read KVM stats descriptors");

	return stats_desc;
}

/*
 * Read stat data for a particular stat
 *
 * Input Args:
 * stats_fd - the file descriptor for the binary stats file from which to read
 * header - the binary stats metadata header corresponding to the given FD
 * desc - the binary stat metadata for the particular stat to be read
 * max_elements - the maximum number of 8-byte values to read into data
 *
 * Output Args:
 * data - the buffer into which stat data should be read
 *
 * Read the data values of a specified stat from the binary stats interface.
 */
void read_stat_data(int stats_fd, struct kvm_stats_header *header,
		    struct kvm_stats_desc *desc, u64 *data,
		    size_t max_elements)
{
	size_t nr_elements = min_t(ssize_t, desc->size, max_elements);
	size_t size = nr_elements * sizeof(*data);
	ssize_t ret;

	TEST_ASSERT(desc->size, "No elements in stat '%s'", desc->name);
	TEST_ASSERT(max_elements, "Zero elements requested for stat '%s'", desc->name);

	ret = pread(stats_fd, data, size,
		    header->data_offset + desc->offset);

	TEST_ASSERT(ret >= 0, "pread() failed on stat '%s', errno: %i (%s)",
		    desc->name, errno, strerror(errno));
	TEST_ASSERT(ret == size,
		    "pread() on stat '%s' read %ld bytes, wanted %lu bytes",
		    desc->name, ret, size);
}

void kvm_get_stat(struct kvm_binary_stats *stats, const char *name,
		  u64 *data, size_t max_elements)
{
	struct kvm_stats_desc *desc;
	size_t size_desc;
	int i;

	if (!stats->desc) {
		read_stats_header(stats->fd, &stats->header);
		stats->desc = read_stats_descriptors(stats->fd, &stats->header);
	}

	size_desc = get_stats_descriptor_size(&stats->header);

	for (i = 0; i < stats->header.num_desc; ++i) {
		desc = (void *)stats->desc + (i * size_desc);

		if (strcmp(desc->name, name))
			continue;

		read_stat_data(stats->fd, &stats->header, desc, data, max_elements);
		return;
	}

	TEST_FAIL("Unable to find stat '%s'", name);
}

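/*
 * Illustrative sketch (not used by this library): read a single u64 stat
 * by name. Callers would pass a VM- or vCPU-scoped kvm_binary_stats
 * instance (e.g. vm->stats, assuming the layout used elsewhere in these
 * selftests) and a stat name such as "remote_tlb_flush".
 */
static inline u64 example_read_u64_stat(struct kvm_binary_stats *stats,
					const char *name)
{
	u64 val;

	kvm_get_stat(stats, name, &val, 1);
	return val;
}
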
__weak void kvm_arch_vm_post_create(struct kvm_vm *vm, unsigned int nr_vcpus)
{
}

__weak void kvm_arch_vm_finalize_vcpus(struct kvm_vm *vm)
{
}

__weak void kvm_arch_vm_release(struct kvm_vm *vm)
{
}

__weak void kvm_selftest_arch_init(void)
{
}

static void report_unexpected_signal(int signum)
{
#define KVM_CASE_SIGNUM(sig) \
	case sig: TEST_FAIL("Unexpected " #sig " (%d)", signum)

	switch (signum) {
	KVM_CASE_SIGNUM(SIGBUS);
	KVM_CASE_SIGNUM(SIGSEGV);
	KVM_CASE_SIGNUM(SIGILL);
	KVM_CASE_SIGNUM(SIGFPE);
	default:
		TEST_FAIL("Unexpected signal %d", signum);
	}
}

void __attribute((constructor)) kvm_selftest_init(void)
{
	struct sigaction sig_sa = {
		.sa_handler = report_unexpected_signal,
	};

	/* Tell stdout not to buffer its content. */
	setbuf(stdout, NULL);

	sigaction(SIGBUS, &sig_sa, NULL);
	sigaction(SIGSEGV, &sig_sa, NULL);
	sigaction(SIGILL, &sig_sa, NULL);
	sigaction(SIGFPE, &sig_sa, NULL);

	guest_random_seed = last_guest_seed = random();
	pr_info("Random seed: 0x%x\n", guest_random_seed);

	kvm_selftest_arch_init();
}

bool vm_is_gpa_protected(struct kvm_vm *vm, gpa_t gpa)
{
	sparsebit_idx_t pg = 0;
	struct userspace_mem_region *region;

	if (!vm_arch_has_protected_memory(vm))
		return false;

	region = userspace_mem_region_find(vm, gpa, gpa);
	TEST_ASSERT(region, "No vm physical memory at 0x%lx", gpa);

	pg = gpa >> vm->page_shift;
	return sparsebit_is_set(region->protected_phy_pages, pg);
}

__weak bool kvm_arch_has_default_irqchip(void)
{
	return false;
}