Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0
/*
 * steal/stolen time test
 *
 * Copyright (C) 2020, Red Hat, Inc.
 */
#include <stdio.h>
#include <time.h>
#include <sched.h>
#include <pthread.h>
#include <linux/kernel.h>
#include <asm/kvm.h>
#ifdef __riscv
#include "sbi.h"
#else
#include <asm/kvm_para.h>
#endif

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "ucall_common.h"

#define NR_VCPUS 4
#define ST_GPA_BASE (1 << 30)

static void *st_gva[NR_VCPUS];
static u64 guest_stolen_time[NR_VCPUS];

#if defined(__x86_64__)

/* steal_time must have 64-byte alignment */
#define STEAL_TIME_SIZE ((sizeof(struct kvm_steal_time) + 63) & ~63)

static void check_status(struct kvm_steal_time *st)
{
	GUEST_ASSERT(!(READ_ONCE(st->version) & 1));
	GUEST_ASSERT_EQ(READ_ONCE(st->flags), 0);
	GUEST_ASSERT_EQ(READ_ONCE(st->preempted), 0);
}

static void guest_code(int cpu)
{
	struct kvm_steal_time *st = st_gva[cpu];
	u32 version;

	GUEST_ASSERT_EQ(rdmsr(MSR_KVM_STEAL_TIME), ((u64)st_gva[cpu] | KVM_MSR_ENABLED));

	memset(st, 0, sizeof(*st));
	GUEST_SYNC(0);

	check_status(st);
	WRITE_ONCE(guest_stolen_time[cpu], st->steal);
	version = READ_ONCE(st->version);
	check_status(st);
	GUEST_SYNC(1);

	check_status(st);
	GUEST_ASSERT(version < READ_ONCE(st->version));
	WRITE_ONCE(guest_stolen_time[cpu], st->steal);
	check_status(st);
	GUEST_DONE();
}

static bool is_steal_time_supported(struct kvm_vcpu *vcpu)
{
	return kvm_cpu_has(X86_FEATURE_KVM_STEAL_TIME);
}

static void steal_time_init(struct kvm_vcpu *vcpu, u32 i)
{
	/* ST_GPA_BASE is identity mapped */
	st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
	sync_global_to_guest(vcpu->vm, st_gva[i]);

	vcpu_set_msr(vcpu, MSR_KVM_STEAL_TIME, (ulong)st_gva[i] | KVM_MSR_ENABLED);
}

static void steal_time_dump(struct kvm_vm *vm, u32 vcpu_idx)
{
	struct kvm_steal_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]);

	ksft_print_msg("VCPU%d:\n", vcpu_idx);
	ksft_print_msg("    steal:     %lld\n", st->steal);
	ksft_print_msg("    version:   %d\n", st->version);
	ksft_print_msg("    flags:     %d\n", st->flags);
	ksft_print_msg("    preempted: %d\n", st->preempted);
	ksft_print_msg("    u8_pad:    %d %d %d\n",
		       st->u8_pad[0], st->u8_pad[1], st->u8_pad[2]);
	ksft_print_msg("    pad:       %d %d %d %d %d %d %d %d %d %d %d\n",
		       st->pad[0], st->pad[1], st->pad[2], st->pad[3],
		       st->pad[4], st->pad[5], st->pad[6], st->pad[7],
		       st->pad[8], st->pad[9], st->pad[10]);
}

static void check_steal_time_uapi(void)
{
	struct kvm_vm *vm;
	struct kvm_vcpu *vcpu;
	int ret;

	vm = vm_create_with_one_vcpu(&vcpu, NULL);

	ret = _vcpu_set_msr(vcpu, MSR_KVM_STEAL_TIME,
			    (ulong)ST_GPA_BASE | KVM_STEAL_RESERVED_MASK);
	TEST_ASSERT(ret == 0, "Bad GPA didn't fail");

	kvm_vm_free(vm);
}

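/*
 * Each architecture-specific block in this file supplies the same five hooks
 * consumed by the common harness at the bottom: guest_code(),
 * is_steal_time_supported(), steal_time_init(), steal_time_dump() and
 * check_steal_time_uapi().  The arm64 variant below discovers the stolen-time
 * record from inside the guest via the SMCCC paravirtualized time calls
 * (PV_TIME_FEATURES/PV_TIME_ST), while the host side programs its IPA through
 * the KVM_ARM_VCPU_PVTIME_CTRL vCPU device attribute.
 */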
#elif defined(__aarch64__)

/* PV_TIME_ST must have 64-byte alignment */
#define STEAL_TIME_SIZE ((sizeof(struct st_time) + 63) & ~63)

#define SMCCC_ARCH_FEATURES	0x80000001
#define PV_TIME_FEATURES	0xc5000020
#define PV_TIME_ST		0xc5000021

struct st_time {
	u32 rev;
	u32 attr;
	u64 st_time;
};

static s64 smccc(u32 func, u64 arg)
{
	struct arm_smccc_res res;

	do_smccc(func, arg, 0, 0, 0, 0, 0, 0, &res);
	return res.a0;
}

static void check_status(struct st_time *st)
{
	GUEST_ASSERT_EQ(READ_ONCE(st->rev), 0);
	GUEST_ASSERT_EQ(READ_ONCE(st->attr), 0);
}

static void guest_code(int cpu)
{
	struct st_time *st;
	s64 status;

	status = smccc(SMCCC_ARCH_FEATURES, PV_TIME_FEATURES);
	GUEST_ASSERT_EQ(status, 0);
	status = smccc(PV_TIME_FEATURES, PV_TIME_FEATURES);
	GUEST_ASSERT_EQ(status, 0);
	status = smccc(PV_TIME_FEATURES, PV_TIME_ST);
	GUEST_ASSERT_EQ(status, 0);

	status = smccc(PV_TIME_ST, 0);
	GUEST_ASSERT_NE(status, -1);
	GUEST_ASSERT_EQ(status, (ulong)st_gva[cpu]);

	st = (struct st_time *)status;
	GUEST_SYNC(0);

	check_status(st);
	WRITE_ONCE(guest_stolen_time[cpu], st->st_time);
	GUEST_SYNC(1);

	check_status(st);
	WRITE_ONCE(guest_stolen_time[cpu], st->st_time);
	GUEST_DONE();
}

static bool is_steal_time_supported(struct kvm_vcpu *vcpu)
{
	struct kvm_device_attr dev = {
		.group = KVM_ARM_VCPU_PVTIME_CTRL,
		.attr = KVM_ARM_VCPU_PVTIME_IPA,
	};

	return !__vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &dev);
}

static void steal_time_init(struct kvm_vcpu *vcpu, u32 i)
{
	struct kvm_vm *vm = vcpu->vm;
	u64 st_ipa;

	struct kvm_device_attr dev = {
		.group = KVM_ARM_VCPU_PVTIME_CTRL,
		.attr = KVM_ARM_VCPU_PVTIME_IPA,
		.addr = (u64)&st_ipa,
	};

	/* ST_GPA_BASE is identity mapped */
	st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
	sync_global_to_guest(vm, st_gva[i]);

	st_ipa = (ulong)st_gva[i];
	vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev);
}

static void steal_time_dump(struct kvm_vm *vm, u32 vcpu_idx)
{
	struct st_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]);

	ksft_print_msg("VCPU%d:\n", vcpu_idx);
	ksft_print_msg("    rev:     %d\n", st->rev);
	ksft_print_msg("    attr:    %d\n", st->attr);
	ksft_print_msg("    st_time: %ld\n", st->st_time);
}

static void check_steal_time_uapi(void)
{
	struct kvm_vm *vm;
	struct kvm_vcpu *vcpu;
	u64 st_ipa;
	int ret;

	vm = vm_create_with_one_vcpu(&vcpu, NULL);

	struct kvm_device_attr dev = {
		.group = KVM_ARM_VCPU_PVTIME_CTRL,
		.attr = KVM_ARM_VCPU_PVTIME_IPA,
		.addr = (u64)&st_ipa,
	};

	vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &dev);

	st_ipa = (ulong)ST_GPA_BASE | 1;
	ret = __vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev);
	TEST_ASSERT(ret == -1 && errno == EINVAL, "Bad IPA didn't report EINVAL");

	st_ipa = (ulong)ST_GPA_BASE;
	vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev);

	ret = __vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev);
	TEST_ASSERT(ret == -1 && errno == EEXIST, "Set IPA twice without EEXIST");

	kvm_vm_free(vm);
}

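/*
 * The RISC-V variant below uses the SBI STA (steal-time accounting)
 * extension: unlike the other architectures, the guest itself registers the
 * shared-memory area with an SBI call (sta_set_shmem()), so steal_time_init()
 * only precomputes and publishes the guest-physical addresses in st_gpa[].
 * The host-side UAPI check exercises the STA shmem register via
 * KVM_SET_ONE_REG instead of a device attribute.
 */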
#elif defined(__riscv)

/* SBI STA shmem must have 64-byte alignment */
#define STEAL_TIME_SIZE ((sizeof(struct sta_struct) + 63) & ~63)

static gpa_t st_gpa[NR_VCPUS];

struct sta_struct {
	u32 sequence;
	u32 flags;
	u64 steal;
	u8 preempted;
	u8 pad[47];
} __packed;

static void sta_set_shmem(gpa_t gpa, unsigned long flags)
{
	unsigned long lo = (unsigned long)gpa;
#if __riscv_xlen == 32
	unsigned long hi = (unsigned long)(gpa >> 32);
#else
	unsigned long hi = gpa == -1 ? -1 : 0;
#endif
	struct sbiret ret = sbi_ecall(SBI_EXT_STA, 0, lo, hi, flags, 0, 0, 0);

	GUEST_ASSERT(ret.value == 0 && ret.error == 0);
}

static void check_status(struct sta_struct *st)
{
	GUEST_ASSERT(!(READ_ONCE(st->sequence) & 1));
	GUEST_ASSERT(READ_ONCE(st->flags) == 0);
	GUEST_ASSERT(READ_ONCE(st->preempted) == 0);
}

static void guest_code(int cpu)
{
	struct sta_struct *st = st_gva[cpu];
	u32 sequence;
	long out_val = 0;
	bool probe;

	probe = guest_sbi_probe_extension(SBI_EXT_STA, &out_val);
	GUEST_ASSERT(probe && out_val == 1);

	sta_set_shmem(st_gpa[cpu], 0);
	GUEST_SYNC(0);

	check_status(st);
	WRITE_ONCE(guest_stolen_time[cpu], st->steal);
	sequence = READ_ONCE(st->sequence);
	check_status(st);
	GUEST_SYNC(1);

	check_status(st);
	GUEST_ASSERT(sequence < READ_ONCE(st->sequence));
	WRITE_ONCE(guest_stolen_time[cpu], st->steal);
	check_status(st);
	GUEST_DONE();
}

static bool is_steal_time_supported(struct kvm_vcpu *vcpu)
{
	u64 id = RISCV_SBI_EXT_REG(KVM_RISCV_SBI_EXT_STA);
	unsigned long enabled = vcpu_get_reg(vcpu, id);

	TEST_ASSERT(enabled == 0 || enabled == 1, "Expected boolean result");

	return enabled;
}

static void steal_time_init(struct kvm_vcpu *vcpu, u32 i)
{
	/* ST_GPA_BASE is identity mapped */
	st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
	st_gpa[i] = addr_gva2gpa(vcpu->vm, (gva_t)st_gva[i]);
	sync_global_to_guest(vcpu->vm, st_gva[i]);
	sync_global_to_guest(vcpu->vm, st_gpa[i]);
}

static void steal_time_dump(struct kvm_vm *vm, u32 vcpu_idx)
{
	struct sta_struct *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]);
	int i;

	pr_info("VCPU%d:\n", vcpu_idx);
	pr_info("    sequence:  %d\n", st->sequence);
	pr_info("    flags:     %d\n", st->flags);
	pr_info("    steal:     %"PRIu64"\n", st->steal);
	pr_info("    preempted: %d\n", st->preempted);
	pr_info("    pad:       ");
	for (i = 0; i < 47; ++i)
		pr_info("%d", st->pad[i]);
	pr_info("\n");
}

static void check_steal_time_uapi(void)
{
	struct kvm_vm *vm;
	struct kvm_vcpu *vcpu;
	struct kvm_one_reg reg;
	u64 shmem;
	int ret;

	vm = vm_create_with_one_vcpu(&vcpu, NULL);

	reg.id = KVM_REG_RISCV |
		 KVM_REG_SIZE_ULONG |
		 KVM_REG_RISCV_SBI_STATE |
		 KVM_REG_RISCV_SBI_STA |
		 KVM_REG_RISCV_SBI_STA_REG(shmem_lo);
	reg.addr = (u64)&shmem;

	shmem = ST_GPA_BASE + 1;
	ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
	TEST_ASSERT(ret == -1 && errno == EINVAL,
		    "misaligned STA shmem returns -EINVAL");

	shmem = ST_GPA_BASE;
	ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
	TEST_ASSERT(ret == 0,
		    "aligned STA shmem succeeds");

	shmem = INVALID_GPA;
	ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
	TEST_ASSERT(ret == 0,
		    "all-ones for STA shmem succeeds");

	kvm_vm_free(vm);
}

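/*
 * The LoongArch variant below declares a local struct kvm_steal_time for the
 * record layout it expects, probes for the feature through the
 * KVM_LOONGARCH_VCPU_CPUCFG attribute group (KVM_FEATURE_STEAL_TIME in
 * CPUCFG_KVM_FEATURE), and programs the per-vCPU record address through
 * KVM_LOONGARCH_VCPU_PVTIME_CTRL, with bit 0 (KVM_STEAL_PHYS_VALID) marking
 * the guest physical address as valid.
 */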
#elif defined(__loongarch__)

/* steal_time must have 64-byte alignment */
#define STEAL_TIME_SIZE ((sizeof(struct kvm_steal_time) + 63) & ~63)
#define KVM_STEAL_PHYS_VALID BIT_ULL(0)

struct kvm_steal_time {
	__u64 steal;
	__u32 version;
	__u32 flags;
	__u8 preempted;
	__u8 pad[47];
};

static void check_status(struct kvm_steal_time *st)
{
	GUEST_ASSERT(!(READ_ONCE(st->version) & 1));
	GUEST_ASSERT_EQ(READ_ONCE(st->flags), 0);
	GUEST_ASSERT_EQ(READ_ONCE(st->preempted), 0);
}

static void guest_code(int cpu)
{
	u32 version;
	struct kvm_steal_time *st = st_gva[cpu];

	memset(st, 0, sizeof(*st));
	GUEST_SYNC(0);

	check_status(st);
	WRITE_ONCE(guest_stolen_time[cpu], st->steal);
	version = READ_ONCE(st->version);
	check_status(st);
	GUEST_SYNC(1);

	check_status(st);
	GUEST_ASSERT(version < READ_ONCE(st->version));
	WRITE_ONCE(guest_stolen_time[cpu], st->steal);
	check_status(st);
	GUEST_DONE();
}

static bool is_steal_time_supported(struct kvm_vcpu *vcpu)
{
	int err;
	u64 val;
	struct kvm_device_attr attr = {
		.group = KVM_LOONGARCH_VCPU_CPUCFG,
		.attr = CPUCFG_KVM_FEATURE,
		.addr = (u64)&val,
	};

	err = __vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &attr);
	if (err)
		return false;

	err = __vcpu_ioctl(vcpu, KVM_GET_DEVICE_ATTR, &attr);
	if (err)
		return false;

	return val & BIT(KVM_FEATURE_STEAL_TIME);
}

static void steal_time_init(struct kvm_vcpu *vcpu, u32 i)
{
	int err;
	u64 st_gpa;
	struct kvm_vm *vm = vcpu->vm;
	struct kvm_device_attr attr = {
		.group = KVM_LOONGARCH_VCPU_PVTIME_CTRL,
		.attr = KVM_LOONGARCH_VCPU_PVTIME_GPA,
		.addr = (u64)&st_gpa,
	};

	/* ST_GPA_BASE is identity mapped */
	st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
	sync_global_to_guest(vm, st_gva[i]);

	err = __vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &attr);
	TEST_ASSERT(err == 0, "No PV stealtime feature");

	st_gpa = (unsigned long)st_gva[i] | KVM_STEAL_PHYS_VALID;
	err = __vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &attr);
	TEST_ASSERT(err == 0, "Failed to set PV stealtime GPA");
}

static void steal_time_dump(struct kvm_vm *vm, u32 vcpu_idx)
{
	struct kvm_steal_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]);

	ksft_print_msg("VCPU%d:\n", vcpu_idx);
	ksft_print_msg("    steal:     %lld\n", st->steal);
	ksft_print_msg("    flags:     %d\n", st->flags);
	ksft_print_msg("    version:   %d\n", st->version);
	ksft_print_msg("    preempted: %d\n", st->preempted);
}

static void check_steal_time_uapi(void)
{

}
#endif

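/*
 * Common harness.  do_steal_time() busy-waits for MIN_RUN_DELAY_NS in a
 * thread pinned to the same physical CPU as the test (and therefore as the
 * vCPUs, which run from the main thread), so the main thread accrues
 * scheduler run delay while it yields.  KVM accounts that run delay as steal
 * time on the next vCPU run, and main() compares the value the guest reports
 * against get_run_delay(), the run delay observed on the host side.
 */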
static void *do_steal_time(void *arg)
{
	struct timespec ts, stop;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	stop = timespec_add_ns(ts, MIN_RUN_DELAY_NS);

	while (1) {
		clock_gettime(CLOCK_MONOTONIC, &ts);
		if (timespec_to_ns(timespec_sub(ts, stop)) >= 0)
			break;
	}

	return NULL;
}

static void run_vcpu(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	vcpu_run(vcpu);

	switch (get_ucall(vcpu, &uc)) {
	case UCALL_SYNC:
	case UCALL_DONE:
		break;
	case UCALL_ABORT:
		REPORT_GUEST_ASSERT(uc);
	default:
		TEST_ASSERT(false, "Unexpected exit: %s",
			    exit_reason_str(vcpu->run->exit_reason));
	}
}

int main(int ac, char **av)
{
	struct kvm_vcpu *vcpus[NR_VCPUS];
	struct kvm_vm *vm;
	pthread_attr_t attr;
	pthread_t thread;
	cpu_set_t cpuset;
	unsigned int gpages;
	long stolen_time;
	long run_delay;
	bool verbose;
	int i;

	verbose = ac > 1 && (!strncmp(av[1], "-v", 3) || !strncmp(av[1], "--verbose", 10));

	/* Set CPU affinity so we can force preemption of the VCPU */
	CPU_ZERO(&cpuset);
	CPU_SET(0, &cpuset);
	pthread_attr_init(&attr);
	pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpuset);
	pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);

	/* Create a VM and an identity mapped memslot for the steal time structure */
	vm = vm_create_with_vcpus(NR_VCPUS, guest_code, vcpus);
	gpages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, STEAL_TIME_SIZE * NR_VCPUS);
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, ST_GPA_BASE, 1, gpages, 0);
	virt_map(vm, ST_GPA_BASE, ST_GPA_BASE, gpages);

	ksft_print_header();
	TEST_REQUIRE(is_steal_time_supported(vcpus[0]));
	ksft_set_plan(NR_VCPUS);

	check_steal_time_uapi();

	/* Run test on each VCPU */
	for (i = 0; i < NR_VCPUS; ++i) {
		steal_time_init(vcpus[i], i);

		vcpu_args_set(vcpus[i], 1, i);

		/* First VCPU run initializes steal-time */
		run_vcpu(vcpus[i]);

		/* Second VCPU run, expect guest stolen time to be <= run_delay */
		run_vcpu(vcpus[i]);
		sync_global_from_guest(vm, guest_stolen_time[i]);
		stolen_time = guest_stolen_time[i];
		run_delay = get_run_delay();
		TEST_ASSERT(stolen_time <= run_delay,
			    "Expected stolen time <= %ld, got %ld",
			    run_delay, stolen_time);

		/*
		 * Steal time from the VCPU. The steal time thread has the same
		 * CPU affinity as the VCPUs.
		 */
		run_delay = get_run_delay();
		pthread_create(&thread, &attr, do_steal_time, NULL);
		do
			sched_yield();
		while (get_run_delay() - run_delay < MIN_RUN_DELAY_NS);
		pthread_join(thread, NULL);
		run_delay = get_run_delay() - run_delay;
		TEST_ASSERT(run_delay >= MIN_RUN_DELAY_NS,
			    "Expected run_delay >= %ld, got %ld",
			    MIN_RUN_DELAY_NS, run_delay);

		/* Run VCPU again to confirm stolen time is consistent with run_delay */
		run_vcpu(vcpus[i]);
		sync_global_from_guest(vm, guest_stolen_time[i]);
		stolen_time = guest_stolen_time[i] - stolen_time;
		TEST_ASSERT(stolen_time >= run_delay,
			    "Expected stolen time >= %ld, got %ld",
			    run_delay, stolen_time);

		if (verbose) {
			ksft_print_msg("VCPU%d: total-stolen-time=%ld test-stolen-time=%ld%s\n",
				       i, guest_stolen_time[i], stolen_time,
				       stolen_time == run_delay ?
				       " (BONUS: guest test-stolen-time even exactly matches test-run_delay)" : "");
			steal_time_dump(vm, i);
		}
		ksft_test_result_pass("vcpu%d\n", i);
	}

	/* Print results and exit() accordingly */
	ksft_finished();
}
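The version/sequence checks in the guest code above follow a sequence-lock style protocol: the hypervisor bumps the counter to an odd value before updating the record and back to an even value afterwards, which is why check_status() asserts the low bit is clear and why the test expects the counter to increase across runs. As an illustrative sketch only (not part of the selftest, with memory barriers omitted for brevity, and with a helper name of my choosing), a guest that could not assume the record is quiescent while it reads would consume the x86-style layout like this:

/* Illustrative sketch, not part of the selftest above. */
static u64 read_steal_time(struct kvm_steal_time *st)
{
	u32 seq;
	u64 steal;

	do {
		/* Wait for an even (stable) sequence value. */
		do {
			seq = READ_ONCE(st->version);
		} while (seq & 1);

		steal = READ_ONCE(st->steal);
		/* Retry if the record changed while we were reading it. */
	} while (READ_ONCE(st->version) != seq);

	return steal;
}

The test itself gets away with the simpler "assert the counter is even" check because the record is only updated while the vCPU is not running.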