Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

KVM: selftests: Use gva_t instead of vm_vaddr_t

Replace all occurrences of vm_vaddr_t with gva_t to align with KVM code
and with the conversion helpers (e.g. addr_gva2hva()).

This commit was generated with the following command:

git ls-files tools/testing/selftests/kvm | xargs sed -i 's/vm_vaddr_/gva_/g'

Then by manually adjusting whitespace to make checkpatch.pl happy, and
dropping renames of functions that allocate memory within a given VM.

No functional change intended.

Signed-off-by: David Matlack <dmatlack@google.com>
[sean: drop renames of allocator APIs]
Link: https://patch.msgid.link/20260420212004.3938325-2-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>

Authored by David Matlack and committed by Sean Christopherson.
5567fc9d 6b802031

+172 -178
+3 -3
tools/testing/selftests/kvm/arm64/vgic_irq.c
··· 731 731 struct kvm_inject_args *args) 732 732 { 733 733 struct kvm_inject_args *kvm_args_hva; 734 - vm_vaddr_t kvm_args_gva; 734 + gva_t kvm_args_gva; 735 735 736 736 kvm_args_gva = uc->args[1]; 737 737 kvm_args_hva = (struct kvm_inject_args *)addr_gva2hva(vm, kvm_args_gva); ··· 752 752 struct kvm_vcpu *vcpu; 753 753 struct kvm_vm *vm; 754 754 struct kvm_inject_args inject_args; 755 - vm_vaddr_t args_gva; 755 + gva_t args_gva; 756 756 757 757 struct test_args args = { 758 758 .nr_irqs = nr_irqs, ··· 986 986 struct kvm_vcpu *vcpus[2]; 987 987 struct test_args args = {}; 988 988 struct kvm_vm *vm; 989 - vm_vaddr_t args_gva; 989 + gva_t args_gva; 990 990 int gic_fd, ret; 991 991 992 992 vm = vm_create_with_vcpus(2, gcode, vcpus);
+2 -2
tools/testing/selftests/kvm/include/arm64/processor.h
··· 179 179 void vm_install_sync_handler(struct kvm_vm *vm, 180 180 int vector, int ec, handler_fn handler); 181 181 182 - uint64_t *virt_get_pte_hva_at_level(struct kvm_vm *vm, vm_vaddr_t gva, int level); 183 - uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva); 182 + uint64_t *virt_get_pte_hva_at_level(struct kvm_vm *vm, gva_t gva, int level); 183 + uint64_t *virt_get_pte_hva(struct kvm_vm *vm, gva_t gva); 184 184 185 185 static inline void cpu_relax(void) 186 186 {
+2 -2
tools/testing/selftests/kvm/include/arm64/ucall.h
··· 10 10 * ucall_exit_mmio_addr holds per-VM values (global data is duplicated by each 11 11 * VM), it must not be accessed from host code. 12 12 */ 13 - extern vm_vaddr_t *ucall_exit_mmio_addr; 13 + extern gva_t *ucall_exit_mmio_addr; 14 14 15 - static inline void ucall_arch_do_ucall(vm_vaddr_t uc) 15 + static inline void ucall_arch_do_ucall(gva_t uc) 16 16 { 17 17 WRITE_ONCE(*ucall_exit_mmio_addr, uc); 18 18 }
+15 -17
tools/testing/selftests/kvm/include/kvm_util.h
··· 112 112 struct sparsebit *vpages_mapped; 113 113 bool has_irqchip; 114 114 vm_paddr_t ucall_mmio_addr; 115 - vm_vaddr_t handlers; 115 + gva_t handlers; 116 116 uint32_t dirty_ring_size; 117 117 uint64_t gpa_tag_mask; 118 118 ··· 716 716 void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot); 717 717 struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id); 718 718 void vm_populate_vaddr_bitmap(struct kvm_vm *vm); 719 - vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min); 720 - vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min); 721 - vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, 719 + gva_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, gva_t vaddr_min); 720 + gva_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, gva_t vaddr_min); 721 + gva_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, gva_t vaddr_min, 722 + enum kvm_mem_region_type type); 723 + gva_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz, gva_t vaddr_min, 722 724 enum kvm_mem_region_type type); 723 - vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz, 724 - vm_vaddr_t vaddr_min, 725 - enum kvm_mem_region_type type); 726 - vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages); 727 - vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm, 728 - enum kvm_mem_region_type type); 729 - vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm); 725 + gva_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages); 726 + gva_t __vm_vaddr_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type); 727 + gva_t vm_vaddr_alloc_page(struct kvm_vm *vm); 730 728 731 729 void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, 732 730 unsigned int npages); 733 731 void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa); 734 - void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva); 732 + void *addr_gva2hva(struct kvm_vm *vm, gva_t gva); 735 733 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void 
*hva); 736 734 void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa); 737 735 ··· 1129 1131 } 1130 1132 1131 1133 #define sync_global_to_guest(vm, g) ({ \ 1132 - typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \ 1134 + typeof(g) *_p = addr_gva2hva(vm, (gva_t)&(g)); \ 1133 1135 memcpy(_p, &(g), sizeof(g)); \ 1134 1136 }) 1135 1137 1136 1138 #define sync_global_from_guest(vm, g) ({ \ 1137 - typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \ 1139 + typeof(g) *_p = addr_gva2hva(vm, (gva_t)&(g)); \ 1138 1140 memcpy(&(g), _p, sizeof(g)); \ 1139 1141 }) 1140 1142 ··· 1145 1147 * undesirable to change the host's copy of the global. 1146 1148 */ 1147 1149 #define write_guest_global(vm, g, val) ({ \ 1148 - typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \ 1150 + typeof(g) *_p = addr_gva2hva(vm, (gva_t)&(g)); \ 1149 1151 typeof(g) _val = val; \ 1150 1152 \ 1151 1153 memcpy(_p, &(_val), sizeof(g)); \ ··· 1240 1242 * Returns the VM physical address of the translated VM virtual 1241 1243 * address given by @gva. 1242 1244 */ 1243 - vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva); 1245 + vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva); 1244 1246 1245 - static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) 1247 + static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, gva_t gva) 1246 1248 { 1247 1249 return addr_arch_gva2gpa(vm, gva); 1248 1250 }
+1 -1
tools/testing/selftests/kvm/include/kvm_util_types.h
··· 15 15 #define kvm_static_assert(expr, ...) __kvm_static_assert(expr, ##__VA_ARGS__, #expr) 16 16 17 17 typedef uint64_t vm_paddr_t; /* Virtual Machine (Guest) physical address */ 18 - typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */ 18 + typedef uint64_t gva_t; /* Virtual Machine (Guest) virtual address */ 19 19 20 20 #define INVALID_GPA (~(uint64_t)0) 21 21
+2 -2
tools/testing/selftests/kvm/include/loongarch/ucall.h
··· 10 10 * ucall_exit_mmio_addr holds per-VM values (global data is duplicated by each 11 11 * VM), it must not be accessed from host code. 12 12 */ 13 - extern vm_vaddr_t *ucall_exit_mmio_addr; 13 + extern gva_t *ucall_exit_mmio_addr; 14 14 15 - static inline void ucall_arch_do_ucall(vm_vaddr_t uc) 15 + static inline void ucall_arch_do_ucall(gva_t uc) 16 16 { 17 17 WRITE_ONCE(*ucall_exit_mmio_addr, uc); 18 18 }
+1 -1
tools/testing/selftests/kvm/include/riscv/ucall.h
··· 11 11 { 12 12 } 13 13 14 - static inline void ucall_arch_do_ucall(vm_vaddr_t uc) 14 + static inline void ucall_arch_do_ucall(gva_t uc) 15 15 { 16 16 sbi_ecall(KVM_RISCV_SELFTESTS_SBI_EXT, 17 17 KVM_RISCV_SELFTESTS_SBI_UCALL,
+1 -1
tools/testing/selftests/kvm/include/s390/ucall.h
··· 10 10 { 11 11 } 12 12 13 - static inline void ucall_arch_do_ucall(vm_vaddr_t uc) 13 + static inline void ucall_arch_do_ucall(gva_t uc) 14 14 { 15 15 /* Exit via DIAGNOSE 0x501 (normally used for breakpoints) */ 16 16 asm volatile ("diag 0,%0,0x501" : : "a"(uc) : "memory");
+2 -2
tools/testing/selftests/kvm/include/ucall_common.h
··· 30 30 }; 31 31 32 32 void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa); 33 - void ucall_arch_do_ucall(vm_vaddr_t uc); 33 + void ucall_arch_do_ucall(gva_t uc); 34 34 void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu); 35 35 36 36 void ucall(uint64_t cmd, int nargs, ...); ··· 48 48 * the full ucall() are problematic and/or unwanted. Note, this will come out 49 49 * as UCALL_NONE on the backend. 50 50 */ 51 - #define GUEST_UCALL_NONE() ucall_arch_do_ucall((vm_vaddr_t)NULL) 51 + #define GUEST_UCALL_NONE() ucall_arch_do_ucall((gva_t)NULL) 52 52 53 53 #define GUEST_SYNC_ARGS(stage, arg1, arg2, arg3, arg4) \ 54 54 ucall(UCALL_SYNC, 6, "hello", stage, arg1, arg2, arg3, arg4)
+5 -5
tools/testing/selftests/kvm/include/x86/hyperv.h
··· 254 254 * Issue a Hyper-V hypercall. Returns exception vector raised or 0, 'hv_status' 255 255 * is set to the hypercall status (if no exception occurred). 256 256 */ 257 - static inline uint8_t __hyperv_hypercall(u64 control, vm_vaddr_t input_address, 258 - vm_vaddr_t output_address, 257 + static inline uint8_t __hyperv_hypercall(u64 control, gva_t input_address, 258 + gva_t output_address, 259 259 uint64_t *hv_status) 260 260 { 261 261 uint64_t error_code; ··· 274 274 } 275 275 276 276 /* Issue a Hyper-V hypercall and assert that it succeeded. */ 277 - static inline void hyperv_hypercall(u64 control, vm_vaddr_t input_address, 278 - vm_vaddr_t output_address) 277 + static inline void hyperv_hypercall(u64 control, gva_t input_address, 278 + gva_t output_address) 279 279 { 280 280 uint64_t hv_status; 281 281 uint8_t vector; ··· 347 347 }; 348 348 349 349 struct hyperv_test_pages *vcpu_alloc_hyperv_test_pages(struct kvm_vm *vm, 350 - vm_vaddr_t *p_hv_pages_gva); 350 + gva_t *p_hv_pages_gva); 351 351 352 352 /* HV_X64_MSR_TSC_INVARIANT_CONTROL bits */ 353 353 #define HV_INVARIANT_TSC_EXPOSED BIT_ULL(0)
+3 -3
tools/testing/selftests/kvm/include/x86/kvm_util_arch.h
··· 33 33 struct kvm_mmu; 34 34 35 35 struct kvm_vm_arch { 36 - vm_vaddr_t gdt; 37 - vm_vaddr_t tss; 38 - vm_vaddr_t idt; 36 + gva_t gdt; 37 + gva_t tss; 38 + gva_t idt; 39 39 40 40 uint64_t c_bit; 41 41 uint64_t s_bit;
+1 -1
tools/testing/selftests/kvm/include/x86/svm_util.h
··· 56 56 "clgi\n" \ 57 57 ) 58 58 59 - struct svm_test_data *vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva); 59 + struct svm_test_data *vcpu_alloc_svm(struct kvm_vm *vm, gva_t *p_svm_gva); 60 60 void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp); 61 61 void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa); 62 62
+1 -1
tools/testing/selftests/kvm/include/x86/vmx.h
··· 550 550 }; 551 551 }; 552 552 553 - struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva); 553 + struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, gva_t *p_vmx_gva); 554 554 bool prepare_for_vmx_operation(struct vmx_pages *vmx); 555 555 void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp); 556 556 bool load_vmcs(struct vmx_pages *vmx);
+1 -1
tools/testing/selftests/kvm/kvm_page_table_test.c
··· 292 292 ret = sem_init(&test_stage_completed, 0, 0); 293 293 TEST_ASSERT(ret == 0, "Error in sem_init"); 294 294 295 - current_stage = addr_gva2hva(vm, (vm_vaddr_t)(&guest_test_stage)); 295 + current_stage = addr_gva2hva(vm, (gva_t)(&guest_test_stage)); 296 296 *current_stage = NUM_TEST_STAGES; 297 297 298 298 pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
+9 -9
tools/testing/selftests/kvm/lib/arm64/processor.c
··· 19 19 20 20 #define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN 0xac0000 21 21 22 - static vm_vaddr_t exception_handlers; 22 + static gva_t exception_handlers; 23 23 24 - static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva) 24 + static uint64_t pgd_index(struct kvm_vm *vm, gva_t gva) 25 25 { 26 26 unsigned int shift = (vm->mmu.pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift; 27 27 uint64_t mask = (1UL << (vm->va_bits - shift)) - 1; ··· 29 29 return (gva >> shift) & mask; 30 30 } 31 31 32 - static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva) 32 + static uint64_t pud_index(struct kvm_vm *vm, gva_t gva) 33 33 { 34 34 unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift; 35 35 uint64_t mask = (1UL << (vm->page_shift - 3)) - 1; ··· 40 40 return (gva >> shift) & mask; 41 41 } 42 42 43 - static uint64_t pmd_index(struct kvm_vm *vm, vm_vaddr_t gva) 43 + static uint64_t pmd_index(struct kvm_vm *vm, gva_t gva) 44 44 { 45 45 unsigned int shift = (vm->page_shift - 3) + vm->page_shift; 46 46 uint64_t mask = (1UL << (vm->page_shift - 3)) - 1; ··· 51 51 return (gva >> shift) & mask; 52 52 } 53 53 54 - static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva) 54 + static uint64_t pte_index(struct kvm_vm *vm, gva_t gva) 55 55 { 56 56 uint64_t mask = (1UL << (vm->page_shift - 3)) - 1; 57 57 return (gva >> vm->page_shift) & mask; ··· 181 181 _virt_pg_map(vm, vaddr, paddr, attr_idx); 182 182 } 183 183 184 - uint64_t *virt_get_pte_hva_at_level(struct kvm_vm *vm, vm_vaddr_t gva, int level) 184 + uint64_t *virt_get_pte_hva_at_level(struct kvm_vm *vm, gva_t gva, int level) 185 185 { 186 186 uint64_t *ptep; 187 187 ··· 225 225 exit(EXIT_FAILURE); 226 226 } 227 227 228 - uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva) 228 + uint64_t *virt_get_pte_hva(struct kvm_vm *vm, gva_t gva) 229 229 { 230 230 return virt_get_pte_hva_at_level(vm, gva, 3); 231 231 } 232 232 233 - vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) 233 + 
vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva) 234 234 { 235 235 uint64_t *ptep = virt_get_pte_hva(vm, gva); 236 236 ··· 539 539 vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers), 540 540 vm->page_size, MEM_REGION_DATA); 541 541 542 - *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers; 542 + *(gva_t *)addr_gva2hva(vm, (gva_t)(&exception_handlers)) = vm->handlers; 543 543 } 544 544 545 545 void vm_install_sync_handler(struct kvm_vm *vm, int vector, int ec,
+3 -3
tools/testing/selftests/kvm/lib/arm64/ucall.c
··· 6 6 */ 7 7 #include "kvm_util.h" 8 8 9 - vm_vaddr_t *ucall_exit_mmio_addr; 9 + gva_t *ucall_exit_mmio_addr; 10 10 11 11 void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) 12 12 { 13 - vm_vaddr_t mmio_gva = vm_vaddr_unused_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR); 13 + gva_t mmio_gva = vm_vaddr_unused_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR); 14 14 15 15 virt_map(vm, mmio_gva, mmio_gpa, 1); 16 16 17 17 vm->ucall_mmio_addr = mmio_gpa; 18 18 19 - write_guest_global(vm, ucall_exit_mmio_addr, (vm_vaddr_t *)mmio_gva); 19 + write_guest_global(vm, ucall_exit_mmio_addr, (gva_t *)mmio_gva); 20 20 } 21 21 22 22 void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
+3 -3
tools/testing/selftests/kvm/lib/elf.c
··· 157 157 "memsize of 0,\n" 158 158 " phdr index: %u p_memsz: 0x%" PRIx64, 159 159 n1, (uint64_t) phdr.p_memsz); 160 - vm_vaddr_t seg_vstart = align_down(phdr.p_vaddr, vm->page_size); 161 - vm_vaddr_t seg_vend = phdr.p_vaddr + phdr.p_memsz - 1; 160 + gva_t seg_vstart = align_down(phdr.p_vaddr, vm->page_size); 161 + gva_t seg_vend = phdr.p_vaddr + phdr.p_memsz - 1; 162 162 seg_vend |= vm->page_size - 1; 163 163 size_t seg_size = seg_vend - seg_vstart + 1; 164 164 165 - vm_vaddr_t vaddr = __vm_vaddr_alloc(vm, seg_size, seg_vstart, 165 + gva_t vaddr = __vm_vaddr_alloc(vm, seg_size, seg_vstart, 166 166 MEM_REGION_CODE); 167 167 TEST_ASSERT(vaddr == seg_vstart, "Unable to allocate " 168 168 "virtual memory for segment at requested min addr,\n"
+14 -18
tools/testing/selftests/kvm/lib/kvm_util.c
··· 1386 1386 * TEST_ASSERT failure occurs for invalid input or no area of at least 1387 1387 * sz unallocated bytes >= vaddr_min is available. 1388 1388 */ 1389 - vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, 1390 - vm_vaddr_t vaddr_min) 1389 + gva_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, gva_t vaddr_min) 1391 1390 { 1392 1391 uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift; 1393 1392 ··· 1451 1452 return pgidx_start * vm->page_size; 1452 1453 } 1453 1454 1454 - static vm_vaddr_t ____vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, 1455 - vm_vaddr_t vaddr_min, 1456 - enum kvm_mem_region_type type, 1457 - bool protected) 1455 + static gva_t ____vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, gva_t vaddr_min, 1456 + enum kvm_mem_region_type type, bool protected) 1458 1457 { 1459 1458 uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0); 1460 1459 ··· 1465 1468 * Find an unused range of virtual page addresses of at least 1466 1469 * pages in length. 1467 1470 */ 1468 - vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min); 1471 + gva_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min); 1469 1472 1470 1473 /* Map the virtual pages. 
*/ 1471 - for (vm_vaddr_t vaddr = vaddr_start; pages > 0; 1474 + for (gva_t vaddr = vaddr_start; pages > 0; 1472 1475 pages--, vaddr += vm->page_size, paddr += vm->page_size) { 1473 1476 1474 1477 virt_pg_map(vm, vaddr, paddr); ··· 1477 1480 return vaddr_start; 1478 1481 } 1479 1482 1480 - vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, 1481 - enum kvm_mem_region_type type) 1483 + gva_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, gva_t vaddr_min, 1484 + enum kvm_mem_region_type type) 1482 1485 { 1483 1486 return ____vm_vaddr_alloc(vm, sz, vaddr_min, type, 1484 1487 vm_arch_has_protected_memory(vm)); 1485 1488 } 1486 1489 1487 - vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz, 1488 - vm_vaddr_t vaddr_min, 1489 - enum kvm_mem_region_type type) 1490 + gva_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz, gva_t vaddr_min, 1491 + enum kvm_mem_region_type type) 1490 1492 { 1491 1493 return ____vm_vaddr_alloc(vm, sz, vaddr_min, type, false); 1492 1494 } ··· 1509 1513 * a unique set of pages, with the minimum real allocation being at least 1510 1514 * a page. The allocated physical space comes from the TEST_DATA memory region. 1511 1515 */ 1512 - vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min) 1516 + gva_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, gva_t vaddr_min) 1513 1517 { 1514 1518 return __vm_vaddr_alloc(vm, sz, vaddr_min, MEM_REGION_TEST_DATA); 1515 1519 } ··· 1528 1532 * Allocates at least N system pages worth of bytes within the virtual address 1529 1533 * space of the vm. 
1530 1534 */ 1531 - vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages) 1535 + gva_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages) 1532 1536 { 1533 1537 return vm_vaddr_alloc(vm, nr_pages * getpagesize(), KVM_UTIL_MIN_VADDR); 1534 1538 } 1535 1539 1536 - vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type) 1540 + gva_t __vm_vaddr_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type) 1537 1541 { 1538 1542 return __vm_vaddr_alloc(vm, getpagesize(), KVM_UTIL_MIN_VADDR, type); 1539 1543 } ··· 1552 1556 * Allocates at least one system page worth of bytes within the virtual address 1553 1557 * space of the vm. 1554 1558 */ 1555 - vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm) 1559 + gva_t vm_vaddr_alloc_page(struct kvm_vm *vm) 1556 1560 { 1557 1561 return vm_vaddr_alloc_pages(vm, 1); 1558 1562 } ··· 2157 2161 * Return: 2158 2162 * Equivalent host virtual address 2159 2163 */ 2160 - void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva) 2164 + void *addr_gva2hva(struct kvm_vm *vm, gva_t gva) 2161 2165 { 2162 2166 return addr_gpa2hva(vm, addr_gva2gpa(vm, gva)); 2163 2167 }
+4 -4
tools/testing/selftests/kvm/lib/loongarch/processor.c
··· 13 13 #define LOONGARCH_GUEST_STACK_VADDR_MIN 0x200000 14 14 15 15 static vm_paddr_t invalid_pgtable[4]; 16 - static vm_vaddr_t exception_handlers; 16 + static gva_t exception_handlers; 17 17 18 - static uint64_t virt_pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level) 18 + static uint64_t virt_pte_index(struct kvm_vm *vm, gva_t gva, int level) 19 19 { 20 20 unsigned int shift; 21 21 uint64_t mask; ··· 72 72 return *ptep == invalid_pgtable[level]; 73 73 } 74 74 75 - static uint64_t *virt_populate_pte(struct kvm_vm *vm, vm_vaddr_t gva, int alloc) 75 + static uint64_t *virt_populate_pte(struct kvm_vm *vm, gva_t gva, int alloc) 76 76 { 77 77 int level; 78 78 uint64_t *ptep; ··· 106 106 exit(EXIT_FAILURE); 107 107 } 108 108 109 - vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) 109 + vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva) 110 110 { 111 111 uint64_t *ptep; 112 112
+3 -3
tools/testing/selftests/kvm/lib/loongarch/ucall.c
··· 9 9 * ucall_exit_mmio_addr holds per-VM values (global data is duplicated by each 10 10 * VM), it must not be accessed from host code. 11 11 */ 12 - vm_vaddr_t *ucall_exit_mmio_addr; 12 + gva_t *ucall_exit_mmio_addr; 13 13 14 14 void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) 15 15 { 16 - vm_vaddr_t mmio_gva = vm_vaddr_unused_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR); 16 + gva_t mmio_gva = vm_vaddr_unused_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR); 17 17 18 18 virt_map(vm, mmio_gva, mmio_gpa, 1); 19 19 20 20 vm->ucall_mmio_addr = mmio_gpa; 21 21 22 - write_guest_global(vm, ucall_exit_mmio_addr, (vm_vaddr_t *)mmio_gva); 22 + write_guest_global(vm, ucall_exit_mmio_addr, (gva_t *)mmio_gva); 23 23 } 24 24 25 25 void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
+4 -4
tools/testing/selftests/kvm/lib/riscv/processor.c
··· 15 15 16 16 #define DEFAULT_RISCV_GUEST_STACK_VADDR_MIN 0xac0000 17 17 18 - static vm_vaddr_t exception_handlers; 18 + static gva_t exception_handlers; 19 19 20 20 bool __vcpu_has_ext(struct kvm_vcpu *vcpu, uint64_t ext) 21 21 { ··· 52 52 PGTBL_L3_INDEX_SHIFT, 53 53 }; 54 54 55 - static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level) 55 + static uint64_t pte_index(struct kvm_vm *vm, gva_t gva, int level) 56 56 { 57 57 TEST_ASSERT(level > -1, 58 58 "Negative page table level (%d) not possible", level); ··· 119 119 PGTBL_PTE_PERM_MASK | PGTBL_PTE_VALID_MASK; 120 120 } 121 121 122 - vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) 122 + vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva) 123 123 { 124 124 uint64_t *ptep; 125 125 int level = vm->mmu.pgtable_levels - 1; ··· 452 452 vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers), 453 453 vm->page_size, MEM_REGION_DATA); 454 454 455 - *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers; 455 + *(gva_t *)addr_gva2hva(vm, (gva_t)(&exception_handlers)) = vm->handlers; 456 456 } 457 457 458 458 void vm_install_exception_handler(struct kvm_vm *vm, int vector, exception_handler_fn handler)
+1 -1
tools/testing/selftests/kvm/lib/s390/processor.c
··· 86 86 entry[idx] = gpa; 87 87 } 88 88 89 - vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) 89 + vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva) 90 90 { 91 91 int ri, idx; 92 92 uint64_t *entry;
+4 -4
tools/testing/selftests/kvm/lib/ucall_common.c
··· 29 29 { 30 30 struct ucall_header *hdr; 31 31 struct ucall *uc; 32 - vm_vaddr_t vaddr; 32 + gva_t vaddr; 33 33 int i; 34 34 35 35 vaddr = vm_vaddr_alloc_shared(vm, sizeof(*hdr), KVM_UTIL_MIN_VADDR, ··· 96 96 guest_vsnprintf(uc->buffer, UCALL_BUFFER_LEN, fmt, va); 97 97 va_end(va); 98 98 99 - ucall_arch_do_ucall((vm_vaddr_t)uc->hva); 99 + ucall_arch_do_ucall((gva_t)uc->hva); 100 100 101 101 ucall_free(uc); 102 102 } ··· 113 113 guest_vsnprintf(uc->buffer, UCALL_BUFFER_LEN, fmt, va); 114 114 va_end(va); 115 115 116 - ucall_arch_do_ucall((vm_vaddr_t)uc->hva); 116 + ucall_arch_do_ucall((gva_t)uc->hva); 117 117 118 118 ucall_free(uc); 119 119 } ··· 135 135 WRITE_ONCE(uc->args[i], va_arg(va, uint64_t)); 136 136 va_end(va); 137 137 138 - ucall_arch_do_ucall((vm_vaddr_t)uc->hva); 138 + ucall_arch_do_ucall((gva_t)uc->hva); 139 139 140 140 ucall_free(uc); 141 141 }
+2 -2
tools/testing/selftests/kvm/lib/x86/hyperv.c
··· 76 76 } 77 77 78 78 struct hyperv_test_pages *vcpu_alloc_hyperv_test_pages(struct kvm_vm *vm, 79 - vm_vaddr_t *p_hv_pages_gva) 79 + gva_t *p_hv_pages_gva) 80 80 { 81 - vm_vaddr_t hv_pages_gva = vm_vaddr_alloc_page(vm); 81 + gva_t hv_pages_gva = vm_vaddr_alloc_page(vm); 82 82 struct hyperv_test_pages *hv = addr_gva2hva(vm, hv_pages_gva); 83 83 84 84 /* Setup of a region of guest memory for the VP Assist page. */
+1 -1
tools/testing/selftests/kvm/lib/x86/memstress.c
··· 104 104 void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[]) 105 105 { 106 106 struct kvm_regs regs; 107 - vm_vaddr_t nested_gva; 107 + gva_t nested_gva; 108 108 int vcpu_id; 109 109 110 110 TEST_REQUIRE(kvm_cpu_has_tdp());
+7 -7
tools/testing/selftests/kvm/lib/x86/processor.c
··· 21 21 #define KERNEL_DS 0x10 22 22 #define KERNEL_TSS 0x18 23 23 24 - vm_vaddr_t exception_handlers; 24 + gva_t exception_handlers; 25 25 bool host_cpu_is_amd; 26 26 bool host_cpu_is_intel; 27 27 bool host_cpu_is_hygon; ··· 618 618 segp->present = true; 619 619 } 620 620 621 - vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) 621 + vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva) 622 622 { 623 623 int level = PG_LEVEL_NONE; 624 624 uint64_t *pte = __vm_get_page_table_entry(vm, &vm->mmu, gva, &level); ··· 633 633 return vm_untag_gpa(vm, PTE_GET_PA(*pte)) | (gva & ~HUGEPAGE_MASK(level)); 634 634 } 635 635 636 - static void kvm_seg_set_tss_64bit(vm_vaddr_t base, struct kvm_segment *segp) 636 + static void kvm_seg_set_tss_64bit(gva_t base, struct kvm_segment *segp) 637 637 { 638 638 memset(segp, 0, sizeof(*segp)); 639 639 segp->base = base; ··· 755 755 for (i = 0; i < NUM_INTERRUPTS; i++) 756 756 set_idt_entry(vm, i, (unsigned long)(&idt_handlers)[i], 0, KERNEL_CS); 757 757 758 - *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers; 758 + *(gva_t *)addr_gva2hva(vm, (gva_t)(&exception_handlers)) = vm->handlers; 759 759 760 760 kvm_seg_set_kernel_code_64bit(&seg); 761 761 kvm_seg_fill_gdt_64bit(vm, &seg); ··· 770 770 void vm_install_exception_handler(struct kvm_vm *vm, int vector, 771 771 void (*handler)(struct ex_regs *)) 772 772 { 773 - vm_vaddr_t *handlers = (vm_vaddr_t *)addr_gva2hva(vm, vm->handlers); 773 + gva_t *handlers = (gva_t *)addr_gva2hva(vm, vm->handlers); 774 774 775 - handlers[vector] = (vm_vaddr_t)handler; 775 + handlers[vector] = (gva_t)handler; 776 776 } 777 777 778 778 void assert_on_unhandled_exception(struct kvm_vcpu *vcpu) ··· 825 825 { 826 826 struct kvm_mp_state mp_state; 827 827 struct kvm_regs regs; 828 - vm_vaddr_t stack_vaddr; 828 + gva_t stack_vaddr; 829 829 struct kvm_vcpu *vcpu; 830 830 831 831 stack_vaddr = __vm_vaddr_alloc(vm, DEFAULT_STACK_PGS * getpagesize(),
+2 -2
tools/testing/selftests/kvm/lib/x86/svm.c
··· 28 28 * Pointer to structure with the addresses of the SVM areas. 29 29 */ 30 30 struct svm_test_data * 31 - vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva) 31 + vcpu_alloc_svm(struct kvm_vm *vm, gva_t *p_svm_gva) 32 32 { 33 - vm_vaddr_t svm_gva = vm_vaddr_alloc_page(vm); 33 + gva_t svm_gva = vm_vaddr_alloc_page(vm); 34 34 struct svm_test_data *svm = addr_gva2hva(vm, svm_gva); 35 35 36 36 svm->vmcb = (void *)vm_vaddr_alloc_page(vm);
+1 -1
tools/testing/selftests/kvm/lib/x86/ucall.c
··· 8 8 9 9 #define UCALL_PIO_PORT ((uint16_t)0x1000) 10 10 11 - void ucall_arch_do_ucall(vm_vaddr_t uc) 11 + void ucall_arch_do_ucall(gva_t uc) 12 12 { 13 13 /* 14 14 * FIXME: Revert this hack (the entire commit that added it) once nVMX
+2 -2
tools/testing/selftests/kvm/lib/x86/vmx.c
··· 79 79 * Pointer to structure with the addresses of the VMX areas. 80 80 */ 81 81 struct vmx_pages * 82 - vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva) 82 + vcpu_alloc_vmx(struct kvm_vm *vm, gva_t *p_vmx_gva) 83 83 { 84 - vm_vaddr_t vmx_gva = vm_vaddr_alloc_page(vm); 84 + gva_t vmx_gva = vm_vaddr_alloc_page(vm); 85 85 struct vmx_pages *vmx = addr_gva2hva(vm, vmx_gva); 86 86 87 87 /* Setup of a region of guest memory for the vmxon region. */
+1 -1
tools/testing/selftests/kvm/riscv/sbi_pmu_test.c
··· 610 610 virt_map(vm, PMU_SNAPSHOT_GPA_BASE, PMU_SNAPSHOT_GPA_BASE, 1); 611 611 612 612 snapshot_gva = (void *)(PMU_SNAPSHOT_GPA_BASE); 613 - snapshot_gpa = addr_gva2gpa(vcpu->vm, (vm_vaddr_t)snapshot_gva); 613 + snapshot_gpa = addr_gva2gpa(vcpu->vm, (gva_t)snapshot_gva); 614 614 sync_global_to_guest(vcpu->vm, snapshot_gva); 615 615 sync_global_to_guest(vcpu->vm, snapshot_gpa); 616 616 }
+3 -3
tools/testing/selftests/kvm/s390/memop.c
··· 878 878 static void test_copy_key_fetch_prot_override(void) 879 879 { 880 880 struct test_default t = test_default_init(guest_copy_key_fetch_prot_override); 881 - vm_vaddr_t guest_0_page, guest_last_page; 881 + gva_t guest_0_page, guest_last_page; 882 882 883 883 guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0); 884 884 guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr); ··· 917 917 static void test_errors_key_fetch_prot_override_not_enabled(void) 918 918 { 919 919 struct test_default t = test_default_init(guest_copy_key_fetch_prot_override); 920 - vm_vaddr_t guest_0_page, guest_last_page; 920 + gva_t guest_0_page, guest_last_page; 921 921 922 922 guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0); 923 923 guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr); ··· 938 938 static void test_errors_key_fetch_prot_override_enabled(void) 939 939 { 940 940 struct test_default t = test_default_init(guest_copy_key_fetch_prot_override); 941 - vm_vaddr_t guest_0_page, guest_last_page; 941 + gva_t guest_0_page, guest_last_page; 942 942 943 943 guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0); 944 944 guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
+3 -3
tools/testing/selftests/kvm/s390/tprot.c
··· 207 207 struct kvm_vcpu *vcpu; 208 208 struct kvm_vm *vm; 209 209 struct kvm_run *run; 210 - vm_vaddr_t guest_0_page; 210 + gva_t guest_0_page; 211 211 212 212 ksft_print_header(); 213 213 ksft_set_plan(STAGE_END); ··· 216 216 run = vcpu->run; 217 217 218 218 HOST_SYNC(vcpu, STAGE_INIT_SIMPLE); 219 - mprotect(addr_gva2hva(vm, (vm_vaddr_t)pages), PAGE_SIZE * 2, PROT_READ); 219 + mprotect(addr_gva2hva(vm, (gva_t)pages), PAGE_SIZE * 2, PROT_READ); 220 220 HOST_SYNC(vcpu, TEST_SIMPLE); 221 221 222 222 guest_0_page = vm_vaddr_alloc(vm, PAGE_SIZE, 0); ··· 229 229 HOST_SYNC(vcpu, STAGE_INIT_FETCH_PROT_OVERRIDE); 230 230 } 231 231 if (guest_0_page == 0) 232 - mprotect(addr_gva2hva(vm, (vm_vaddr_t)0), PAGE_SIZE, PROT_READ); 232 + mprotect(addr_gva2hva(vm, (gva_t)0), PAGE_SIZE, PROT_READ); 233 233 run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE; 234 234 run->kvm_dirty_regs = KVM_SYNC_CRS; 235 235 HOST_SYNC(vcpu, TEST_FETCH_PROT_OVERRIDE);
+1 -1
tools/testing/selftests/kvm/steal_time.c
··· 309 309 { 310 310 /* ST_GPA_BASE is identity mapped */ 311 311 st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE); 312 - st_gpa[i] = addr_gva2gpa(vcpu->vm, (vm_vaddr_t)st_gva[i]); 312 + st_gpa[i] = addr_gva2gpa(vcpu->vm, (gva_t)st_gva[i]); 313 313 sync_global_to_guest(vcpu->vm, st_gva[i]); 314 314 sync_global_to_guest(vcpu->vm, st_gpa[i]); 315 315 }
+1 -1
tools/testing/selftests/kvm/x86/amx_test.c
··· 236 236 struct kvm_x86_state *state; 237 237 struct kvm_x86_state *tile_state = NULL; 238 238 int xsave_restore_size; 239 - vm_vaddr_t amx_cfg, tiledata, xstate; 239 + gva_t amx_cfg, tiledata, xstate; 240 240 struct ucall uc; 241 241 int ret; 242 242
+1 -1
tools/testing/selftests/kvm/x86/aperfmperf_test.c
··· 123 123 { 124 124 const bool has_nested = kvm_cpu_has(X86_FEATURE_SVM) || kvm_cpu_has(X86_FEATURE_VMX); 125 125 uint64_t host_aperf_before, host_mperf_before; 126 - vm_vaddr_t nested_test_data_gva; 126 + gva_t nested_test_data_gva; 127 127 struct kvm_vcpu *vcpu; 128 128 struct kvm_vm *vm; 129 129 int msr_fd, cpu, i;
+3 -3
tools/testing/selftests/kvm/x86/cpuid_test.c
··· 140 140 } 141 141 } 142 142 143 - struct kvm_cpuid2 *vcpu_alloc_cpuid(struct kvm_vm *vm, vm_vaddr_t *p_gva, struct kvm_cpuid2 *cpuid) 143 + struct kvm_cpuid2 *vcpu_alloc_cpuid(struct kvm_vm *vm, gva_t *p_gva, struct kvm_cpuid2 *cpuid) 144 144 { 145 145 int size = sizeof(*cpuid) + cpuid->nent * sizeof(cpuid->entries[0]); 146 - vm_vaddr_t gva = vm_vaddr_alloc(vm, size, KVM_UTIL_MIN_VADDR); 146 + gva_t gva = vm_vaddr_alloc(vm, size, KVM_UTIL_MIN_VADDR); 147 147 struct kvm_cpuid2 *guest_cpuids = addr_gva2hva(vm, gva); 148 148 149 149 memcpy(guest_cpuids, cpuid, size); ··· 217 217 int main(void) 218 218 { 219 219 struct kvm_vcpu *vcpu; 220 - vm_vaddr_t cpuid_gva; 220 + gva_t cpuid_gva; 221 221 struct kvm_vm *vm; 222 222 int stage; 223 223
+1 -1
tools/testing/selftests/kvm/x86/evmcs_smm_controls_test.c
··· 73 73 74 74 int main(int argc, char *argv[]) 75 75 { 76 - vm_vaddr_t vmx_pages_gva = 0, hv_pages_gva = 0; 76 + gva_t vmx_pages_gva = 0, hv_pages_gva = 0; 77 77 struct hyperv_test_pages *hv; 78 78 struct hv_enlightened_vmcs *evmcs; 79 79 struct kvm_vcpu *vcpu;
+1 -1
tools/testing/selftests/kvm/x86/hyperv_clock.c
··· 208 208 struct kvm_vcpu *vcpu; 209 209 struct kvm_vm *vm; 210 210 struct ucall uc; 211 - vm_vaddr_t tsc_page_gva; 211 + gva_t tsc_page_gva; 212 212 int stage; 213 213 214 214 TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_TIME));
+3 -3
tools/testing/selftests/kvm/x86/hyperv_evmcs.c
··· 76 76 } 77 77 78 78 void guest_code(struct vmx_pages *vmx_pages, struct hyperv_test_pages *hv_pages, 79 - vm_vaddr_t hv_hcall_page_gpa) 79 + gva_t hv_hcall_page_gpa) 80 80 { 81 81 #define L2_GUEST_STACK_SIZE 64 82 82 unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; ··· 231 231 232 232 int main(int argc, char *argv[]) 233 233 { 234 - vm_vaddr_t vmx_pages_gva = 0, hv_pages_gva = 0; 235 - vm_vaddr_t hcall_page; 234 + gva_t vmx_pages_gva = 0, hv_pages_gva = 0; 235 + gva_t hcall_page; 236 236 237 237 struct kvm_vcpu *vcpu; 238 238 struct kvm_vm *vm;
+3 -3
tools/testing/selftests/kvm/x86/hyperv_extended_hypercalls.c
··· 16 16 #define EXT_CAPABILITIES 0xbull 17 17 18 18 static void guest_code(vm_paddr_t in_pg_gpa, vm_paddr_t out_pg_gpa, 19 - vm_vaddr_t out_pg_gva) 19 + gva_t out_pg_gva) 20 20 { 21 21 uint64_t *output_gva; 22 22 ··· 35 35 36 36 int main(void) 37 37 { 38 - vm_vaddr_t hcall_out_page; 39 - vm_vaddr_t hcall_in_page; 38 + gva_t hcall_out_page; 39 + gva_t hcall_in_page; 40 40 struct kvm_vcpu *vcpu; 41 41 struct kvm_run *run; 42 42 struct kvm_vm *vm;
+3 -3
tools/testing/selftests/kvm/x86/hyperv_features.c
··· 82 82 GUEST_DONE(); 83 83 } 84 84 85 - static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall) 85 + static void guest_hcall(gva_t pgs_gpa, struct hcall_data *hcall) 86 86 { 87 87 u64 res, input, output; 88 88 uint8_t vector; ··· 134 134 struct kvm_vm *vm; 135 135 struct ucall uc; 136 136 int stage = 0; 137 - vm_vaddr_t msr_gva; 137 + gva_t msr_gva; 138 138 struct msr_data *msr; 139 139 bool has_invtsc = kvm_cpu_has(X86_FEATURE_INVTSC); 140 140 ··· 523 523 struct kvm_vm *vm; 524 524 struct ucall uc; 525 525 int stage = 0; 526 - vm_vaddr_t hcall_page, hcall_params; 526 + gva_t hcall_page, hcall_params; 527 527 struct hcall_data *hcall; 528 528 529 529 while (true) {
+4 -4
tools/testing/selftests/kvm/x86/hyperv_ipi.c
··· 45 45 struct hv_vpset vp_set; 46 46 }; 47 47 48 - static inline void hv_init(vm_vaddr_t pgs_gpa) 48 + static inline void hv_init(gva_t pgs_gpa) 49 49 { 50 50 wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID); 51 51 wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa); 52 52 } 53 53 54 - static void receiver_code(void *hcall_page, vm_vaddr_t pgs_gpa) 54 + static void receiver_code(void *hcall_page, gva_t pgs_gpa) 55 55 { 56 56 u32 vcpu_id; 57 57 ··· 85 85 asm volatile("nop"); 86 86 } 87 87 88 - static void sender_guest_code(void *hcall_page, vm_vaddr_t pgs_gpa) 88 + static void sender_guest_code(void *hcall_page, gva_t pgs_gpa) 89 89 { 90 90 struct hv_send_ipi *ipi = (struct hv_send_ipi *)hcall_page; 91 91 struct hv_send_ipi_ex *ipi_ex = (struct hv_send_ipi_ex *)hcall_page; ··· 243 243 { 244 244 struct kvm_vm *vm; 245 245 struct kvm_vcpu *vcpu[3]; 246 - vm_vaddr_t hcall_page; 246 + gva_t hcall_page; 247 247 pthread_t threads[2]; 248 248 int stage = 1, r; 249 249 struct ucall uc;
+3 -3
tools/testing/selftests/kvm/x86/hyperv_svm_test.c
··· 67 67 68 68 static void __attribute__((__flatten__)) guest_code(struct svm_test_data *svm, 69 69 struct hyperv_test_pages *hv_pages, 70 - vm_vaddr_t pgs_gpa) 70 + gva_t pgs_gpa) 71 71 { 72 72 unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; 73 73 struct vmcb *vmcb = svm->vmcb; ··· 149 149 150 150 int main(int argc, char *argv[]) 151 151 { 152 - vm_vaddr_t nested_gva = 0, hv_pages_gva = 0; 153 - vm_vaddr_t hcall_page; 152 + gva_t nested_gva = 0, hv_pages_gva = 0; 153 + gva_t hcall_page; 154 154 struct kvm_vcpu *vcpu; 155 155 struct kvm_vm *vm; 156 156 struct ucall uc;
+6 -6
tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c
··· 61 61 * - GVAs of the test pages' PTEs 62 62 */ 63 63 struct test_data { 64 - vm_vaddr_t hcall_gva; 64 + gva_t hcall_gva; 65 65 vm_paddr_t hcall_gpa; 66 - vm_vaddr_t test_pages; 67 - vm_vaddr_t test_pages_pte[NTEST_PAGES]; 66 + gva_t test_pages; 67 + gva_t test_pages_pte[NTEST_PAGES]; 68 68 }; 69 69 70 70 /* 'Worker' vCPU code checking the contents of the test page */ 71 - static void worker_guest_code(vm_vaddr_t test_data) 71 + static void worker_guest_code(gva_t test_data) 72 72 { 73 73 struct test_data *data = (struct test_data *)test_data; 74 74 u32 vcpu_id = rdmsr(HV_X64_MSR_VP_INDEX); ··· 196 196 #define TESTVAL2 0x0202020202020202 197 197 198 198 /* Main vCPU doing the test */ 199 - static void sender_guest_code(vm_vaddr_t test_data) 199 + static void sender_guest_code(gva_t test_data) 200 200 { 201 201 struct test_data *data = (struct test_data *)test_data; 202 202 struct hv_tlb_flush *flush = (struct hv_tlb_flush *)data->hcall_gva; ··· 581 581 struct kvm_vm *vm; 582 582 struct kvm_vcpu *vcpu[3]; 583 583 pthread_t threads[2]; 584 - vm_vaddr_t test_data_page, gva; 584 + gva_t test_data_page, gva; 585 585 vm_paddr_t gpa; 586 586 uint64_t *pte; 587 587 struct test_data *data;
+1 -1
tools/testing/selftests/kvm/x86/kvm_buslock_test.c
··· 73 73 int main(int argc, char *argv[]) 74 74 { 75 75 const bool has_nested = kvm_cpu_has(X86_FEATURE_SVM) || kvm_cpu_has(X86_FEATURE_VMX); 76 - vm_vaddr_t nested_test_data_gva; 76 + gva_t nested_test_data_gva; 77 77 struct kvm_vcpu *vcpu; 78 78 struct kvm_run *run; 79 79 struct kvm_vm *vm;
+1 -1
tools/testing/selftests/kvm/x86/kvm_clock_test.c
··· 135 135 int main(void) 136 136 { 137 137 struct kvm_vcpu *vcpu; 138 - vm_vaddr_t pvti_gva; 138 + gva_t pvti_gva; 139 139 vm_paddr_t pvti_gpa; 140 140 struct kvm_vm *vm; 141 141 int flags;
+1 -1
tools/testing/selftests/kvm/x86/nested_close_kvm_test.c
··· 67 67 68 68 int main(int argc, char *argv[]) 69 69 { 70 - vm_vaddr_t guest_gva; 70 + gva_t guest_gva; 71 71 struct kvm_vcpu *vcpu; 72 72 struct kvm_vm *vm; 73 73
+5 -5
tools/testing/selftests/kvm/x86/nested_dirty_log_test.c
··· 47 47 #define TEST_SYNC_WRITE_FAULT BIT(1) 48 48 #define TEST_SYNC_NO_FAULT BIT(2) 49 49 50 - static void l2_guest_code(vm_vaddr_t base) 50 + static void l2_guest_code(gva_t base) 51 51 { 52 - vm_vaddr_t page0 = TEST_GUEST_ADDR(base, 0); 53 - vm_vaddr_t page1 = TEST_GUEST_ADDR(base, 1); 52 + gva_t page0 = TEST_GUEST_ADDR(base, 0); 53 + gva_t page1 = TEST_GUEST_ADDR(base, 1); 54 54 55 55 READ_ONCE(*(u64 *)page0); 56 56 GUEST_SYNC(page0 | TEST_SYNC_READ_FAULT); ··· 143 143 static void test_handle_ucall_sync(struct kvm_vm *vm, u64 arg, 144 144 unsigned long *bmap) 145 145 { 146 - vm_vaddr_t gva = arg & ~(PAGE_SIZE - 1); 146 + gva_t gva = arg & ~(PAGE_SIZE - 1); 147 147 int page_nr, i; 148 148 149 149 /* ··· 198 198 199 199 static void test_dirty_log(bool nested_tdp) 200 200 { 201 - vm_vaddr_t nested_gva = 0; 201 + gva_t nested_gva = 0; 202 202 unsigned long *bmap; 203 203 struct kvm_vcpu *vcpu; 204 204 struct kvm_vm *vm;
+1 -1
tools/testing/selftests/kvm/x86/nested_emulation_test.c
··· 122 122 123 123 int main(int argc, char *argv[]) 124 124 { 125 - vm_vaddr_t nested_test_data_gva; 125 + gva_t nested_test_data_gva; 126 126 struct kvm_vcpu *vcpu; 127 127 struct kvm_vm *vm; 128 128
+1 -1
tools/testing/selftests/kvm/x86/nested_exceptions_test.c
··· 216 216 */ 217 217 int main(int argc, char *argv[]) 218 218 { 219 - vm_vaddr_t nested_test_data_gva; 219 + gva_t nested_test_data_gva; 220 220 struct kvm_vcpu_events events; 221 221 struct kvm_vcpu *vcpu; 222 222 struct kvm_vm *vm;
+1 -1
tools/testing/selftests/kvm/x86/nested_invalid_cr3_test.c
··· 78 78 { 79 79 struct kvm_vcpu *vcpu; 80 80 struct kvm_vm *vm; 81 - vm_vaddr_t guest_gva = 0; 81 + gva_t guest_gva = 0; 82 82 83 83 TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX) || 84 84 kvm_cpu_has(X86_FEATURE_SVM));
+1 -1
tools/testing/selftests/kvm/x86/nested_tsc_adjust_test.c
··· 125 125 126 126 int main(int argc, char *argv[]) 127 127 { 128 - vm_vaddr_t nested_gva; 128 + gva_t nested_gva; 129 129 struct kvm_vcpu *vcpu; 130 130 131 131 TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX) ||
+1 -1
tools/testing/selftests/kvm/x86/nested_tsc_scaling_test.c
··· 152 152 { 153 153 struct kvm_vcpu *vcpu; 154 154 struct kvm_vm *vm; 155 - vm_vaddr_t guest_gva = 0; 155 + gva_t guest_gva = 0; 156 156 157 157 uint64_t tsc_start, tsc_end; 158 158 uint64_t tsc_khz;
+1 -1
tools/testing/selftests/kvm/x86/nested_vmsave_vmload_test.c
··· 128 128 129 129 int main(int argc, char *argv[]) 130 130 { 131 - vm_vaddr_t nested_gva = 0; 131 + gva_t nested_gva = 0; 132 132 struct vmcb *test_vmcb[2]; 133 133 struct kvm_vcpu *vcpu; 134 134 struct kvm_vm *vm;
+1 -1
tools/testing/selftests/kvm/x86/sev_smoke_test.c
··· 108 108 { 109 109 struct kvm_vcpu *vcpu; 110 110 struct kvm_vm *vm; 111 - vm_vaddr_t gva; 111 + gva_t gva; 112 112 void *hva; 113 113 114 114 double x87val = M_PI;
+1 -1
tools/testing/selftests/kvm/x86/smm_test.c
··· 113 113 114 114 int main(int argc, char *argv[]) 115 115 { 116 - vm_vaddr_t nested_gva = 0; 116 + gva_t nested_gva = 0; 117 117 118 118 struct kvm_vcpu *vcpu; 119 119 struct kvm_regs regs;
+1 -1
tools/testing/selftests/kvm/x86/state_test.c
··· 258 258 int main(int argc, char *argv[]) 259 259 { 260 260 uint64_t *xstate_bv, saved_xstate_bv; 261 - vm_vaddr_t nested_gva = 0; 261 + gva_t nested_gva = 0; 262 262 struct kvm_cpuid2 empty_cpuid = {}; 263 263 struct kvm_regs regs1, regs2; 264 264 struct kvm_vcpu *vcpu, *vcpuN;
+1 -1
tools/testing/selftests/kvm/x86/svm_int_ctl_test.c
··· 82 82 int main(int argc, char *argv[]) 83 83 { 84 84 struct kvm_vcpu *vcpu; 85 - vm_vaddr_t svm_gva; 85 + gva_t svm_gva; 86 86 struct kvm_vm *vm; 87 87 struct ucall uc; 88 88
+1 -1
tools/testing/selftests/kvm/x86/svm_lbr_nested_state.c
··· 97 97 { 98 98 struct kvm_x86_state *state = NULL; 99 99 struct kvm_vcpu *vcpu; 100 - vm_vaddr_t svm_gva; 101 100 struct kvm_vm *vm; 102 101 struct ucall uc; 102 + gva_t svm_gva; 103 103 104 104 pr_info("Testing with nested LBRV %s\n", nested_lbrv ? "enabled" : "disabled"); 105 105
+1 -1
tools/testing/selftests/kvm/x86/svm_nested_clear_efer_svme.c
··· 38 38 { 39 39 struct kvm_vcpu *vcpu; 40 40 struct kvm_vm *vm; 41 - vm_vaddr_t nested_gva = 0; 41 + gva_t nested_gva = 0; 42 42 43 43 TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM)); 44 44
+1 -1
tools/testing/selftests/kvm/x86/svm_nested_shutdown_test.c
··· 42 42 int main(int argc, char *argv[]) 43 43 { 44 44 struct kvm_vcpu *vcpu; 45 - vm_vaddr_t svm_gva; 45 + gva_t svm_gva; 46 46 struct kvm_vm *vm; 47 47 48 48 TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));
+2 -2
tools/testing/selftests/kvm/x86/svm_nested_soft_inject_test.c
··· 144 144 { 145 145 struct kvm_vcpu *vcpu; 146 146 struct kvm_vm *vm; 147 - vm_vaddr_t svm_gva; 148 - vm_vaddr_t idt_alt_vm; 147 + gva_t svm_gva; 148 + gva_t idt_alt_vm; 149 149 struct kvm_guest_debug debug; 150 150 151 151 pr_info("Running %s test\n", is_nmi ? "NMI" : "soft int");
+3 -3
tools/testing/selftests/kvm/x86/svm_nested_vmcb12_gpa.c
··· 74 74 75 75 static void test_invalid_vmcb12(struct kvm_vcpu *vcpu) 76 76 { 77 - vm_vaddr_t nested_gva = 0; 77 + gva_t nested_gva = 0; 78 78 struct ucall uc; 79 79 80 80 ··· 90 90 91 91 static void test_unmappable_vmcb12(struct kvm_vcpu *vcpu) 92 92 { 93 - vm_vaddr_t nested_gva = 0; 93 + gva_t nested_gva = 0; 94 94 95 95 vcpu_alloc_svm(vcpu->vm, &nested_gva); 96 96 vcpu_args_set(vcpu, 2, nested_gva, unmappable_gpa(vcpu)); ··· 103 103 static void test_unmappable_vmcb12_vmexit(struct kvm_vcpu *vcpu) 104 104 { 105 105 struct kvm_x86_state *state; 106 - vm_vaddr_t nested_gva = 0; 106 + gva_t nested_gva = 0; 107 107 struct ucall uc; 108 108 109 109 /*
+1 -1
tools/testing/selftests/kvm/x86/svm_vmcall_test.c
··· 36 36 int main(int argc, char *argv[]) 37 37 { 38 38 struct kvm_vcpu *vcpu; 39 - vm_vaddr_t svm_gva; 39 + gva_t svm_gva; 40 40 struct kvm_vm *vm; 41 41 42 42 TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));
+2 -2
tools/testing/selftests/kvm/x86/triple_fault_event_test.c
··· 72 72 73 73 74 74 if (has_vmx) { 75 - vm_vaddr_t vmx_pages_gva; 75 + gva_t vmx_pages_gva; 76 76 77 77 vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code_vmx); 78 78 vcpu_alloc_vmx(vm, &vmx_pages_gva); 79 79 vcpu_args_set(vcpu, 1, vmx_pages_gva); 80 80 } else { 81 - vm_vaddr_t svm_gva; 81 + gva_t svm_gva; 82 82 83 83 vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code_svm); 84 84 vcpu_alloc_svm(vm, &svm_gva);
+1 -1
tools/testing/selftests/kvm/x86/vmx_apic_access_test.c
··· 72 72 int main(int argc, char *argv[]) 73 73 { 74 74 unsigned long apic_access_addr = ~0ul; 75 - vm_vaddr_t vmx_pages_gva; 75 + gva_t vmx_pages_gva; 76 76 unsigned long high_gpa; 77 77 struct vmx_pages *vmx; 78 78 bool done = false;
+1 -1
tools/testing/selftests/kvm/x86/vmx_apicv_updates_test.c
··· 110 110 111 111 int main(int argc, char *argv[]) 112 112 { 113 - vm_vaddr_t vmx_pages_gva; 113 + gva_t vmx_pages_gva; 114 114 struct vmx_pages *vmx; 115 115 struct kvm_vcpu *vcpu; 116 116 struct kvm_vm *vm;
+1 -1
tools/testing/selftests/kvm/x86/vmx_invalid_nested_guest_state.c
··· 52 52 53 53 int main(int argc, char *argv[]) 54 54 { 55 - vm_vaddr_t vmx_pages_gva; 55 + gva_t vmx_pages_gva; 56 56 struct kvm_sregs sregs; 57 57 struct kvm_vcpu *vcpu; 58 58 struct kvm_run *run;
+1 -1
tools/testing/selftests/kvm/x86/vmx_nested_la57_state_test.c
··· 73 73 74 74 int main(int argc, char *argv[]) 75 75 { 76 - vm_vaddr_t vmx_pages_gva = 0; 76 + gva_t vmx_pages_gva = 0; 77 77 struct kvm_vm *vm; 78 78 struct kvm_vcpu *vcpu; 79 79 struct kvm_x86_state *state;
+1 -1
tools/testing/selftests/kvm/x86/vmx_preemption_timer_test.c
··· 152 152 153 153 int main(int argc, char *argv[]) 154 154 { 155 - vm_vaddr_t vmx_pages_gva = 0; 155 + gva_t vmx_pages_gva = 0; 156 156 157 157 struct kvm_regs regs1, regs2; 158 158 struct kvm_vm *vm;
+1 -1
tools/testing/selftests/kvm/x86/xapic_ipi_test.c
··· 393 393 int run_secs = 0; 394 394 int delay_usecs = 0; 395 395 struct test_data_page *data; 396 - vm_vaddr_t test_data_page_vaddr; 396 + gva_t test_data_page_vaddr; 397 397 bool migrate = false; 398 398 pthread_t threads[2]; 399 399 struct thread_params params[2];