Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

KVM: selftests: Replace "vaddr" with "gva" throughout

Replace all variations of "vaddr" variables in KVM selftests with "gva"
to complete the conversion from vm_vaddr_t to gva_t. The one exception
is the ELF structures, as those fields are not specific to guest virtual
addresses.

Opportunistically use gva_t instead of u64 for relevant variables, and
fix up indentation as appropriate.

No functional change intended.

Link: https://patch.msgid.link/20260420212004.3938325-17-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
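
[Editor's note] For readers outside the selftests tree: gva_t and its
physical-address counterpart are plain integer typedefs, so the rename buys
readability and greppability rather than compiler-enforced type safety. A
minimal sketch of the distinction (the exact definitions are an assumption,
not quoted from this patch):

/*
 * Hedged sketch of the selftests address typedefs; the real definitions
 * live in the KVM selftests headers and may differ in detail.
 */
typedef uint64_t gpa_t;   /* guest physical address */
typedef uint64_t gva_t;   /* guest virtual address */

Because both alias a 64-bit integer, a swapped argument still compiles; the
payoff is that a reader can tell at a glance which address space a variable
lives in.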

16 files changed, 150 insertions(+), 182 deletions(-)
tools/testing/selftests/kvm/access_tracking_perf_test.c (+3 -3)

···
 #define PAGEMAP_PRESENT     (1ULL << 63)
 #define PAGEMAP_PFN_MASK    ((1ULL << 55) - 1)

-static u64 lookup_pfn(int pagemap_fd, struct kvm_vm *vm, u64 gva)
+static u64 lookup_pfn(int pagemap_fd, struct kvm_vm *vm, gva_t gva)
 {
        u64 hva = (u64)addr_gva2hva(vm, gva);
        u64 entry;
···
                                struct memstress_vcpu_args *vcpu_args)
 {
        int vcpu_idx = vcpu_args->vcpu_idx;
-       u64 base_gva = vcpu_args->gva;
+       gva_t base_gva = vcpu_args->gva;
        u64 pages = vcpu_args->pages;
        u64 page;
        u64 still_idle = 0;
···
        TEST_ASSERT(pagemap_fd > 0, "Failed to open pagemap.");

        for (page = 0; page < pages; page++) {
-               u64 gva = base_gva + page * memstress_args.guest_page_size;
+               gva_t gva = base_gva + page * memstress_args.guest_page_size;
                u64 pfn = lookup_pfn(pagemap_fd, vm, gva);

                if (!pfn) {
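
[Editor's note] Context for the hunk above: lookup_pfn() parses Linux's
/proc/<pid>/pagemap format, where each 64-bit entry carries the PFN in bits
0-54 and a page-present flag in bit 63 (Documentation/admin-guide/mm/pagemap.rst).
A self-contained sketch of that lookup with error handling trimmed; the
function name is mine, and only the two bit constants mirror the test:

#include <stdint.h>
#include <unistd.h>

/* Toy pagemap lookup; the bit positions mirror PAGEMAP_PRESENT and
 * PAGEMAP_PFN_MASK in the hunk above. Returns 0 if the page is absent. */
static uint64_t hva_to_pfn(int pagemap_fd, uint64_t hva)
{
        uint64_t entry;
        off_t offset = (off_t)(hva / getpagesize()) * sizeof(entry);

        if (pread(pagemap_fd, &entry, sizeof(entry), offset) != sizeof(entry))
                return 0;
        if (!(entry & (1ULL << 63)))            /* page not present */
                return 0;
        return entry & ((1ULL << 55) - 1);      /* PFN field, bits 0-54 */
}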
tools/testing/selftests/kvm/arm64/page_fault_test.c (+2 -2)

···
        struct test_desc *test_desc;
 };

-static inline void flush_tlb_page(u64 vaddr)
+static inline void flush_tlb_page(gva_t gva)
 {
-       u64 page = vaddr >> 12;
+       gva_t page = gva >> 12;

        dsb(ishst);
        asm volatile("tlbi vaae1is, %0" :: "r" (page));
tools/testing/selftests/kvm/include/kvm_util.h (+10 -22)

···
 void vm_mem_region_delete(struct kvm_vm *vm, u32 slot);
 struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, u32 vcpu_id);
 void vm_populate_gva_bitmap(struct kvm_vm *vm);
-gva_t vm_unused_gva_gap(struct kvm_vm *vm, size_t sz, gva_t vaddr_min);
-gva_t vm_alloc(struct kvm_vm *vm, size_t sz, gva_t vaddr_min);
-gva_t __vm_alloc(struct kvm_vm *vm, size_t sz, gva_t vaddr_min,
+gva_t vm_unused_gva_gap(struct kvm_vm *vm, size_t sz, gva_t min_gva);
+gva_t vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva);
+gva_t __vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva,
                 enum kvm_mem_region_type type);
-gva_t vm_alloc_shared(struct kvm_vm *vm, size_t sz, gva_t vaddr_min,
+gva_t vm_alloc_shared(struct kvm_vm *vm, size_t sz, gva_t min_gva,
                      enum kvm_mem_region_type type);
 gva_t vm_alloc_pages(struct kvm_vm *vm, int nr_pages);
 gva_t __vm_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type);
 gva_t vm_alloc_page(struct kvm_vm *vm);

-void virt_map(struct kvm_vm *vm, u64 vaddr, u64 paddr,
+void virt_map(struct kvm_vm *vm, gva_t gva, u64 paddr,
              unsigned int npages);
 void *addr_gpa2hva(struct kvm_vm *vm, gpa_t gpa);
 void *addr_gva2hva(struct kvm_vm *vm, gva_t gva);
···
 }

 /*
- * VM Virtual Page Map
- *
- * Input Args:
- *   vm - Virtual Machine
- *   vaddr - VM Virtual Address
- *   paddr - VM Physical Address
- *   memslot - Memory region slot for new virtual translation tables
- *
- * Output Args: None
- *
- * Return: None
- *
  * Within @vm, creates a virtual translation for the page starting
- * at @vaddr to the page starting at @paddr.
+ * at @gva to the page starting at @paddr.
  */
-void virt_arch_pg_map(struct kvm_vm *vm, u64 vaddr, u64 paddr);
+void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, u64 paddr);

-static inline void virt_pg_map(struct kvm_vm *vm, u64 vaddr, u64 paddr)
+static inline void virt_pg_map(struct kvm_vm *vm, gva_t gva, u64 paddr)
 {
-       virt_arch_pg_map(vm, vaddr, paddr);
-       sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift);
+       virt_arch_pg_map(vm, gva, paddr);
+       sparsebit_set(vm->vpages_mapped, gva >> vm->page_shift);
 }

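
[Editor's note] To see the renamed declarations in context, a hypothetical
caller (the test body and the 0x10000 constants are invented for
illustration; only the signatures above come from the patch):

#include <string.h>
/* Assumes the selftests' kvm_util.h, which declares the API shown above. */

static void touch_scratch_page(struct kvm_vm *vm)
{
        /* Let the library pick an unused guest virtual address... */
        gva_t gva = vm_alloc(vm, vm->page_size, KVM_UTIL_MIN_VADDR);

        /* ...and fill the page through its host-side alias. */
        memset(addr_gva2hva(vm, gva), 0xaa, vm->page_size);

        /* Alternatively, pin an explicit one-page gva -> gpa translation. */
        virt_map(vm, 0x10000, 0x10000, 1);
}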
tools/testing/selftests/kvm/include/memstress.h (+1 -1)

···

 struct memstress_vcpu_args {
        u64 gpa;
-       u64 gva;
+       gva_t gva;
        u64 pages;

        /* Only used by the host userspace part of the vCPU thread */
tools/testing/selftests/kvm/include/x86/processor.h (+3 -3)

···
        return !!get_kvm_amd_param_integer("lbrv");
 }

-u64 *vm_get_pte(struct kvm_vm *vm, u64 vaddr);
+u64 *vm_get_pte(struct kvm_vm *vm, gva_t gva);

 u64 kvm_hypercall(u64 nr, u64 a0, u64 a1, u64 a2, u64 a3);
 u64 __xen_hypercall(u64 nr, u64 a0, void *a1);
···
 void tdp_mmu_init(struct kvm_vm *vm, int pgtable_levels,
                   struct pte_masks *pte_masks);

-void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, u64 vaddr,
+void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, gva_t gva,
                    u64 paddr, int level);
-void virt_map_level(struct kvm_vm *vm, u64 vaddr, u64 paddr,
+void virt_map_level(struct kvm_vm *vm, gva_t gva, u64 paddr,
                     u64 nr_bytes, int level);

 void vm_enable_tdp(struct kvm_vm *vm);
tools/testing/selftests/kvm/lib/arm64/processor.c (+16 -17)

···
        vm->mmu.pgd_created = true;
 }

-static void _virt_pg_map(struct kvm_vm *vm, u64 vaddr, u64 paddr,
+static void _virt_pg_map(struct kvm_vm *vm, gva_t gva, u64 paddr,
                         u64 flags)
 {
        u8 attr_idx = flags & (PTE_ATTRINDX_MASK >> PTE_ATTRINDX_SHIFT);
        u64 pg_attr;
        u64 *ptep;

-       TEST_ASSERT((vaddr % vm->page_size) == 0,
+       TEST_ASSERT((gva % vm->page_size) == 0,
                "Virtual address not on page boundary,\n"
-               "  vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
-       TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
-                   (vaddr >> vm->page_shift)),
-                   "Invalid virtual address, vaddr: 0x%lx", vaddr);
+               "  gva: 0x%lx vm->page_size: 0x%x", gva, vm->page_size);
+       TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),
+                   "Invalid virtual address, gva: 0x%lx", gva);
        TEST_ASSERT((paddr % vm->page_size) == 0,
                "Physical address not on page boundary,\n"
                "  paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
···
                "  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
                paddr, vm->max_gfn, vm->page_size);

-       ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pgd_index(vm, vaddr) * 8;
+       ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pgd_index(vm, gva) * 8;
        if (!*ptep)
                *ptep = addr_pte(vm, vm_alloc_page_table(vm),
                                 PGD_TYPE_TABLE | PTE_VALID);

        switch (vm->mmu.pgtable_levels) {
        case 4:
-               ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8;
+               ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, gva) * 8;
                if (!*ptep)
                        *ptep = addr_pte(vm, vm_alloc_page_table(vm),
                                         PUD_TYPE_TABLE | PTE_VALID);
                /* fall through */
        case 3:
-               ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8;
+               ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, gva) * 8;
                if (!*ptep)
                        *ptep = addr_pte(vm, vm_alloc_page_table(vm),
                                         PMD_TYPE_TABLE | PTE_VALID);
                /* fall through */
        case 2:
-               ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, vaddr) * 8;
+               ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, gva) * 8;
                break;
        default:
                TEST_FAIL("Page table levels must be 2, 3, or 4");
···
        *ptep = addr_pte(vm, paddr, pg_attr);
 }

-void virt_arch_pg_map(struct kvm_vm *vm, u64 vaddr, u64 paddr)
+void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, u64 paddr)
 {
        u64 attr_idx = MT_NORMAL;

-       _virt_pg_map(vm, vaddr, paddr, attr_idx);
+       _virt_pg_map(vm, gva, paddr, attr_idx);
 }

 u64 *virt_get_pte_hva_at_level(struct kvm_vm *vm, gva_t gva, int level)
···
                                  struct kvm_vcpu_init *init)
 {
        size_t stack_size;
-       u64 stack_vaddr;
+       gva_t stack_gva;
        struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id);

        stack_size = vm->page_size == 4096 ? DEFAULT_STACK_PGS * vm->page_size :
                                             vm->page_size;
-       stack_vaddr = __vm_alloc(vm, stack_size,
-                                DEFAULT_ARM64_GUEST_STACK_VADDR_MIN,
-                                MEM_REGION_DATA);
+       stack_gva = __vm_alloc(vm, stack_size,
+                              DEFAULT_ARM64_GUEST_STACK_VADDR_MIN,
+                              MEM_REGION_DATA);

        aarch64_vcpu_setup(vcpu, init);

-       vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_SP_EL1), stack_vaddr + stack_size);
+       vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_SP_EL1), stack_gva + stack_size);
        return vcpu;
 }
tools/testing/selftests/kvm/lib/elf.c (+5 -5)

···
        seg_vend |= vm->page_size - 1;
        size_t seg_size = seg_vend - seg_vstart + 1;

-       gva_t vaddr = __vm_alloc(vm, seg_size, seg_vstart, MEM_REGION_CODE);
-       TEST_ASSERT(vaddr == seg_vstart, "Unable to allocate "
+       gva_t gva = __vm_alloc(vm, seg_size, seg_vstart, MEM_REGION_CODE);
+       TEST_ASSERT(gva == seg_vstart, "Unable to allocate "
                "virtual memory for segment at requested min addr,\n"
                "  segment idx: %u\n"
                "  seg_vstart: 0x%lx\n"
-               "  vaddr: 0x%lx",
-               n1, seg_vstart, vaddr);
-       memset(addr_gva2hva(vm, vaddr), 0, seg_size);
+               "  gva: 0x%lx",
+               n1, seg_vstart, gva);
+       memset(addr_gva2hva(vm, gva), 0, seg_size);
        /* TODO(lhuemill): Set permissions of each memory segment
         * based on the least-significant 3 bits of phdr.p_flags.
         */
tools/testing/selftests/kvm/lib/kvm_util.c (+25 -35)

···

 /*
  * Within the VM specified by @vm, locates the lowest starting guest virtual
- * address >= @vaddr_min, that has at least @sz unallocated bytes. A
+ * address >= @min_gva, that has at least @sz unallocated bytes. A
  * TEST_ASSERT failure occurs for invalid input or no area of at least
  * @sz unallocated bytes >= @min_gva is available.
  */
-gva_t vm_unused_gva_gap(struct kvm_vm *vm, size_t sz, gva_t vaddr_min)
+gva_t vm_unused_gva_gap(struct kvm_vm *vm, size_t sz, gva_t min_gva)
 {
        u64 pages = (sz + vm->page_size - 1) >> vm->page_shift;

        /* Determine lowest permitted virtual page index. */
-       u64 pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift;
-       if ((pgidx_start * vm->page_size) < vaddr_min)
+       u64 pgidx_start = (min_gva + vm->page_size - 1) >> vm->page_shift;
+       if ((pgidx_start * vm->page_size) < min_gva)
                goto no_va_found;

        /* Loop over section with enough valid virtual page indexes. */
···
        } while (pgidx_start != 0);

 no_va_found:
-       TEST_FAIL("No vaddr of specified pages available, pages: 0x%lx", pages);
+       TEST_FAIL("No gva of specified pages available, pages: 0x%lx", pages);

        /* NOT REACHED */
        return -1;
···
        return pgidx_start * vm->page_size;
 }

-static gva_t ____vm_alloc(struct kvm_vm *vm, size_t sz, gva_t vaddr_min,
+static gva_t ____vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva,
                           enum kvm_mem_region_type type, bool protected)
 {
        u64 pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);
···
         * Find an unused range of virtual page addresses of at least
         * pages in length.
         */
-       gva_t vaddr_start = vm_unused_gva_gap(vm, sz, vaddr_min);
+       gva_t gva_start = vm_unused_gva_gap(vm, sz, min_gva);

        /* Map the virtual pages. */
-       for (gva_t vaddr = vaddr_start; pages > 0;
-            pages--, vaddr += vm->page_size, paddr += vm->page_size) {
+       for (gva_t gva = gva_start; pages > 0;
+            pages--, gva += vm->page_size, paddr += vm->page_size) {

-               virt_pg_map(vm, vaddr, paddr);
+               virt_pg_map(vm, gva, paddr);
        }

-       return vaddr_start;
+       return gva_start;
 }

-gva_t __vm_alloc(struct kvm_vm *vm, size_t sz, gva_t vaddr_min,
+gva_t __vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva,
                  enum kvm_mem_region_type type)
 {
-       return ____vm_alloc(vm, sz, vaddr_min, type,
+       return ____vm_alloc(vm, sz, min_gva, type,
                            vm_arch_has_protected_memory(vm));
 }

-gva_t vm_alloc_shared(struct kvm_vm *vm, size_t sz, gva_t vaddr_min,
+gva_t vm_alloc_shared(struct kvm_vm *vm, size_t sz, gva_t min_gva,
                       enum kvm_mem_region_type type)
 {
-       return ____vm_alloc(vm, sz, vaddr_min, type, false);
+       return ____vm_alloc(vm, sz, min_gva, type, false);
 }

 /*
  * Allocates at least sz bytes within the virtual address space of the VM
  * given by @vm. The allocated bytes are mapped to a virtual address >= the
- * address given by @vaddr_min. Note that each allocation uses a a unique set
+ * address given by @min_gva. Note that each allocation uses a a unique set
  * of pages, with the minimum real allocation being at least a page. The
  * allocated physical space comes from the TEST_DATA memory region.
  */
-gva_t vm_alloc(struct kvm_vm *vm, size_t sz, gva_t vaddr_min)
+gva_t vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva)
 {
-       return __vm_alloc(vm, sz, vaddr_min, MEM_REGION_TEST_DATA);
+       return __vm_alloc(vm, sz, min_gva, MEM_REGION_TEST_DATA);
 }

 gva_t vm_alloc_pages(struct kvm_vm *vm, int nr_pages)
···
 }

 /*
- * Map a range of VM virtual address to the VM's physical address
- *
- * Input Args:
- *   vm - Virtual Machine
- *   vaddr - Virtuall address to map
- *   paddr - VM Physical Address
- *   npages - The number of pages to map
- *
- * Output Args: None
- *
- * Return: None
+ * Map a range of VM virtual address to the VM's physical address.
  *
- * Within the VM given by @vm, creates a virtual translation for
- * @npages starting at @vaddr to the page range starting at @paddr.
+ * Within the VM given by @vm, creates a virtual translation for @npages
+ * starting at @gva to the page range starting at @paddr.
  */
-void virt_map(struct kvm_vm *vm, u64 vaddr, u64 paddr,
+void virt_map(struct kvm_vm *vm, gva_t gva, u64 paddr,
              unsigned int npages)
 {
        size_t page_size = vm->page_size;
        size_t size = npages * page_size;

-       TEST_ASSERT(vaddr + size > vaddr, "Vaddr overflow");
+       TEST_ASSERT(gva + size > gva, "Vaddr overflow");
        TEST_ASSERT(paddr + size > paddr, "Paddr overflow");

        while (npages--) {
-               virt_pg_map(vm, vaddr, paddr);
+               virt_pg_map(vm, gva, paddr);

-               vaddr += page_size;
+               gva += page_size;
                paddr += page_size;
        }
 }
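
[Editor's note] The comment on vm_unused_gva_gap() at the top of this file's
diff describes a first-fit scan for a free run of guest pages. A simplified
model of that search (the real code walks sparsebit structures; this toy
version uses a plain boolean array and is mine, not the patch's):

#include <stdbool.h>
#include <stddef.h>

/* Toy first-fit: lowest page index >= min_pg with `pages` consecutive
 * free slots, or -1 if no such run exists. */
static long first_fit(const bool *used, size_t nr_pages,
                      size_t min_pg, size_t pages)
{
        size_t run = 0;

        for (size_t i = min_pg; i < nr_pages; i++) {
                run = used[i] ? 0 : run + 1;
                if (run == pages)
                        return (long)(i - pages + 1);
        }
        return -1;
}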
tools/testing/selftests/kvm/lib/loongarch/processor.c (+12 -13)

···
        u64 *ptep;

        ptep = virt_populate_pte(vm, gva, 0);
-       TEST_ASSERT(*ptep != 0, "Virtual address vaddr: 0x%lx not mapped\n", gva);
+       TEST_ASSERT(*ptep != 0, "Virtual address gva: 0x%lx not mapped\n", gva);

        return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));
 }

-void virt_arch_pg_map(struct kvm_vm *vm, u64 vaddr, u64 paddr)
+void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, u64 paddr)
 {
        u32 prot_bits;
        u64 *ptep;

-       TEST_ASSERT((vaddr % vm->page_size) == 0,
+       TEST_ASSERT((gva % vm->page_size) == 0,
                "Virtual address not on page boundary,\n"
-               "vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
-       TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
-                   (vaddr >> vm->page_shift)),
-                   "Invalid virtual address, vaddr: 0x%lx", vaddr);
+               "gva: 0x%lx vm->page_size: 0x%x", gva, vm->page_size);
+       TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),
+                   "Invalid virtual address, gva: 0x%lx", gva);
        TEST_ASSERT((paddr % vm->page_size) == 0,
                "Physical address not on page boundary,\n"
                "paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
···
                "paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
                paddr, vm->max_gfn, vm->page_size);

-       ptep = virt_populate_pte(vm, vaddr, 1);
+       ptep = virt_populate_pte(vm, gva, 1);
        prot_bits = _PAGE_PRESENT | __READABLE | __WRITEABLE | _CACHE_CC | _PAGE_USER;
        WRITE_ONCE(*ptep, paddr | prot_bits);
 }
···
 struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id)
 {
        size_t stack_size;
-       u64 stack_vaddr;
+       u64 stack_gva;
        struct kvm_regs regs;
        struct kvm_vcpu *vcpu;

        vcpu = __vm_vcpu_add(vm, vcpu_id);
        stack_size = vm->page_size;
-       stack_vaddr = __vm_alloc(vm, stack_size,
-                                LOONGARCH_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA);
-       TEST_ASSERT(stack_vaddr != 0, "No memory for vm stack");
+       stack_gva = __vm_alloc(vm, stack_size,
+                              LOONGARCH_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA);
+       TEST_ASSERT(stack_gva != 0, "No memory for vm stack");

        loongarch_vcpu_setup(vcpu);
        /* Setup guest general purpose registers */
        vcpu_regs_get(vcpu, &regs);
-       regs.gpr[3] = stack_vaddr + stack_size;
+       regs.gpr[3] = stack_gva + stack_size;
        vcpu_regs_set(vcpu, &regs);

        return vcpu;
tools/testing/selftests/kvm/lib/memstress.c (+1 -1)

···
        struct memstress_args *args = &memstress_args;
        struct memstress_vcpu_args *vcpu_args = &args->vcpu_args[vcpu_idx];
        struct guest_random_state rand_state;
-       u64 gva;
+       gva_t gva;
        u64 pages;
        u64 addr;
        u64 page;
tools/testing/selftests/kvm/lib/riscv/processor.c (+12 -13)

···
        vm->mmu.pgd_created = true;
 }

-void virt_arch_pg_map(struct kvm_vm *vm, u64 vaddr, u64 paddr)
+void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, u64 paddr)
 {
        u64 *ptep, next_ppn;
        int level = vm->mmu.pgtable_levels - 1;

-       TEST_ASSERT((vaddr % vm->page_size) == 0,
+       TEST_ASSERT((gva % vm->page_size) == 0,
                "Virtual address not on page boundary,\n"
-               "  vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
-       TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
-                   (vaddr >> vm->page_shift)),
-                   "Invalid virtual address, vaddr: 0x%lx", vaddr);
+               "  gva: 0x%lx vm->page_size: 0x%x", gva, vm->page_size);
+       TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),
+                   "Invalid virtual address, gva: 0x%lx", gva);
        TEST_ASSERT((paddr % vm->page_size) == 0,
                "Physical address not on page boundary,\n"
                "  paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
···
                "  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
                paddr, vm->max_gfn, vm->page_size);

-       ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pte_index(vm, vaddr, level) * 8;
+       ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pte_index(vm, gva, level) * 8;
        if (!*ptep) {
                next_ppn = vm_alloc_page_table(vm) >> PGTBL_PAGE_SIZE_SHIFT;
                *ptep = (next_ppn << PGTBL_PTE_ADDR_SHIFT) |
···

        while (level > -1) {
                ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) +
-                      pte_index(vm, vaddr, level) * 8;
+                      pte_index(vm, gva, level) * 8;
                if (!*ptep && level > 0) {
                        next_ppn = vm_alloc_page_table(vm) >>
                                   PGTBL_PAGE_SIZE_SHIFT;
···
 {
        int r;
        size_t stack_size;
-       unsigned long stack_vaddr;
+       unsigned long stack_gva;
        unsigned long current_gp = 0;
        struct kvm_mp_state mps;
        struct kvm_vcpu *vcpu;

        stack_size = vm->page_size == 4096 ? DEFAULT_STACK_PGS * vm->page_size :
                                             vm->page_size;
-       stack_vaddr = __vm_alloc(vm, stack_size,
-                                DEFAULT_RISCV_GUEST_STACK_VADDR_MIN,
-                                MEM_REGION_DATA);
+       stack_gva = __vm_alloc(vm, stack_size,
+                              DEFAULT_RISCV_GUEST_STACK_VADDR_MIN,
+                              MEM_REGION_DATA);

        vcpu = __vm_vcpu_add(vm, vcpu_id);
        riscv_vcpu_mmu_setup(vcpu);
···
        vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.gp), current_gp);

        /* Setup stack pointer and program counter of guest */
-       vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.sp), stack_vaddr + stack_size);
+       vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.sp), stack_gva + stack_size);

        /* Setup sscratch for guest_get_vcpuid() */
        vcpu_set_reg(vcpu, RISCV_GENERAL_CSR_REG(sscratch), vcpu_id);
tools/testing/selftests/kvm/lib/s390/processor.c (+10 -13)

···
                | ((ri < 4 ? (PAGES_PER_REGION - 1) : 0) & REGION_ENTRY_LENGTH);
 }

-void virt_arch_pg_map(struct kvm_vm *vm, u64 gva, u64 gpa)
+void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, u64 gpa)
 {
        int ri, idx;
        u64 *entry;

        TEST_ASSERT((gva % vm->page_size) == 0,
-               "Virtual address not on page boundary,\n"
-               "  vaddr: 0x%lx vm->page_size: 0x%x",
-               gva, vm->page_size);
-       TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
-                   (gva >> vm->page_shift)),
-                   "Invalid virtual address, vaddr: 0x%lx",
-                   gva);
+                   "Virtual address not on page boundary,\n"
+                   "  gva: 0x%lx vm->page_size: 0x%x",
+                   gva, vm->page_size);
+       TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),
+                   "Invalid virtual address, gva: 0x%lx", gva);
        TEST_ASSERT((gpa % vm->page_size) == 0,
                "Physical address not on page boundary,\n"
                "  paddr: 0x%lx vm->page_size: 0x%x",
···
 struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id)
 {
        size_t stack_size = DEFAULT_STACK_PGS * getpagesize();
-       u64 stack_vaddr;
+       u64 stack_gva;
        struct kvm_regs regs;
        struct kvm_sregs sregs;
        struct kvm_vcpu *vcpu;
···
        TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x",
                    vm->page_size);

-       stack_vaddr = __vm_alloc(vm, stack_size,
-                                DEFAULT_GUEST_STACK_VADDR_MIN,
-                                MEM_REGION_DATA);
+       stack_gva = __vm_alloc(vm, stack_size, DEFAULT_GUEST_STACK_VADDR_MIN,
+                              MEM_REGION_DATA);

        vcpu = __vm_vcpu_add(vm, vcpu_id);

        /* Setup guest registers */
        vcpu_regs_get(vcpu, &regs);
-       regs.gprs[15] = stack_vaddr + (DEFAULT_STACK_PGS * getpagesize()) - 160;
+       regs.gprs[15] = stack_gva + (DEFAULT_STACK_PGS * getpagesize()) - 160;
        vcpu_regs_set(vcpu, &regs);

        vcpu_sregs_get(vcpu, &sregs);
tools/testing/selftests/kvm/lib/ucall_common.c (+4 -4)

···
 {
        struct ucall_header *hdr;
        struct ucall *uc;
-       gva_t vaddr;
+       gva_t gva;
        int i;

-       vaddr = vm_alloc_shared(vm, sizeof(*hdr), KVM_UTIL_MIN_VADDR,
+       gva = vm_alloc_shared(vm, sizeof(*hdr), KVM_UTIL_MIN_VADDR,
                                MEM_REGION_DATA);
-       hdr = (struct ucall_header *)addr_gva2hva(vm, vaddr);
+       hdr = (struct ucall_header *)addr_gva2hva(vm, gva);
        memset(hdr, 0, sizeof(*hdr));

        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
···
                uc->hva = uc;
        }

-       write_guest_global(vm, ucall_pool, (struct ucall_header *)vaddr);
+       write_guest_global(vm, ucall_pool, (struct ucall_header *)gva);

        ucall_arch_init(vm, mmio_gpa);
 }
tools/testing/selftests/kvm/lib/x86/processor.c (+39 -43)

···
 }

 static void *virt_get_pte(struct kvm_vm *vm, struct kvm_mmu *mmu,
-                          u64 *parent_pte, u64 vaddr, int level)
+                          u64 *parent_pte, gva_t gva, int level)
 {
        u64 pt_gpa = PTE_GET_PA(*parent_pte);
        u64 *page_table = addr_gpa2hva(vm, pt_gpa);
-       int index = (vaddr >> PG_LEVEL_SHIFT(level)) & 0x1ffu;
+       int index = (gva >> PG_LEVEL_SHIFT(level)) & 0x1ffu;

        TEST_ASSERT((*parent_pte == mmu->pgd) || is_present_pte(mmu, parent_pte),
                    "Parent PTE (level %d) not PRESENT for gva: 0x%08lx",
-                   level + 1, vaddr);
+                   level + 1, gva);

        return &page_table[index];
 }
···
 static u64 *virt_create_upper_pte(struct kvm_vm *vm,
                                   struct kvm_mmu *mmu,
                                   u64 *parent_pte,
-                                  u64 vaddr,
+                                  gva_t gva,
                                   u64 paddr,
                                   int current_level,
                                   int target_level)
 {
-       u64 *pte = virt_get_pte(vm, mmu, parent_pte, vaddr, current_level);
+       u64 *pte = virt_get_pte(vm, mmu, parent_pte, gva, current_level);

        paddr = vm_untag_gpa(vm, paddr);

···
                 * this level.
                 */
                TEST_ASSERT(current_level != target_level,
-                           "Cannot create hugepage at level: %u, vaddr: 0x%lx",
-                           current_level, vaddr);
+                           "Cannot create hugepage at level: %u, gva: 0x%lx",
+                           current_level, gva);
                TEST_ASSERT(!is_huge_pte(mmu, pte),
-                           "Cannot create page table at level: %u, vaddr: 0x%lx",
-                           current_level, vaddr);
+                           "Cannot create page table at level: %u, gva: 0x%lx",
+                           current_level, gva);
        }
        return pte;
 }

-void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, u64 vaddr,
+void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, gva_t gva,
                    u64 paddr, int level)
 {
        const u64 pg_size = PG_LEVEL_SIZE(level);
···
        TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
                    "Unknown or unsupported guest mode: 0x%x", vm->mode);

-       TEST_ASSERT((vaddr % pg_size) == 0,
+       TEST_ASSERT((gva % pg_size) == 0,
                    "Virtual address not aligned,\n"
-                   "vaddr: 0x%lx page size: 0x%lx", vaddr, pg_size);
-       TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (vaddr >> vm->page_shift)),
-                   "Invalid virtual address, vaddr: 0x%lx", vaddr);
+                   "gva: 0x%lx page size: 0x%lx", gva, pg_size);
+       TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),
+                   "Invalid virtual address, gva: 0x%lx", gva);
        TEST_ASSERT((paddr % pg_size) == 0,
                    "Physical address not aligned,\n"
                    "  paddr: 0x%lx page size: 0x%lx", paddr, pg_size);
···
        for (current_level = mmu->pgtable_levels;
             current_level > PG_LEVEL_4K;
             current_level--) {
-               pte = virt_create_upper_pte(vm, mmu, pte, vaddr, paddr,
+               pte = virt_create_upper_pte(vm, mmu, pte, gva, paddr,
                                            current_level, level);
                if (is_huge_pte(mmu, pte))
                        return;
        }

        /* Fill in page table entry. */
-       pte = virt_get_pte(vm, mmu, pte, vaddr, PG_LEVEL_4K);
+       pte = virt_get_pte(vm, mmu, pte, gva, PG_LEVEL_4K);
        TEST_ASSERT(!is_present_pte(mmu, pte),
-                   "PTE already present for 4k page at vaddr: 0x%lx", vaddr);
+                   "PTE already present for 4k page at gva: 0x%lx", gva);
        *pte = PTE_PRESENT_MASK(mmu) | PTE_READABLE_MASK(mmu) |
               PTE_WRITABLE_MASK(mmu) | PTE_EXECUTABLE_MASK(mmu) |
               PTE_ALWAYS_SET_MASK(mmu) | (paddr & PHYSICAL_PAGE_MASK);
···
                *pte |= PTE_S_BIT_MASK(mmu);
 }

-void virt_arch_pg_map(struct kvm_vm *vm, u64 vaddr, u64 paddr)
+void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, u64 paddr)
 {
-       __virt_pg_map(vm, &vm->mmu, vaddr, paddr, PG_LEVEL_4K);
+       __virt_pg_map(vm, &vm->mmu, gva, paddr, PG_LEVEL_4K);
 }

-void virt_map_level(struct kvm_vm *vm, u64 vaddr, u64 paddr,
+void virt_map_level(struct kvm_vm *vm, gva_t gva, u64 paddr,
                     u64 nr_bytes, int level)
 {
        u64 pg_size = PG_LEVEL_SIZE(level);
···
                    nr_bytes, pg_size);

        for (i = 0; i < nr_pages; i++) {
-               __virt_pg_map(vm, &vm->mmu, vaddr, paddr, level);
-               sparsebit_set_num(vm->vpages_mapped, vaddr >> vm->page_shift,
+               __virt_pg_map(vm, &vm->mmu, gva, paddr, level);
+               sparsebit_set_num(vm->vpages_mapped, gva >> vm->page_shift,
                                  nr_bytes / PAGE_SIZE);

-               vaddr += pg_size;
+               gva += pg_size;
                paddr += pg_size;
        }
 }
···

 static u64 *__vm_get_page_table_entry(struct kvm_vm *vm,
                                       struct kvm_mmu *mmu,
-                                      u64 vaddr,
+                                      gva_t gva,
                                       int *level)
 {
        int va_width = 12 + (mmu->pgtable_levels) * 9;
···

        TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
                    "Unknown or unsupported guest mode: 0x%x", vm->mode);
-       TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
-                   (vaddr >> vm->page_shift)),
-                   "Invalid virtual address, vaddr: 0x%lx",
-                   vaddr);
+       TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),
+                   "Invalid virtual address, gva: 0x%lx", gva);
        /*
-        * Check that the vaddr is a sign-extended va_width value.
+        * Check that the gva is a sign-extended va_width value.
         */
-       TEST_ASSERT(vaddr ==
-                   (((s64)vaddr << (64 - va_width) >> (64 - va_width))),
+       TEST_ASSERT(gva == (((s64)gva << (64 - va_width) >> (64 - va_width))),
                    "Canonical check failed.  The virtual address is invalid.");

        for (current_level = mmu->pgtable_levels;
             current_level > PG_LEVEL_4K;
             current_level--) {
-               pte = virt_get_pte(vm, mmu, pte, vaddr, current_level);
+               pte = virt_get_pte(vm, mmu, pte, gva, current_level);
                if (vm_is_target_pte(mmu, pte, level, current_level))
                        return pte;
        }

-       return virt_get_pte(vm, mmu, pte, vaddr, PG_LEVEL_4K);
+       return virt_get_pte(vm, mmu, pte, gva, PG_LEVEL_4K);
 }

 u64 *tdp_get_pte(struct kvm_vm *vm, u64 l2_gpa)
···
        return __vm_get_page_table_entry(vm, &vm->stage2_mmu, l2_gpa, &level);
 }

-u64 *vm_get_pte(struct kvm_vm *vm, u64 vaddr)
+u64 *vm_get_pte(struct kvm_vm *vm, gva_t gva)
 {
        int level = PG_LEVEL_4K;

-       return __vm_get_page_table_entry(vm, &vm->mmu, vaddr, &level);
+       return __vm_get_page_table_entry(vm, &vm->mmu, gva, &level);
 }

 void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent)
···
 {
        struct kvm_mp_state mp_state;
        struct kvm_regs regs;
-       gva_t stack_vaddr;
+       gva_t stack_gva;
        struct kvm_vcpu *vcpu;

-       stack_vaddr = __vm_alloc(vm, DEFAULT_STACK_PGS * getpagesize(),
-                                DEFAULT_GUEST_STACK_VADDR_MIN,
-                                MEM_REGION_DATA);
+       stack_gva = __vm_alloc(vm, DEFAULT_STACK_PGS * getpagesize(),
+                              DEFAULT_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA);

-       stack_vaddr += DEFAULT_STACK_PGS * getpagesize();
+       stack_gva += DEFAULT_STACK_PGS * getpagesize();

        /*
         * Align stack to match calling sequence requirements in section "The
···
         * If this code is ever used to launch a vCPU with 32-bit entry point it
         * may need to subtract 4 bytes instead of 8 bytes.
         */
-       TEST_ASSERT(IS_ALIGNED(stack_vaddr, PAGE_SIZE),
+       TEST_ASSERT(IS_ALIGNED(stack_gva, PAGE_SIZE),
                    "__vm_alloc() did not provide a page-aligned address");
-       stack_vaddr -= 8;
+       stack_gva -= 8;

        vcpu = __vm_vcpu_add(vm, vcpu_id);
        vcpu_init_cpuid(vcpu, kvm_get_supported_cpuid());
···
        /* Setup guest general purpose registers */
        vcpu_regs_get(vcpu, &regs);
        regs.rflags = regs.rflags | 0x2;
-       regs.rsp = stack_vaddr;
+       regs.rsp = stack_gva;
        vcpu_regs_set(vcpu, &regs);

        /* Setup the MP state */
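
[Editor's note] The canonical check in __vm_get_page_table_entry() above is
the standard sign-extension test: a value is a legal va_width-bit virtual
address only if truncating it to va_width bits and sign-extending back
reproduces it. A standalone illustration (function name and example values
are mine, not the patch's):

#include <stdbool.h>
#include <stdint.h>

/* Sign-extension (canonical address) test, as in the hunk above. */
static bool is_canonical(uint64_t gva, int va_width)
{
        /* Shift the low va_width bits to the top, then arithmetic-shift
         * back down so bit (va_width - 1) fills the upper bits. */
        int64_t sext = (int64_t)(gva << (64 - va_width)) >> (64 - va_width);

        return gva == (uint64_t)sext;
}

With va_width == 48, 0x00007fffffffffff and 0xffff800000000000 pass, while
0x0000800000000000 fails because bits 48-63 do not replicate bit 47.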
tools/testing/selftests/kvm/s390/ucontrol_test.c (+2 -2)

···
 {
        struct kvm_s390_sie_block *sie_block = self->sie_block;
        struct kvm_sync_regs *sync_regs = &self->run->s.regs;
-       u64 test_vaddr = VM_MEM_SIZE - (SZ_1M / 2);
+       u64 test_gva = VM_MEM_SIZE - (SZ_1M / 2);
        struct kvm_run *run = self->run;
        const u8 skeyvalue = 0x34;

···
        /* set register content for test_skey_asm to access not mapped memory */
        sync_regs->gprs[1] = skeyvalue;
        sync_regs->gprs[5] = self->base_gpa;
-       sync_regs->gprs[6] = test_vaddr;
+       sync_regs->gprs[6] = test_gva;
        run->kvm_dirty_regs |= KVM_SYNC_GPRS;

        /* DAT disabled + 64 bit mode */
tools/testing/selftests/kvm/x86/xapic_ipi_test.c (+5 -5)

···
        int run_secs = 0;
        int delay_usecs = 0;
        struct test_data_page *data;
-       gva_t test_data_page_vaddr;
+       gva_t test_data_page_gva;
        bool migrate = false;
        pthread_t threads[2];
        struct thread_params params[2];
···

        params[1].vcpu = vm_vcpu_add(vm, 1, sender_guest_code);

-       test_data_page_vaddr = vm_alloc_page(vm);
-       data = addr_gva2hva(vm, test_data_page_vaddr);
+       test_data_page_gva = vm_alloc_page(vm);
+       data = addr_gva2hva(vm, test_data_page_gva);
        memset(data, 0, sizeof(*data));
        params[0].data = data;
        params[1].data = data;

-       vcpu_args_set(params[0].vcpu, 1, test_data_page_vaddr);
-       vcpu_args_set(params[1].vcpu, 1, test_data_page_vaddr);
+       vcpu_args_set(params[0].vcpu, 1, test_data_page_gva);
+       vcpu_args_set(params[1].vcpu, 1, test_data_page_gva);

        pipis_rcvd = (u64 *)addr_gva2hva(vm, (u64)&ipis_rcvd);
        params[0].pipis_rcvd = pipis_rcvd;