Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

KVM: selftests: Replace "paddr" with "gpa" throughout

Replace all variations of "paddr" variables in KVM selftests with "gpa",
with the exception of the ELF structures, whose fields are not specific
to guest physical addresses, to complete the conversion from vm_paddr_t
to gpa_t.

No functional change intended.

Link: https://patch.msgid.link/20260420212004.3938325-20-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
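
In practice, the rename means the allocation and mapping helpers now speak "gpa" end to end. As a minimal illustrative sketch (not part of this patch; the 0x10000 minimum, memslot 0, and variable names are assumptions), a test built on the converted kvm_util.h API would read:

	/* Allocate one guest physical page at or above 0x10000 in memslot 0. */
	gpa_t gpa = vm_phy_page_alloc(vm, /*min_gpa=*/0x10000, /*memslot=*/0);

	/* Map @gva to the freshly allocated page in the guest's page tables. */
	virt_pg_map(vm, gva, gpa);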

+98 -100
+1 -1
tools/testing/selftests/kvm/arm64/sea_to_user.c
···
 	vm_userspace_mem_region_add(
 			/*vm=*/vm,
 			/*src_type=*/src_type,
-			/*guest_paddr=*/start_gpa,
+			/*gpa=*/start_gpa,
 			/*slot=*/1,
 			/*npages=*/num_guest_pages,
 			/*flags=*/0);
+11 -12
tools/testing/selftests/kvm/include/kvm_util.h
···
 gva_t __vm_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type);
 gva_t vm_alloc_page(struct kvm_vm *vm);
 
-void virt_map(struct kvm_vm *vm, gva_t gva, u64 paddr,
+void virt_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa,
 	      unsigned int npages);
 void *addr_gpa2hva(struct kvm_vm *vm, gpa_t gpa);
 void *addr_gva2hva(struct kvm_vm *vm, gva_t gva);
···
 
 const char *exit_reason_str(unsigned int exit_reason);
 
-gpa_t vm_phy_page_alloc(struct kvm_vm *vm, gpa_t paddr_min, u32 memslot);
-gpa_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
-			   gpa_t paddr_min, u32 memslot,
-			   bool protected);
+gpa_t vm_phy_page_alloc(struct kvm_vm *vm, gpa_t min_gpa, u32 memslot);
+gpa_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, gpa_t min_gpa,
+			   u32 memslot, bool protected);
 gpa_t vm_alloc_page_table(struct kvm_vm *vm);
 
 static inline gpa_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
-				       gpa_t paddr_min, u32 memslot)
+				       gpa_t min_gpa, u32 memslot)
 {
 	/*
 	 * By default, allocate memory as protected for VMs that support
 	 * protected memory, as the majority of memory for such VMs is
 	 * protected, i.e. using shared memory is effectively opt-in.
 	 */
-	return __vm_phy_pages_alloc(vm, num, paddr_min, memslot,
+	return __vm_phy_pages_alloc(vm, num, min_gpa, memslot,
 				    vm_arch_has_protected_memory(vm));
 }
···
 
 /*
  * Within @vm, creates a virtual translation for the page starting
- * at @gva to the page starting at @paddr.
+ * at @gva to the page starting at @gpa.
  */
-void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, u64 paddr);
+void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa);
 
-static inline void virt_pg_map(struct kvm_vm *vm, gva_t gva, u64 paddr)
+static inline void virt_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa)
 {
-	virt_arch_pg_map(vm, gva, paddr);
+	virt_arch_pg_map(vm, gva, gpa);
 	sparsebit_set(vm->vpages_mapped, gva >> vm->page_shift);
 }
···
 void kvm_arch_vm_finalize_vcpus(struct kvm_vm *vm);
 void kvm_arch_vm_release(struct kvm_vm *vm);
 
-bool vm_is_gpa_protected(struct kvm_vm *vm, gpa_t paddr);
+bool vm_is_gpa_protected(struct kvm_vm *vm, gpa_t gpa);
 
 u32 guest_get_vcpuid(void);
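
Note the inline wrapper above: for VMs with protected memory, vm_phy_pages_alloc() marks allocations protected by default, so shared memory is effectively opt-in. A hedged sketch of that opt-in path, using __vm_phy_pages_alloc() as declared in this header (the page count, minimum GPA, and memslot are illustrative):

	/* Explicitly allocate four shared (unprotected) pages; the
	 * vm_phy_pages_alloc() wrapper would default these to protected
	 * on VMs that support protected memory. */
	gpa_t shared_gpa = __vm_phy_pages_alloc(vm, /*num=*/4, /*min_gpa=*/0x10000,
						/*memslot=*/0, /*protected=*/false);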
+3 -3
tools/testing/selftests/kvm/include/x86/processor.h
···
 			   struct pte_masks *pte_masks);
 
 void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, gva_t gva,
-		   u64 paddr, int level);
-void virt_map_level(struct kvm_vm *vm, gva_t gva, u64 paddr,
+		   gpa_t gpa, int level);
+void virt_map_level(struct kvm_vm *vm, gva_t gva, gpa_t gpa,
 		    u64 nr_bytes, int level);
 
 void vm_enable_tdp(struct kvm_vm *vm);
 bool kvm_cpu_has_tdp(void);
-void tdp_map(struct kvm_vm *vm, gpa_t l2_gpa, u64 paddr, u64 size);
+void tdp_map(struct kvm_vm *vm, gpa_t l2_gpa, gpa_t gpa, u64 size);
 void tdp_identity_map_default_memslots(struct kvm_vm *vm);
 void tdp_identity_map_1g(struct kvm_vm *vm, u64 addr, u64 size);
 u64 *tdp_get_pte(struct kvm_vm *vm, u64 l2_gpa);
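
The TDP helpers adopt the same spelling, which makes it clearer that tdp_map() translates one guest physical range (an L2 GPA) to another. A brief sketch using tdp_map() as declared above (the base addresses and 1 GiB size are illustrative):

	/* Identity-map the first 1 GiB of guest physical memory into L2;
	 * tdp_map() installs 4K mappings (PG_LEVEL_4K) under the hood. */
	tdp_map(vm, /*l2_gpa=*/0, /*gpa=*/0, /*size=*/1ULL << 30);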
+11 -11
tools/testing/selftests/kvm/lib/arm64/processor.c
···
 	vm->mmu.pgd_created = true;
 }
 
-static void _virt_pg_map(struct kvm_vm *vm, gva_t gva, u64 paddr,
+static void _virt_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa,
 			 u64 flags)
 {
 	u8 attr_idx = flags & (PTE_ATTRINDX_MASK >> PTE_ATTRINDX_SHIFT);
···
 		    "  gva: 0x%lx vm->page_size: 0x%x", gva, vm->page_size);
 	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),
 		    "Invalid virtual address, gva: 0x%lx", gva);
-	TEST_ASSERT((paddr % vm->page_size) == 0,
-		    "Physical address not on page boundary,\n"
-		    "  paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
-	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
-		    "Physical address beyond beyond maximum supported,\n"
-		    "  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
-		    paddr, vm->max_gfn, vm->page_size);
+	TEST_ASSERT((gpa % vm->page_size) == 0,
+		    "Physical address not on page boundary,\n"
+		    "  gpa: 0x%lx vm->page_size: 0x%x", gpa, vm->page_size);
+	TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
+		    "Physical address beyond beyond maximum supported,\n"
+		    "  gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
+		    gpa, vm->max_gfn, vm->page_size);
 
 	ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pgd_index(vm, gva) * 8;
 	if (!*ptep)
···
 	if (!use_lpa2_pte_format(vm))
 		pg_attr |= PTE_SHARED;
 
-	*ptep = addr_pte(vm, paddr, pg_attr);
+	*ptep = addr_pte(vm, gpa, pg_attr);
 }
 
-void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, u64 paddr)
+void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa)
 {
 	u64 attr_idx = MT_NORMAL;
 
-	_virt_pg_map(vm, gva, paddr, attr_idx);
+	_virt_pg_map(vm, gva, gpa, attr_idx);
 }
 
 u64 *virt_get_pte_hva_at_level(struct kvm_vm *vm, gva_t gva, int level)
+26 -27
tools/testing/selftests/kvm/lib/kvm_util.c
···
 
 	TEST_FAIL("A mem region with the requested slot "
 		  "already exists.\n"
-		  "  requested slot: %u paddr: 0x%lx npages: 0x%lx\n"
-		  "  existing slot: %u paddr: 0x%lx size: 0x%lx",
+		  "  requested slot: %u gpa: 0x%lx npages: 0x%lx\n"
+		  "  existing slot: %u gpa: 0x%lx size: 0x%lx",
 		  slot, gpa, npages, region->region.slot,
 		  (u64)region->region.guest_phys_addr,
 		  (u64)region->region.memory_size);
···
 	u64 pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);
 
 	virt_pgd_alloc(vm);
-	gpa_t paddr = __vm_phy_pages_alloc(vm, pages,
+	gpa_t gpa = __vm_phy_pages_alloc(vm, pages,
 					   KVM_UTIL_MIN_PFN * vm->page_size,
 					   vm->memslots[type], protected);
 
···
 
 	/* Map the virtual pages. */
 	for (gva_t gva = gva_start; pages > 0;
-	     pages--, gva += vm->page_size, paddr += vm->page_size) {
+	     pages--, gva += vm->page_size, gpa += vm->page_size) {
 
-		virt_pg_map(vm, gva, paddr);
+		virt_pg_map(vm, gva, gpa);
 	}
 
 	return gva_start;
···
  * Map a range of VM virtual address to the VM's physical address.
  *
  * Within the VM given by @vm, creates a virtual translation for @npages
- * starting at @gva to the page range starting at @paddr.
+ * starting at @gva to the page range starting at @gpa.
  */
-void virt_map(struct kvm_vm *vm, gva_t gva, u64 paddr,
-	      unsigned int npages)
+void virt_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa, unsigned int npages)
 {
 	size_t page_size = vm->page_size;
 	size_t size = npages * page_size;
 
 	TEST_ASSERT(gva + size > gva, "Vaddr overflow");
-	TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
+	TEST_ASSERT(gpa + size > gpa, "Paddr overflow");
 
 	while (npages--) {
-		virt_pg_map(vm, gva, paddr);
+		virt_pg_map(vm, gva, gpa);
 
 		gva += page_size;
-		paddr += page_size;
+		gpa += page_size;
 	}
 }
···
  * Input Args:
  *   vm - Virtual Machine
  *   num - number of pages
- *   paddr_min - Physical address minimum
+ *   min_gpa - Physical address minimum
  *   memslot - Memory region to allocate page from
  *   protected - True if the pages will be used as protected/private memory
  *
···
  *   Starting physical address
  *
  * Within the VM specified by vm, locates a range of available physical
- * pages at or above paddr_min. If found, the pages are marked as in use
+ * pages at or above min_gpa. If found, the pages are marked as in use
  * and their base address is returned. A TEST_ASSERT failure occurs if
- * not enough pages are available at or above paddr_min.
+ * not enough pages are available at or above min_gpa.
  */
 gpa_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
-			   gpa_t paddr_min, u32 memslot,
+			   gpa_t min_gpa, u32 memslot,
 			   bool protected)
 {
 	struct userspace_mem_region *region;
···
 
 	TEST_ASSERT(num > 0, "Must allocate at least one page");
 
-	TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address "
+	TEST_ASSERT((min_gpa % vm->page_size) == 0, "Min physical address "
 		    "not divisible by page size.\n"
-		    "  paddr_min: 0x%lx page_size: 0x%x",
-		    paddr_min, vm->page_size);
+		    "  min_gpa: 0x%lx page_size: 0x%x",
+		    min_gpa, vm->page_size);
 
 	region = memslot2region(vm, memslot);
 	TEST_ASSERT(!protected || region->protected_phy_pages,
 		    "Region doesn't support protected memory");
 
-	base = pg = paddr_min >> vm->page_shift;
+	base = pg = min_gpa >> vm->page_shift;
 	do {
 		for (; pg < base + num; ++pg) {
 			if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
···
 
 	if (pg == 0) {
 		fprintf(stderr, "No guest physical page available, "
-			"paddr_min: 0x%lx page_size: 0x%x memslot: %u\n",
-			paddr_min, vm->page_size, memslot);
+			"min_gpa: 0x%lx page_size: 0x%x memslot: %u\n",
+			min_gpa, vm->page_size, memslot);
 		fputs("---- vm dump ----\n", stderr);
 		vm_dump(stderr, vm, 2);
 		abort();
···
 	return base * vm->page_size;
 }
 
-gpa_t vm_phy_page_alloc(struct kvm_vm *vm, gpa_t paddr_min, u32 memslot)
+gpa_t vm_phy_page_alloc(struct kvm_vm *vm, gpa_t min_gpa, u32 memslot)
 {
-	return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
+	return vm_phy_pages_alloc(vm, 1, min_gpa, memslot);
 }
 
 gpa_t vm_alloc_page_table(struct kvm_vm *vm)
···
 	kvm_selftest_arch_init();
 }
 
-bool vm_is_gpa_protected(struct kvm_vm *vm, gpa_t paddr)
+bool vm_is_gpa_protected(struct kvm_vm *vm, gpa_t gpa)
 {
 	sparsebit_idx_t pg = 0;
 	struct userspace_mem_region *region;
···
 	if (!vm_arch_has_protected_memory(vm))
 		return false;
 
-	region = userspace_mem_region_find(vm, paddr, paddr);
-	TEST_ASSERT(region, "No vm physical memory at 0x%lx", paddr);
+	region = userspace_mem_region_find(vm, gpa, gpa);
+	TEST_ASSERT(region, "No vm physical memory at 0x%lx", gpa);
 
-	pg = paddr >> vm->page_shift;
+	pg = gpa >> vm->page_shift;
 	return sparsebit_is_set(region->protected_phy_pages, pg);
 }
+7 -7
tools/testing/selftests/kvm/lib/loongarch/processor.c
···
 	return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));
 }
 
-void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, u64 paddr)
+void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa)
 {
 	u32 prot_bits;
 	u64 *ptep;
···
 		    "gva: 0x%lx vm->page_size: 0x%x", gva, vm->page_size);
 	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),
 		    "Invalid virtual address, gva: 0x%lx", gva);
-	TEST_ASSERT((paddr % vm->page_size) == 0,
+	TEST_ASSERT((gpa % vm->page_size) == 0,
 		    "Physical address not on page boundary,\n"
-		    "paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
-	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
+		    "gpa: 0x%lx vm->page_size: 0x%x", gpa, vm->page_size);
+	TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
 		    "Physical address beyond maximum supported,\n"
-		    "paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
-		    paddr, vm->max_gfn, vm->page_size);
+		    "gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
+		    gpa, vm->max_gfn, vm->page_size);
 
 	ptep = virt_populate_pte(vm, gva, 1);
 	prot_bits = _PAGE_PRESENT | __READABLE | __WRITEABLE | _CACHE_CC | _PAGE_USER;
-	WRITE_ONCE(*ptep, paddr | prot_bits);
+	WRITE_ONCE(*ptep, gpa | prot_bits);
 }
 
 static void pte_dump(FILE *stream, struct kvm_vm *vm, u8 indent, u64 page, int level)
+8 -8
tools/testing/selftests/kvm/lib/riscv/processor.c
···
 	vm->mmu.pgd_created = true;
 }
 
-void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, u64 paddr)
+void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa)
 {
 	u64 *ptep, next_ppn;
 	int level = vm->mmu.pgtable_levels - 1;
···
 		    "  gva: 0x%lx vm->page_size: 0x%x", gva, vm->page_size);
 	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),
 		    "Invalid virtual address, gva: 0x%lx", gva);
-	TEST_ASSERT((paddr % vm->page_size) == 0,
+	TEST_ASSERT((gpa % vm->page_size) == 0,
 		    "Physical address not on page boundary,\n"
-		    "  paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
-	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
+		    "  gpa: 0x%lx vm->page_size: 0x%x", gpa, vm->page_size);
+	TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
 		    "Physical address beyond maximum supported,\n"
-		    "  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
-		    paddr, vm->max_gfn, vm->page_size);
+		    "  gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
+		    gpa, vm->max_gfn, vm->page_size);
 
 	ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pte_index(vm, gva, level) * 8;
 	if (!*ptep) {
···
 		level--;
 	}
 
-	paddr = paddr >> PGTBL_PAGE_SIZE_SHIFT;
-	*ptep = (paddr << PGTBL_PTE_ADDR_SHIFT) |
+	gpa = gpa >> PGTBL_PAGE_SIZE_SHIFT;
+	*ptep = (gpa << PGTBL_PTE_ADDR_SHIFT) |
 		PGTBL_PTE_PERM_MASK | PGTBL_PTE_VALID_MASK;
 }
+6 -6
tools/testing/selftests/kvm/lib/s390/processor.c
···
 
 void virt_arch_pgd_alloc(struct kvm_vm *vm)
 {
-	gpa_t paddr;
+	gpa_t gpa;
 
 	TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x",
 		    vm->page_size);
···
 	if (vm->mmu.pgd_created)
 		return;
 
-	paddr = vm_phy_pages_alloc(vm, PAGES_PER_REGION,
+	gpa = vm_phy_pages_alloc(vm, PAGES_PER_REGION,
 				   KVM_GUEST_PAGE_TABLE_MIN_PADDR,
 				   vm->memslots[MEM_REGION_PT]);
-	memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size);
+	memset(addr_gpa2hva(vm, gpa), 0xff, PAGES_PER_REGION * vm->page_size);
 
-	vm->mmu.pgd = paddr;
+	vm->mmu.pgd = gpa;
 	vm->mmu.pgd_created = true;
 }
···
 		    "Invalid virtual address, gva: 0x%lx", gva);
 	TEST_ASSERT((gpa % vm->page_size) == 0,
 		    "Physical address not on page boundary,\n"
-		    "  paddr: 0x%lx vm->page_size: 0x%x",
+		    "  gpa: 0x%lx vm->page_size: 0x%x",
 		    gva, vm->page_size);
 	TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
 		    "Physical address beyond beyond maximum supported,\n"
-		    "  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
+		    "  gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
 		    gva, vm->max_gfn, vm->page_size);
 
 	/* Walk through region and segment tables */
+25 -25
tools/testing/selftests/kvm/lib/x86/processor.c
···
 					 struct kvm_mmu *mmu,
 					 u64 *parent_pte,
 					 gva_t gva,
-					 u64 paddr,
+					 gpa_t gpa,
 					 int current_level,
 					 int target_level)
 {
 	u64 *pte = virt_get_pte(vm, mmu, parent_pte, gva, current_level);
 
-	paddr = vm_untag_gpa(vm, paddr);
+	gpa = vm_untag_gpa(vm, gpa);
 
 	if (!is_present_pte(mmu, pte)) {
 		*pte = PTE_PRESENT_MASK(mmu) | PTE_READABLE_MASK(mmu) |
 		       PTE_WRITABLE_MASK(mmu) | PTE_EXECUTABLE_MASK(mmu) |
 		       PTE_ALWAYS_SET_MASK(mmu);
 		if (current_level == target_level)
-			*pte |= PTE_HUGE_MASK(mmu) | (paddr & PHYSICAL_PAGE_MASK);
+			*pte |= PTE_HUGE_MASK(mmu) | (gpa & PHYSICAL_PAGE_MASK);
 		else
 			*pte |= vm_alloc_page_table(vm) & PHYSICAL_PAGE_MASK;
 	} else {
···
 }
 
 void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, gva_t gva,
-		   u64 paddr, int level)
+		   gpa_t gpa, int level)
 {
 	const u64 pg_size = PG_LEVEL_SIZE(level);
 	u64 *pte = &mmu->pgd;
···
 		    "gva: 0x%lx page size: 0x%lx", gva, pg_size);
 	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),
 		    "Invalid virtual address, gva: 0x%lx", gva);
-	TEST_ASSERT((paddr % pg_size) == 0,
+	TEST_ASSERT((gpa % pg_size) == 0,
 		    "Physical address not aligned,\n"
-		    "  paddr: 0x%lx page size: 0x%lx", paddr, pg_size);
-	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
+		    "  gpa: 0x%lx page size: 0x%lx", gpa, pg_size);
+	TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
 		    "Physical address beyond maximum supported,\n"
-		    "  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
-		    paddr, vm->max_gfn, vm->page_size);
-	TEST_ASSERT(vm_untag_gpa(vm, paddr) == paddr,
-		    "Unexpected bits in paddr: %lx", paddr);
+		    "  gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
+		    gpa, vm->max_gfn, vm->page_size);
+	TEST_ASSERT(vm_untag_gpa(vm, gpa) == gpa,
+		    "Unexpected bits in gpa: %lx", gpa);
 
 	TEST_ASSERT(!PTE_EXECUTABLE_MASK(mmu) || !PTE_NX_MASK(mmu),
 		    "X and NX bit masks cannot be used simultaneously");
···
 	for (current_level = mmu->pgtable_levels;
 	     current_level > PG_LEVEL_4K;
 	     current_level--) {
-		pte = virt_create_upper_pte(vm, mmu, pte, gva, paddr,
+		pte = virt_create_upper_pte(vm, mmu, pte, gva, gpa,
 					    current_level, level);
 		if (is_huge_pte(mmu, pte))
 			return;
···
 		    "PTE already present for 4k page at gva: 0x%lx", gva);
 	*pte = PTE_PRESENT_MASK(mmu) | PTE_READABLE_MASK(mmu) |
 	       PTE_WRITABLE_MASK(mmu) | PTE_EXECUTABLE_MASK(mmu) |
-	       PTE_ALWAYS_SET_MASK(mmu) | (paddr & PHYSICAL_PAGE_MASK);
+	       PTE_ALWAYS_SET_MASK(mmu) | (gpa & PHYSICAL_PAGE_MASK);
 
 	/*
 	 * Neither SEV nor TDX supports shared page tables, so only the final
 	 * leaf PTE needs manually set the C/S-bit.
 	 */
-	if (vm_is_gpa_protected(vm, paddr))
+	if (vm_is_gpa_protected(vm, gpa))
 		*pte |= PTE_C_BIT_MASK(mmu);
 	else
 		*pte |= PTE_S_BIT_MASK(mmu);
 }
 
-void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, u64 paddr)
+void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa)
 {
-	__virt_pg_map(vm, &vm->mmu, gva, paddr, PG_LEVEL_4K);
+	__virt_pg_map(vm, &vm->mmu, gva, gpa, PG_LEVEL_4K);
 }
 
-void virt_map_level(struct kvm_vm *vm, gva_t gva, u64 paddr,
+void virt_map_level(struct kvm_vm *vm, gva_t gva, gpa_t gpa,
 		    u64 nr_bytes, int level)
 {
 	u64 pg_size = PG_LEVEL_SIZE(level);
···
 		    nr_bytes, pg_size);
 
 	for (i = 0; i < nr_pages; i++) {
-		__virt_pg_map(vm, &vm->mmu, gva, paddr, level);
+		__virt_pg_map(vm, &vm->mmu, gva, gpa, level);
 		sparsebit_set_num(vm->vpages_mapped, gva >> vm->page_shift,
 				  nr_bytes / PAGE_SIZE);
 
 		gva += pg_size;
-		paddr += pg_size;
+		gpa += pg_size;
 	}
 }
···
 	return kvm_cpu_has_ept() || kvm_cpu_has_npt();
 }
 
-void __tdp_map(struct kvm_vm *vm, gpa_t l2_gpa, u64 paddr, u64 size, int level)
+void __tdp_map(struct kvm_vm *vm, gpa_t l2_gpa, gpa_t gpa, u64 size, int level)
 {
 	size_t page_size = PG_LEVEL_SIZE(level);
 	size_t npages = size / page_size;
 
 	TEST_ASSERT(l2_gpa + size > l2_gpa, "L2 GPA overflow");
-	TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
+	TEST_ASSERT(gpa + size > gpa, "GPA overflow");
 
 	while (npages--) {
-		__virt_pg_map(vm, &vm->stage2_mmu, l2_gpa, paddr, level);
+		__virt_pg_map(vm, &vm->stage2_mmu, l2_gpa, gpa, level);
 		l2_gpa += page_size;
-		paddr += page_size;
+		gpa += page_size;
 	}
 }
 
-void tdp_map(struct kvm_vm *vm, gpa_t l2_gpa, u64 paddr, u64 size)
+void tdp_map(struct kvm_vm *vm, gpa_t l2_gpa, gpa_t gpa, u64 size)
 {
-	__tdp_map(vm, l2_gpa, paddr, size, PG_LEVEL_4K);
+	__tdp_map(vm, l2_gpa, gpa, size, PG_LEVEL_4K);
 }
 
 /* Prepare an identity extended page table that maps all the