Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

KVM: selftests: Use gpa_t instead of vm_paddr_t

Replace all occurrences of vm_paddr_t with gpa_t to align with KVM code
and with the conversion helpers (e.g. addr_hva2gpa()).

This commit was generated with the following command:

git ls-files tools/testing/selftests/kvm | xargs sed -i 's/vm_paddr_/gpa_/g'

Then by manually adjusting whitespace to make checkpatch.pl happy.

No functional change intended.

Signed-off-by: David Matlack <dmatlack@google.com>
[sean: drop bogus changelog blurb about renaming functions]
Link: https://patch.msgid.link/20260420212004.3938325-3-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>

Authored by David Matlack and committed by Sean Christopherson
97dcda3f 5567fc9d

+92 -96
+2 -2
tools/testing/selftests/kvm/arm64/sea_to_user.c
··· 51 51 #define EINJ_OFFSET 0x01234badUL 52 52 #define EINJ_GVA ((START_GVA) + (EINJ_OFFSET)) 53 53 54 - static vm_paddr_t einj_gpa; 54 + static gpa_t einj_gpa; 55 55 static void *einj_hva; 56 56 static uint64_t einj_hpa; 57 57 static bool far_invalid; ··· 254 254 size_t guest_page_size; 255 255 size_t alignment; 256 256 uint64_t num_guest_pages; 257 - vm_paddr_t start_gpa; 257 + gpa_t start_gpa; 258 258 enum vm_mem_backing_src_type src_type = VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB; 259 259 struct kvm_vm *vm; 260 260
+10 -10
tools/testing/selftests/kvm/arm64/vgic_lpi_stress.c
··· 23 23 #define GIC_LPI_OFFSET 8192 24 24 25 25 static size_t nr_iterations = 1000; 26 - static vm_paddr_t gpa_base; 26 + static gpa_t gpa_base; 27 27 28 28 static struct kvm_vm *vm; 29 29 static struct kvm_vcpu **vcpus; ··· 35 35 u32 nr_devices; 36 36 u32 nr_event_ids; 37 37 38 - vm_paddr_t device_table; 39 - vm_paddr_t collection_table; 40 - vm_paddr_t cmdq_base; 38 + gpa_t device_table; 39 + gpa_t collection_table; 40 + gpa_t cmdq_base; 41 41 void *cmdq_base_va; 42 - vm_paddr_t itt_tables; 42 + gpa_t itt_tables; 43 43 44 - vm_paddr_t lpi_prop_table; 45 - vm_paddr_t lpi_pend_tables; 44 + gpa_t lpi_prop_table; 45 + gpa_t lpi_pend_tables; 46 46 } test_data = { 47 47 .nr_cpus = 1, 48 48 .nr_devices = 1, ··· 73 73 /* Round-robin the LPIs to all of the vCPUs in the VM */ 74 74 coll_id = 0; 75 75 for (device_id = 0; device_id < nr_devices; device_id++) { 76 - vm_paddr_t itt_base = test_data.itt_tables + (device_id * SZ_64K); 76 + gpa_t itt_base = test_data.itt_tables + (device_id * SZ_64K); 77 77 78 78 its_send_mapd_cmd(test_data.cmdq_base_va, device_id, 79 79 itt_base, SZ_64K, true); ··· 188 188 size_t pages_per_64k = vm_calc_num_guest_pages(vm->mode, SZ_64K); 189 189 u32 nr_devices = test_data.nr_devices; 190 190 u32 nr_cpus = test_data.nr_cpus; 191 - vm_paddr_t cmdq_base; 191 + gpa_t cmdq_base; 192 192 193 193 test_data.device_table = vm_phy_pages_alloc(vm, pages_per_64k, 194 194 gpa_base, ··· 224 224 225 225 static void signal_lpi(u32 device_id, u32 event_id) 226 226 { 227 - vm_paddr_t db_addr = GITS_BASE_GPA + GITS_TRANSLATER; 227 + gpa_t db_addr = GITS_BASE_GPA + GITS_TRANSLATER; 228 228 229 229 struct kvm_msi msi = { 230 230 .address_lo = db_addr,
+1 -1
tools/testing/selftests/kvm/dirty_log_test.c
··· 667 667 virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages); 668 668 669 669 /* Cache the HVA pointer of the region */ 670 - host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem); 670 + host_test_mem = addr_gpa2hva(vm, (gpa_t)guest_test_phys_mem); 671 671 672 672 /* Export the shared variables to the guest */ 673 673 sync_global_to_guest(vm, host_page_size);
+2 -2
tools/testing/selftests/kvm/include/arm64/gic.h
··· 59 59 void gic_irq_set_config(unsigned int intid, bool is_edge); 60 60 void gic_irq_set_group(unsigned int intid, bool group); 61 61 62 - void gic_rdist_enable_lpis(vm_paddr_t cfg_table, size_t cfg_table_size, 63 - vm_paddr_t pend_table); 62 + void gic_rdist_enable_lpis(gpa_t cfg_table, size_t cfg_table_size, 63 + gpa_t pend_table); 64 64 65 65 #endif /* SELFTEST_KVM_GIC_H */
+3 -4
tools/testing/selftests/kvm/include/arm64/gic_v3_its.h
··· 5 5 6 6 #include <linux/sizes.h> 7 7 8 - void its_init(vm_paddr_t coll_tbl, size_t coll_tbl_sz, 9 - vm_paddr_t device_tbl, size_t device_tbl_sz, 10 - vm_paddr_t cmdq, size_t cmdq_size); 8 + void its_init(gpa_t coll_tbl, size_t coll_tbl_sz, gpa_t device_tbl, 9 + size_t device_tbl_sz, gpa_t cmdq, size_t cmdq_size); 11 10 12 - void its_send_mapd_cmd(void *cmdq_base, u32 device_id, vm_paddr_t itt_base, 11 + void its_send_mapd_cmd(void *cmdq_base, u32 device_id, gpa_t itt_base, 13 12 size_t itt_size, bool valid); 14 13 void its_send_mapc_cmd(void *cmdq_base, u32 vcpu_id, u32 collection_id, bool valid); 15 14 void its_send_mapti_cmd(void *cmdq_base, u32 device_id, u32 event_id,
+15 -16
tools/testing/selftests/kvm/include/kvm_util.h
··· 111 111 struct sparsebit *vpages_valid; 112 112 struct sparsebit *vpages_mapped; 113 113 bool has_irqchip; 114 - vm_paddr_t ucall_mmio_addr; 114 + gpa_t ucall_mmio_addr; 115 115 gva_t handlers; 116 116 uint32_t dirty_ring_size; 117 117 uint64_t gpa_tag_mask; ··· 728 728 729 729 void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, 730 730 unsigned int npages); 731 - void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa); 731 + void *addr_gpa2hva(struct kvm_vm *vm, gpa_t gpa); 732 732 void *addr_gva2hva(struct kvm_vm *vm, gva_t gva); 733 - vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva); 734 - void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa); 733 + gpa_t addr_hva2gpa(struct kvm_vm *vm, void *hva); 734 + void *addr_gpa2alias(struct kvm_vm *vm, gpa_t gpa); 735 735 736 736 #ifndef vcpu_arch_put_guest 737 737 #define vcpu_arch_put_guest(mem, val) do { (mem) = (val); } while (0) 738 738 #endif 739 739 740 - static inline vm_paddr_t vm_untag_gpa(struct kvm_vm *vm, vm_paddr_t gpa) 740 + static inline gpa_t vm_untag_gpa(struct kvm_vm *vm, gpa_t gpa) 741 741 { 742 742 return gpa & ~vm->gpa_tag_mask; 743 743 } ··· 988 988 989 989 const char *exit_reason_str(unsigned int exit_reason); 990 990 991 - vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, 992 - uint32_t memslot); 993 - vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, 994 - vm_paddr_t paddr_min, uint32_t memslot, 995 - bool protected); 996 - vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm); 991 + gpa_t vm_phy_page_alloc(struct kvm_vm *vm, gpa_t paddr_min, uint32_t memslot); 992 + gpa_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, 993 + gpa_t paddr_min, uint32_t memslot, 994 + bool protected); 995 + gpa_t vm_alloc_page_table(struct kvm_vm *vm); 997 996 998 - static inline vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, 999 - vm_paddr_t paddr_min, uint32_t memslot) 997 + static inline gpa_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, 
998 + gpa_t paddr_min, uint32_t memslot) 1000 999 { 1001 1000 /* 1002 1001 * By default, allocate memory as protected for VMs that support ··· 1239 1240 * Returns the VM physical address of the translated VM virtual 1240 1241 * address given by @gva. 1241 1242 */ 1242 - vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva); 1243 + gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva); 1243 1244 1244 - static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, gva_t gva) 1245 + static inline gpa_t addr_gva2gpa(struct kvm_vm *vm, gva_t gva) 1245 1246 { 1246 1247 return addr_arch_gva2gpa(vm, gva); 1247 1248 } ··· 1290 1291 void kvm_arch_vm_finalize_vcpus(struct kvm_vm *vm); 1291 1292 void kvm_arch_vm_release(struct kvm_vm *vm); 1292 1293 1293 - bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr); 1294 + bool vm_is_gpa_protected(struct kvm_vm *vm, gpa_t paddr); 1294 1295 1295 1296 uint32_t guest_get_vcpuid(void); 1296 1297
+1 -1
tools/testing/selftests/kvm/include/kvm_util_types.h
··· 14 14 #define __kvm_static_assert(expr, msg, ...) _Static_assert(expr, msg) 15 15 #define kvm_static_assert(expr, ...) __kvm_static_assert(expr, ##__VA_ARGS__, #expr) 16 16 17 - typedef uint64_t vm_paddr_t; /* Virtual Machine (Guest) physical address */ 17 + typedef uint64_t gpa_t; /* Virtual Machine (Guest) physical address */ 18 18 typedef uint64_t gva_t; /* Virtual Machine (Guest) virtual address */ 19 19 20 20 #define INVALID_GPA (~(uint64_t)0)
+1 -1
tools/testing/selftests/kvm/include/riscv/ucall.h
··· 7 7 8 8 #define UCALL_EXIT_REASON KVM_EXIT_RISCV_SBI 9 9 10 - static inline void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) 10 + static inline void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa) 11 11 { 12 12 } 13 13
+1 -1
tools/testing/selftests/kvm/include/s390/ucall.h
··· 6 6 7 7 #define UCALL_EXIT_REASON KVM_EXIT_S390_SIEIC 8 8 9 - static inline void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) 9 + static inline void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa) 10 10 { 11 11 } 12 12
+2 -2
tools/testing/selftests/kvm/include/ucall_common.h
··· 29 29 struct ucall *hva; 30 30 }; 31 31 32 - void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa); 32 + void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa); 33 33 void ucall_arch_do_ucall(gva_t uc); 34 34 void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu); 35 35 ··· 39 39 const char *file, unsigned int line, 40 40 const char *fmt, ...); 41 41 uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc); 42 - void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa); 42 + void ucall_init(struct kvm_vm *vm, gpa_t mmio_gpa); 43 43 int ucall_nr_pages_required(uint64_t page_size); 44 44 45 45 /*
+2 -2
tools/testing/selftests/kvm/include/x86/sev.h
··· 120 120 vm_ioctl(vm, KVM_MEMORY_ENCRYPT_REG_REGION, &range); 121 121 } 122 122 123 - static inline void sev_launch_update_data(struct kvm_vm *vm, vm_paddr_t gpa, 123 + static inline void sev_launch_update_data(struct kvm_vm *vm, gpa_t gpa, 124 124 uint64_t size) 125 125 { 126 126 struct kvm_sev_launch_update_data update_data = { ··· 131 131 vm_sev_ioctl(vm, KVM_SEV_LAUNCH_UPDATE_DATA, &update_data); 132 132 } 133 133 134 - static inline void snp_launch_update_data(struct kvm_vm *vm, vm_paddr_t gpa, 134 + static inline void snp_launch_update_data(struct kvm_vm *vm, gpa_t gpa, 135 135 uint64_t hva, uint64_t size, uint8_t type) 136 136 { 137 137 struct kvm_sev_snp_launch_update update_data = {
+1 -1
tools/testing/selftests/kvm/include/x86/ucall.h
··· 6 6 7 7 #define UCALL_EXIT_REASON KVM_EXIT_IO 8 8 9 - static inline void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) 9 + static inline void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa) 10 10 { 11 11 } 12 12
+1 -1
tools/testing/selftests/kvm/kvm_page_table_test.c
··· 281 281 virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages); 282 282 283 283 /* Cache the HVA pointer of the region */ 284 - host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem); 284 + host_test_mem = addr_gpa2hva(vm, (gpa_t)guest_test_phys_mem); 285 285 286 286 /* Export shared structure test_args to guest */ 287 287 sync_global_to_guest(vm, test_args);
+2 -2
tools/testing/selftests/kvm/lib/arm64/gic_v3.c
··· 424 424 .gic_irq_set_group = gicv3_set_group, 425 425 }; 426 426 427 - void gic_rdist_enable_lpis(vm_paddr_t cfg_table, size_t cfg_table_size, 428 - vm_paddr_t pend_table) 427 + void gic_rdist_enable_lpis(gpa_t cfg_table, size_t cfg_table_size, 428 + gpa_t pend_table) 429 429 { 430 430 volatile void *rdist_base = gicr_base_cpu(guest_get_vcpuid()); 431 431
+5 -6
tools/testing/selftests/kvm/lib/arm64/gic_v3_its.c
··· 54 54 return -1; 55 55 } 56 56 57 - static void its_install_table(unsigned int type, vm_paddr_t base, size_t size) 57 + static void its_install_table(unsigned int type, gpa_t base, size_t size) 58 58 { 59 59 unsigned long offset = its_find_baser(type); 60 60 u64 baser; ··· 69 69 its_write_u64(offset, baser); 70 70 } 71 71 72 - static void its_install_cmdq(vm_paddr_t base, size_t size) 72 + static void its_install_cmdq(gpa_t base, size_t size) 73 73 { 74 74 u64 cbaser; 75 75 ··· 82 82 its_write_u64(GITS_CBASER, cbaser); 83 83 } 84 84 85 - void its_init(vm_paddr_t coll_tbl, size_t coll_tbl_sz, 86 - vm_paddr_t device_tbl, size_t device_tbl_sz, 87 - vm_paddr_t cmdq, size_t cmdq_size) 85 + void its_init(gpa_t coll_tbl, size_t coll_tbl_sz, gpa_t device_tbl, 86 + size_t device_tbl_sz, gpa_t cmdq, size_t cmdq_size) 88 87 { 89 88 u32 ctlr; 90 89 ··· 203 204 } 204 205 } 205 206 206 - void its_send_mapd_cmd(void *cmdq_base, u32 device_id, vm_paddr_t itt_base, 207 + void its_send_mapd_cmd(void *cmdq_base, u32 device_id, gpa_t itt_base, 207 208 size_t itt_size, bool valid) 208 209 { 209 210 struct its_cmd_block cmd = {};
+1 -1
tools/testing/selftests/kvm/lib/arm64/processor.c
··· 230 230 return virt_get_pte_hva_at_level(vm, gva, 3); 231 231 } 232 232 233 - vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva) 233 + gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva) 234 234 { 235 235 uint64_t *ptep = virt_get_pte_hva(vm, gva); 236 236
+1 -1
tools/testing/selftests/kvm/lib/arm64/ucall.c
··· 8 8 9 9 gva_t *ucall_exit_mmio_addr; 10 10 11 - void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) 11 + void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa) 12 12 { 13 13 gva_t mmio_gva = vm_vaddr_unused_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR); 14 14
+13 -14
tools/testing/selftests/kvm/lib/kvm_util.c
··· 1457 1457 uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0); 1458 1458 1459 1459 virt_pgd_alloc(vm); 1460 - vm_paddr_t paddr = __vm_phy_pages_alloc(vm, pages, 1461 - KVM_UTIL_MIN_PFN * vm->page_size, 1462 - vm->memslots[type], protected); 1460 + gpa_t paddr = __vm_phy_pages_alloc(vm, pages, 1461 + KVM_UTIL_MIN_PFN * vm->page_size, 1462 + vm->memslots[type], protected); 1463 1463 1464 1464 /* 1465 1465 * Find an unused range of virtual page addresses of at least ··· 1607 1607 * address providing the memory to the vm physical address is returned. 1608 1608 * A TEST_ASSERT failure occurs if no region containing gpa exists. 1609 1609 */ 1610 - void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa) 1610 + void *addr_gpa2hva(struct kvm_vm *vm, gpa_t gpa) 1611 1611 { 1612 1612 struct userspace_mem_region *region; 1613 1613 ··· 1640 1640 * VM physical address is returned. A TEST_ASSERT failure occurs if no 1641 1641 * region containing hva exists. 1642 1642 */ 1643 - vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva) 1643 + gpa_t addr_hva2gpa(struct kvm_vm *vm, void *hva) 1644 1644 { 1645 1645 struct rb_node *node; 1646 1646 ··· 1651 1651 if (hva >= region->host_mem) { 1652 1652 if (hva <= (region->host_mem 1653 1653 + region->region.memory_size - 1)) 1654 - return (vm_paddr_t)((uintptr_t) 1654 + return (gpa_t)((uintptr_t) 1655 1655 region->region.guest_phys_addr 1656 1656 + (hva - (uintptr_t)region->host_mem)); 1657 1657 ··· 1683 1683 * memory without mapping said memory in the guest's address space. And, for 1684 1684 * userfaultfd-based demand paging, to do so without triggering userfaults. 1685 1685 */ 1686 - void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa) 1686 + void *addr_gpa2alias(struct kvm_vm *vm, gpa_t gpa) 1687 1687 { 1688 1688 struct userspace_mem_region *region; 1689 1689 uintptr_t offset; ··· 2087 2087 * and their base address is returned. 
A TEST_ASSERT failure occurs if 2088 2088 * not enough pages are available at or above paddr_min. 2089 2089 */ 2090 - vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, 2091 - vm_paddr_t paddr_min, uint32_t memslot, 2092 - bool protected) 2090 + gpa_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, 2091 + gpa_t paddr_min, uint32_t memslot, 2092 + bool protected) 2093 2093 { 2094 2094 struct userspace_mem_region *region; 2095 2095 sparsebit_idx_t pg, base; ··· 2133 2133 return base * vm->page_size; 2134 2134 } 2135 2135 2136 - vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, 2137 - uint32_t memslot) 2136 + gpa_t vm_phy_page_alloc(struct kvm_vm *vm, gpa_t paddr_min, uint32_t memslot) 2138 2137 { 2139 2138 return vm_phy_pages_alloc(vm, 1, paddr_min, memslot); 2140 2139 } 2141 2140 2142 - vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm) 2141 + gpa_t vm_alloc_page_table(struct kvm_vm *vm) 2143 2142 { 2144 2143 return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 2145 2144 vm->memslots[MEM_REGION_PT]); ··· 2352 2353 kvm_selftest_arch_init(); 2353 2354 } 2354 2355 2355 - bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr) 2356 + bool vm_is_gpa_protected(struct kvm_vm *vm, gpa_t paddr) 2356 2357 { 2357 2358 sparsebit_idx_t pg = 0; 2358 2359 struct userspace_mem_region *region;
+5 -5
tools/testing/selftests/kvm/lib/loongarch/processor.c
··· 12 12 #define LOONGARCH_PAGE_TABLE_PHYS_MIN 0x200000 13 13 #define LOONGARCH_GUEST_STACK_VADDR_MIN 0x200000 14 14 15 - static vm_paddr_t invalid_pgtable[4]; 15 + static gpa_t invalid_pgtable[4]; 16 16 static gva_t exception_handlers; 17 17 18 18 static uint64_t virt_pte_index(struct kvm_vm *vm, gva_t gva, int level) ··· 35 35 return 1 << (vm->page_shift - 3); 36 36 } 37 37 38 - static void virt_set_pgtable(struct kvm_vm *vm, vm_paddr_t table, vm_paddr_t child) 38 + static void virt_set_pgtable(struct kvm_vm *vm, gpa_t table, gpa_t child) 39 39 { 40 40 uint64_t *ptep; 41 41 int i, ptrs_per_pte; ··· 49 49 void virt_arch_pgd_alloc(struct kvm_vm *vm) 50 50 { 51 51 int i; 52 - vm_paddr_t child, table; 52 + gpa_t child, table; 53 53 54 54 if (vm->mmu.pgd_created) 55 55 return; ··· 76 76 { 77 77 int level; 78 78 uint64_t *ptep; 79 - vm_paddr_t child; 79 + gpa_t child; 80 80 81 81 if (!vm->mmu.pgd_created) 82 82 goto unmapped_gva; ··· 106 106 exit(EXIT_FAILURE); 107 107 } 108 108 109 - vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva) 109 + gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva) 110 110 { 111 111 uint64_t *ptep; 112 112
+1 -1
tools/testing/selftests/kvm/lib/loongarch/ucall.c
··· 11 11 */ 12 12 gva_t *ucall_exit_mmio_addr; 13 13 14 - void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) 14 + void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa) 15 15 { 16 16 gva_t mmio_gva = vm_vaddr_unused_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR); 17 17
+1 -1
tools/testing/selftests/kvm/lib/memstress.c
··· 203 203 /* Add extra memory slots for testing */ 204 204 for (i = 0; i < slots; i++) { 205 205 uint64_t region_pages = guest_num_pages / slots; 206 - vm_paddr_t region_start = args->gpa + region_pages * args->guest_page_size * i; 206 + gpa_t region_start = args->gpa + region_pages * args->guest_page_size * i; 207 207 208 208 vm_userspace_mem_region_add(vm, backing_src, region_start, 209 209 MEMSTRESS_MEM_SLOT_INDEX + i,
+1 -1
tools/testing/selftests/kvm/lib/riscv/processor.c
··· 119 119 PGTBL_PTE_PERM_MASK | PGTBL_PTE_VALID_MASK; 120 120 } 121 121 122 - vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva) 122 + gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva) 123 123 { 124 124 uint64_t *ptep; 125 125 int level = vm->mmu.pgtable_levels - 1;
+2 -2
tools/testing/selftests/kvm/lib/s390/processor.c
··· 12 12 13 13 void virt_arch_pgd_alloc(struct kvm_vm *vm) 14 14 { 15 - vm_paddr_t paddr; 15 + gpa_t paddr; 16 16 17 17 TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x", 18 18 vm->page_size); ··· 86 86 entry[idx] = gpa; 87 87 } 88 88 89 - vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva) 89 + gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva) 90 90 { 91 91 int ri, idx; 92 92 uint64_t *entry;
+1 -1
tools/testing/selftests/kvm/lib/ucall_common.c
··· 25 25 */ 26 26 static struct ucall_header *ucall_pool; 27 27 28 - void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) 28 + void ucall_init(struct kvm_vm *vm, gpa_t mmio_gpa) 29 29 { 30 30 struct ucall_header *hdr; 31 31 struct ucall *uc;
+1 -1
tools/testing/selftests/kvm/lib/x86/processor.c
··· 618 618 segp->present = true; 619 619 } 620 620 621 - vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva) 621 + gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva) 622 622 { 623 623 int level = PG_LEVEL_NONE; 624 624 uint64_t *pte = __vm_get_page_table_entry(vm, &vm->mmu, gva, &level);
+1 -1
tools/testing/selftests/kvm/lib/x86/sev.c
··· 18 18 uint8_t page_type, bool private) 19 19 { 20 20 const struct sparsebit *protected_phy_pages = region->protected_phy_pages; 21 - const vm_paddr_t gpa_base = region->region.guest_phys_addr; 21 + const gpa_t gpa_base = region->region.guest_phys_addr; 22 22 const sparsebit_idx_t lowest_page_in_region = gpa_base >> vm->page_shift; 23 23 sparsebit_idx_t i, j; 24 24
+2 -2
tools/testing/selftests/kvm/riscv/sbi_pmu_test.c
··· 24 24 /* Snapshot shared memory data */ 25 25 #define PMU_SNAPSHOT_GPA_BASE BIT(30) 26 26 static void *snapshot_gva; 27 - static vm_paddr_t snapshot_gpa; 27 + static gpa_t snapshot_gpa; 28 28 29 29 static int vcpu_shared_irq_count; 30 30 static int counter_in_use; ··· 259 259 __GUEST_ASSERT(0, "SBI implementation version doesn't support PMU Snapshot"); 260 260 } 261 261 262 - static void snapshot_set_shmem(vm_paddr_t gpa, unsigned long flags) 262 + static void snapshot_set_shmem(gpa_t gpa, unsigned long flags) 263 263 { 264 264 unsigned long lo = (unsigned long)gpa; 265 265 #if __riscv_xlen == 32
+1 -1
tools/testing/selftests/kvm/s390/irq_routing.c
··· 27 27 struct kvm_irq_routing *routing; 28 28 struct kvm_vcpu *vcpu; 29 29 struct kvm_vm *vm; 30 - vm_paddr_t mem; 30 + gpa_t mem; 31 31 int ret; 32 32 33 33 struct kvm_irq_routing_entry ue = {
+1 -1
tools/testing/selftests/kvm/s390/ucontrol_test.c
··· 111 111 uintptr_t base_hva; 112 112 uintptr_t code_hva; 113 113 int kvm_run_size; 114 - vm_paddr_t pgd; 114 + gpa_t pgd; 115 115 void *vm_mem; 116 116 int vcpu_fd; 117 117 int kvm_fd;
+2 -2
tools/testing/selftests/kvm/steal_time.c
··· 239 239 /* SBI STA shmem must have 64-byte alignment */ 240 240 #define STEAL_TIME_SIZE ((sizeof(struct sta_struct) + 63) & ~63) 241 241 242 - static vm_paddr_t st_gpa[NR_VCPUS]; 242 + static gpa_t st_gpa[NR_VCPUS]; 243 243 244 244 struct sta_struct { 245 245 uint32_t sequence; ··· 249 249 uint8_t pad[47]; 250 250 } __packed; 251 251 252 - static void sta_set_shmem(vm_paddr_t gpa, unsigned long flags) 252 + static void sta_set_shmem(gpa_t gpa, unsigned long flags) 253 253 { 254 254 unsigned long lo = (unsigned long)gpa; 255 255 #if __riscv_xlen == 32
+1 -1
tools/testing/selftests/kvm/x86/hyperv_clock.c
··· 98 98 GUEST_ASSERT(r2 >= t1 && r2 - t2 < 100000); 99 99 } 100 100 101 - static void guest_main(struct ms_hyperv_tsc_page *tsc_page, vm_paddr_t tsc_page_gpa) 101 + static void guest_main(struct ms_hyperv_tsc_page *tsc_page, gpa_t tsc_page_gpa) 102 102 { 103 103 u64 tsc_scale, tsc_offset; 104 104
+1 -1
tools/testing/selftests/kvm/x86/hyperv_extended_hypercalls.c
··· 15 15 /* Any value is fine */ 16 16 #define EXT_CAPABILITIES 0xbull 17 17 18 - static void guest_code(vm_paddr_t in_pg_gpa, vm_paddr_t out_pg_gpa, 18 + static void guest_code(gpa_t in_pg_gpa, gpa_t out_pg_gpa, 19 19 gva_t out_pg_gva) 20 20 { 21 21 uint64_t *output_gva;
+4 -4
tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c
··· 62 62 */ 63 63 struct test_data { 64 64 gva_t hcall_gva; 65 - vm_paddr_t hcall_gpa; 65 + gpa_t hcall_gpa; 66 66 gva_t test_pages; 67 67 gva_t test_pages_pte[NTEST_PAGES]; 68 68 }; ··· 133 133 * Update PTEs swapping two test pages. 134 134 * TODO: use swap()/xchg() when these are provided. 135 135 */ 136 - static void swap_two_test_pages(vm_paddr_t pte_gva1, vm_paddr_t pte_gva2) 136 + static void swap_two_test_pages(gpa_t pte_gva1, gpa_t pte_gva2) 137 137 { 138 138 uint64_t tmp = *(uint64_t *)pte_gva1; 139 139 ··· 201 201 struct test_data *data = (struct test_data *)test_data; 202 202 struct hv_tlb_flush *flush = (struct hv_tlb_flush *)data->hcall_gva; 203 203 struct hv_tlb_flush_ex *flush_ex = (struct hv_tlb_flush_ex *)data->hcall_gva; 204 - vm_paddr_t hcall_gpa = data->hcall_gpa; 204 + gpa_t hcall_gpa = data->hcall_gpa; 205 205 int i, stage = 1; 206 206 207 207 wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID); ··· 582 582 struct kvm_vcpu *vcpu[3]; 583 583 pthread_t threads[2]; 584 584 gva_t test_data_page, gva; 585 - vm_paddr_t gpa; 585 + gpa_t gpa; 586 586 uint64_t *pte; 587 587 struct test_data *data; 588 588 struct ucall uc;
+2 -2
tools/testing/selftests/kvm/x86/kvm_clock_test.c
··· 31 31 #define GUEST_SYNC_CLOCK(__stage, __val) \ 32 32 GUEST_SYNC_ARGS(__stage, __val, 0, 0, 0) 33 33 34 - static void guest_main(vm_paddr_t pvti_pa, struct pvclock_vcpu_time_info *pvti) 34 + static void guest_main(gpa_t pvti_pa, struct pvclock_vcpu_time_info *pvti) 35 35 { 36 36 int i; 37 37 ··· 136 136 { 137 137 struct kvm_vcpu *vcpu; 138 138 gva_t pvti_gva; 139 - vm_paddr_t pvti_gpa; 139 + gpa_t pvti_gpa; 140 140 struct kvm_vm *vm; 141 141 int flags; 142 142
+1 -1
tools/testing/selftests/kvm/x86/vmx_nested_la57_state_test.c
··· 30 30 #define L2_GUEST_STACK_SIZE 64 31 31 unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; 32 32 u64 guest_cr4; 33 - vm_paddr_t pml5_pa, pml4_pa; 33 + gpa_t pml5_pa, pml4_pa; 34 34 u64 *pml5; 35 35 u64 exit_reason; 36 36