Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm updates from Paolo Bonzini:
"On top of a lot of Arm fixes, this includes a massive rename of types
and variables in tools/testing/selftests/kvm - these were
unnecessarily different from what the kernel uses, so they're being
made consistent.

arm64:

- Allow tracing for non-pKVM, which was accidentally disabled when
the series was merged

- Rationalise the way the pKVM hypercall ranges are defined by using
the same mechanism as already used for the vcpu_sysreg enum

- Enforce that SMCCC function numbers relayed by the pKVM proxy are
actually compliant with the specification

- Fix a couple of feature-to-idreg mappings which resulted in the
wrong sanitisation being applied

- Fix the GICD_IIDR revision number field that could never be
written correctly by userspace

- Make kvm_vcpu_initialized() correctly use its parameter instead of
relying on the surrounding context

- Enforce correct ordering in __pkvm_init_vcpu(), plugging a
potential pin leak at the same time

- Move __pkvm_init_finalise() to a less dangerous spot, avoiding
future problems

- Restore functional userspace irqchip support after a four year
breakage (last functional kernel was 5.18...)

- Spelling fixes

Selftests:

- Rename types across all KVM selftests to more closely align with
types used in the kernel:

vm_vaddr_t -> gva_t
vm_paddr_t -> gpa_t

uint64_t -> u64
uint32_t -> u32
uint16_t -> u16
uint8_t -> u8

int64_t -> s64
int32_t -> s32
int16_t -> s16
int8_t -> s8

- Fix Loongarch compilation"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (31 commits)
KVM: selftests: Add check_steal_time_uapi() implementation for LoongArch
KVM: arm64: Wake-up from WFI when iqrchip is in userspace
KVM: arm64: Fix initialisation order in __pkvm_init_finalise()
KVM: arm64: Fix pin leak and publication ordering in __pkvm_init_vcpu()
KVM: arm64: Fix kvm_vcpu_initialized() macro parameter
KVM: arm64: Fix FEAT_SPE_FnE to use PMSIDR_EL1.FnE, not PMSVer
KVM: arm64: Fix typo in feature check comments
KVM: arm64: Fix FEAT_Debugv8p9 to check DebugVer, not PMUVer
KVM: arm64: Reject non compliant SMCCC function calls in pKVM
KVM: arm64: vgic: Fix IIDR revision field extracted from wrong value
KVM: selftests: Replace "paddr" with "gpa" throughout
KVM: selftests: Replace "u64 nested_paddr" with "gpa_t l2_gpa"
KVM: selftests: Replace "u64 gpa" with "gpa_t" throughout
KVM: selftests: Replace "vaddr" with "gva" throughout
KVM: selftests: Clarify that arm64's inject_uer() takes a host PA, not a guest PA
KVM: selftests: Rename translate_to_host_paddr() => translate_hva_to_hpa()
KVM: selftests: Rename vm_vaddr_populate_bitmap() => vm_populate_gva_bitmap()
KVM: selftests: Rename vm_vaddr_unused_gap() => vm_unused_gva_gap()
KVM: selftests: Drop "vaddr_" from APIs that allocate memory for a given VM
KVM: selftests: Use u8 instead of uint8_t
...
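
The type renames listed in the message map one-to-one onto kernel-style names. As a rough sketch of what the selftests now spell (where exactly these definitions live is not shown on this page, so the placement is an assumption):

#include <stdint.h>

/* Kernel-style fixed-width names replacing the <stdint.h> spellings. */
typedef uint64_t u64;
typedef uint32_t u32;
typedef uint16_t u16;
typedef uint8_t  u8;
typedef int64_t  s64;
typedef int32_t  s32;
typedef int16_t  s16;
typedef int8_t   s8;

/* Guest-address types replacing vm_vaddr_t / vm_paddr_t. */
typedef u64 gva_t; /* guest virtual address */
typedef u64 gpa_t; /* guest physical address */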

+2807 -2879
+18 -10
arch/arm64/include/asm/kvm_asm.h
···
 
 #include <linux/mm.h>
 
+#define MARKER(m) \
+        m, __after_##m = m - 1
+
 enum __kvm_host_smccc_func {
         /* Hypercalls that are unavailable once pKVM has finalised. */
         /* __KVM_HOST_SMCCC_FUNC___kvm_hyp_init */
···
         __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs,
         __KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs,
         __KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config,
+
+        MARKER(__KVM_HOST_SMCCC_FUNC_MIN_PKVM),
+
         __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize,
-        __KVM_HOST_SMCCC_FUNC_MIN_PKVM = __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize,
 
         /* Hypercalls that are always available and common to [nh]VHE/pKVM. */
         __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc,
···
         __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_range,
         __KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
         __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,
+        __KVM_HOST_SMCCC_FUNC___tracing_load,
+        __KVM_HOST_SMCCC_FUNC___tracing_unload,
+        __KVM_HOST_SMCCC_FUNC___tracing_enable,
+        __KVM_HOST_SMCCC_FUNC___tracing_swap_reader,
+        __KVM_HOST_SMCCC_FUNC___tracing_update_clock,
+        __KVM_HOST_SMCCC_FUNC___tracing_reset,
+        __KVM_HOST_SMCCC_FUNC___tracing_enable_event,
+        __KVM_HOST_SMCCC_FUNC___tracing_write_event,
         __KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs,
         __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_vmcr_aprs,
         __KVM_HOST_SMCCC_FUNC___vgic_v5_save_apr,
         __KVM_HOST_SMCCC_FUNC___vgic_v5_restore_vmcr_apr,
-        __KVM_HOST_SMCCC_FUNC_MAX_NO_PKVM = __KVM_HOST_SMCCC_FUNC___vgic_v5_restore_vmcr_apr,
+
+        MARKER(__KVM_HOST_SMCCC_FUNC_PKVM_ONLY),
 
         /* Hypercalls that are available only when pKVM has finalised. */
         __KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp,
···
         __KVM_HOST_SMCCC_FUNC___pkvm_vcpu_load,
         __KVM_HOST_SMCCC_FUNC___pkvm_vcpu_put,
         __KVM_HOST_SMCCC_FUNC___pkvm_tlb_flush_vmid,
-        __KVM_HOST_SMCCC_FUNC___tracing_load,
-        __KVM_HOST_SMCCC_FUNC___tracing_unload,
-        __KVM_HOST_SMCCC_FUNC___tracing_enable,
-        __KVM_HOST_SMCCC_FUNC___tracing_swap_reader,
-        __KVM_HOST_SMCCC_FUNC___tracing_update_clock,
-        __KVM_HOST_SMCCC_FUNC___tracing_reset,
-        __KVM_HOST_SMCCC_FUNC___tracing_enable_event,
-        __KVM_HOST_SMCCC_FUNC___tracing_write_event,
+
+        MARKER(__KVM_HOST_SMCCC_FUNC_MAX)
 };
 
 #define DECLARE_KVM_VHE_SYM(sym) extern char sym[]
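
The MARKER() macro added above is easiest to see in isolation: the marker enumerator takes the next free value, and the hidden __after_ entry winds the counter back by one, so the next real hypercall shares the marker's value and a final marker ends up equal to the number of real entries. A standalone sketch with made-up enumerator names (only the macro body is taken from the hunk):

#include <stdio.h>

#define MARKER(m) m, __after_##m = m - 1

enum demo_func {
        FUNC_A,                        /* 0 */
        FUNC_B,                        /* 1 */
        MARKER(FUNC_MIN_PKVM),         /* marker = 2, hidden __after_ entry = 1 */
        FUNC_C,                        /* 2 -- shares the marker's value */
        FUNC_D,                        /* 3 */
        MARKER(FUNC_MAX)               /* 4 == number of real entries */
};

int main(void)
{
        printf("MIN_PKVM=%d C=%d MAX=%d\n", FUNC_MIN_PKVM, FUNC_C, FUNC_MAX);
        return 0;
}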
+1 -4
arch/arm64/include/asm/kvm_host.h
···
         r = __VNCR_START__ + ((VNCR_ ## r) / 8), \
         __after_##r = __MAX__(__before_##r - 1, r)
 
-#define MARKER(m) \
-        m, __after_##m = m - 1
-
 enum vcpu_sysreg {
         __INVALID_SYSREG__, /* 0 is reserved as an invalid value */
         MPIDR_EL1, /* MultiProcessor Affinity Register */
···
 #define kvm_vcpu_has_feature(k, f) __vcpu_has_feature(&(k)->arch, (f))
 #define vcpu_has_feature(v, f) __vcpu_has_feature(&(v)->kvm->arch, (f))
 
-#define kvm_vcpu_initialized(v) vcpu_get_flag(vcpu, VCPU_INITIALIZED)
+#define kvm_vcpu_initialized(v) vcpu_get_flag(v, VCPU_INITIALIZED)
 
 int kvm_trng_call(struct kvm_vcpu *vcpu);
 #ifdef CONFIG_KVM
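
The kvm_vcpu_initialized() hunk fixes a classic macro slip: the old body named the surrounding variable vcpu instead of the parameter, so it silently answered for whatever vcpu happened to be in scope at the call site. A toy reproduction with a hypothetical struct (not kernel code):

#include <assert.h>
#include <stdbool.h>

struct demo_vcpu { bool initialized; };

#define OLD_IS_INIT(v) (vcpu->initialized)  /* ignores 'v', captures the caller's 'vcpu' */
#define NEW_IS_INIT(v) ((v)->initialized)   /* uses the argument, as in the fix */

int main(void)
{
        struct demo_vcpu self = { .initialized = false };
        struct demo_vcpu other = { .initialized = true };
        struct demo_vcpu *vcpu = &self;

        assert(OLD_IS_INIT(&other) == false); /* silently tested 'vcpu', not 'other' */
        assert(NEW_IS_INIT(&other) == true);
        return 0;
}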
+4
arch/arm64/kvm/arm.c
···
 {
         bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF | HCR_VSE);
 
+        irq_lines |= (!irqchip_in_kernel(v->kvm) &&
+                      (kvm_timer_should_notify_user(v) ||
+                       kvm_pmu_should_notify_user(v)));
+
         return ((irq_lines || kvm_vgic_vcpu_pending_irq(v))
                 && !kvm_arm_vcpu_stopped(v) && !v->arch.pause);
 }
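
This hunk is the change behind the "restore functional userspace irqchip support" item: a vCPU blocked in WFI must also count as runnable when a userspace irqchip has a pending timer or PMU event to hand back to the VMM. A simplified sketch of the predicate, with plain booleans standing in for the kernel helpers and the in-kernel vGIC term omitted:

#include <stdbool.h>

bool vcpu_runnable(bool hw_irq_lines, bool irqchip_in_kernel,
                   bool timer_notify_user, bool pmu_notify_user,
                   bool stopped, bool paused)
{
        bool irq_lines = hw_irq_lines;

        /* With a userspace irqchip, pending notifications must wake the vCPU. */
        if (!irqchip_in_kernel)
                irq_lines |= timer_notify_user || pmu_notify_user;

        return irq_lines && !stopped && !paused;
}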
+16 -7
arch/arm64/kvm/config.c
···
 }
 
 #define FEAT_SPE        ID_AA64DFR0_EL1, PMSVer, IMP
-#define FEAT_SPE_FnE    ID_AA64DFR0_EL1, PMSVer, V1P2
 #define FEAT_BRBE       ID_AA64DFR0_EL1, BRBE, IMP
 #define FEAT_TRC_SR     ID_AA64DFR0_EL1, TraceVer, IMP
 #define FEAT_PMUv3      ID_AA64DFR0_EL1, PMUVer, IMP
···
 #define FEAT_SRMASK     ID_AA64MMFR4_EL1, SRMASK, IMP
 #define FEAT_PoPS       ID_AA64MMFR4_EL1, PoPS, IMP
 #define FEAT_PFAR       ID_AA64PFR1_EL1, PFAR, IMP
-#define FEAT_Debugv8p9  ID_AA64DFR0_EL1, PMUVer, V3P9
+#define FEAT_Debugv8p9  ID_AA64DFR0_EL1, DebugVer, V8P9
 #define FEAT_PMUv3_SS   ID_AA64DFR0_EL1, PMSS, IMP
 #define FEAT_SEBEP      ID_AA64DFR0_EL1, SEBEP, IMP
 #define FEAT_EBEP       ID_AA64DFR1_EL1, EBEP, IMP
···
 static bool feat_sme_smps(struct kvm *kvm)
 {
         /*
-         * Revists this if KVM ever supports SME -- this really should
+         * Revisit this if KVM ever supports SME -- this really should
          * look at the guest's view of SMIDR_EL1. Funnily enough, this
          * is not captured in the JSON file, but only as a note in the
          * ARM ARM.
···
 static bool feat_spe_fds(struct kvm *kvm)
 {
         /*
-         * Revists this if KVM ever supports SPE -- this really should
+         * Revisit this if KVM ever supports SPE -- this really should
          * look at the guest's view of PMSIDR_EL1.
          */
         return (kvm_has_feat(kvm, FEAT_SPEv1p4) &&
                 (read_sysreg_s(SYS_PMSIDR_EL1) & PMSIDR_EL1_FDS));
 }
 
+static bool feat_spe_fne(struct kvm *kvm)
+{
+        /*
+         * Revisit this if KVM ever supports SPE -- this really should
+         * look at the guest's view of PMSIDR_EL1.
+         */
+        return (kvm_has_feat(kvm, FEAT_SPEv1p2) &&
+                (read_sysreg_s(SYS_PMSIDR_EL1) & PMSIDR_EL1_FnE));
+}
+
 static bool feat_trbe_mpam(struct kvm *kvm)
 {
         /*
-         * Revists this if KVM ever supports both MPAM and TRBE --
+         * Revisit this if KVM ever supports both MPAM and TRBE --
          * this really should look at the guest's view of TRBIDR_EL1.
          */
         return (kvm_has_feat(kvm, FEAT_TRBE) &&
···
                    HDFGRTR_EL2_PMBPTR_EL1 |
                    HDFGRTR_EL2_PMBLIMITR_EL1,
                    FEAT_SPE),
-        NEEDS_FEAT(HDFGRTR_EL2_nPMSNEVFR_EL1, FEAT_SPE_FnE),
+        NEEDS_FEAT(HDFGRTR_EL2_nPMSNEVFR_EL1, feat_spe_fne),
         NEEDS_FEAT(HDFGRTR_EL2_nBRBDATA |
                    HDFGRTR_EL2_nBRBCTL |
                    HDFGRTR_EL2_nBRBIDR,
···
                    HDFGWTR_EL2_PMBPTR_EL1 |
                    HDFGWTR_EL2_PMBLIMITR_EL1,
                    FEAT_SPE),
-        NEEDS_FEAT(HDFGWTR_EL2_nPMSNEVFR_EL1, FEAT_SPE_FnE),
+        NEEDS_FEAT(HDFGWTR_EL2_nPMSNEVFR_EL1, feat_spe_fne),
         NEEDS_FEAT(HDFGWTR_EL2_nBRBDATA |
                    HDFGWTR_EL2_nBRBCTL,
                    FEAT_BRBE),
+17 -13
arch/arm64/kvm/hyp/nvhe/hyp-main.c
···
         HANDLE_FUNC(__kvm_tlb_flush_vmid_range),
         HANDLE_FUNC(__kvm_flush_cpu_context),
         HANDLE_FUNC(__kvm_timer_set_cntvoff),
+        HANDLE_FUNC(__tracing_load),
+        HANDLE_FUNC(__tracing_unload),
+        HANDLE_FUNC(__tracing_enable),
+        HANDLE_FUNC(__tracing_swap_reader),
+        HANDLE_FUNC(__tracing_update_clock),
+        HANDLE_FUNC(__tracing_reset),
+        HANDLE_FUNC(__tracing_enable_event),
+        HANDLE_FUNC(__tracing_write_event),
         HANDLE_FUNC(__vgic_v3_save_aprs),
         HANDLE_FUNC(__vgic_v3_restore_vmcr_aprs),
         HANDLE_FUNC(__vgic_v5_save_apr),
···
         HANDLE_FUNC(__pkvm_vcpu_load),
         HANDLE_FUNC(__pkvm_vcpu_put),
         HANDLE_FUNC(__pkvm_tlb_flush_vmid),
-        HANDLE_FUNC(__tracing_load),
-        HANDLE_FUNC(__tracing_unload),
-        HANDLE_FUNC(__tracing_enable),
-        HANDLE_FUNC(__tracing_swap_reader),
-        HANDLE_FUNC(__tracing_update_clock),
-        HANDLE_FUNC(__tracing_reset),
-        HANDLE_FUNC(__tracing_enable_event),
-        HANDLE_FUNC(__tracing_write_event),
 };
 
 static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
 {
         DECLARE_REG(unsigned long, id, host_ctxt, 0);
-        unsigned long hcall_min = 0, hcall_max = -1;
+        unsigned long hcall_min = 0, hcall_max = __KVM_HOST_SMCCC_FUNC_MAX;
         hcall_t hfn;
+
+        BUILD_BUG_ON(ARRAY_SIZE(host_hcall) != __KVM_HOST_SMCCC_FUNC_MAX);
 
         /*
          * If pKVM has been initialised then reject any calls to the
···
         if (static_branch_unlikely(&kvm_protected_mode_initialized)) {
                 hcall_min = __KVM_HOST_SMCCC_FUNC_MIN_PKVM;
         } else {
-                hcall_max = __KVM_HOST_SMCCC_FUNC_MAX_NO_PKVM;
+                hcall_max = __KVM_HOST_SMCCC_FUNC_PKVM_ONLY;
         }
 
         id &= ~ARM_SMCCC_CALL_HINTS;
         id -= KVM_HOST_SMCCC_ID(0);
 
-        if (unlikely(id < hcall_min || id > hcall_max ||
-                     id >= ARRAY_SIZE(host_hcall))) {
+        if (unlikely(id < hcall_min || id >= hcall_max))
                 goto inval;
-        }
 
         hfn = host_hcall[id];
         if (unlikely(!hfn))
···
         }
 
         func_id &= ~ARM_SMCCC_CALL_HINTS;
+        if (upper_32_bits(func_id)) {
+                cpu_reg(host_ctxt, 0) = SMCCC_RET_NOT_SUPPORTED;
+                goto exit_skip_instr;
+        }
 
         handled = kvm_host_psci_handler(host_ctxt, func_id);
         if (!handled)
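
With __KVM_HOST_SMCCC_FUNC_MAX now equal to the size of the handler table (enforced by the BUILD_BUG_ON above), the dispatch bound collapses into a single half-open range check. A sketch of that logic with the enum values passed in as plain integers (the function and parameter names here are illustrative, not the kernel's):

#include <stdbool.h>

bool hcall_id_allowed(unsigned long id, bool pkvm_finalised,
                      unsigned long min_pkvm, unsigned long pkvm_only,
                      unsigned long max)
{
        unsigned long hcall_min = 0, hcall_max = max;

        if (pkvm_finalised)
                hcall_min = min_pkvm;  /* pre-finalisation hypercalls rejected */
        else
                hcall_max = pkvm_only; /* pKVM-only hypercalls rejected */

        /*
         * Half-open range: hcall_max is an exclusive bound, so no separate
         * ARRAY_SIZE() check is needed.
         */
        return id >= hcall_min && id < hcall_max;
}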
+25 -13
arch/arm64/kvm/hyp/nvhe/pkvm.c
···
         if (hyp_vm->kvm.created_vcpus <= vcpu_idx)
                 goto unlock;
 
-        hyp_vcpu = hyp_vm->vcpus[vcpu_idx];
+        /* Pairs with smp_store_release() in register_hyp_vcpu(). */
+        hyp_vcpu = smp_load_acquire(&hyp_vm->vcpus[vcpu_idx]);
         if (!hyp_vcpu)
                 goto unlock;
 
···
  * the page-aligned size of 'struct pkvm_hyp_vcpu'.
  * Return 0 on success, negative error code on failure.
  */
+static int register_hyp_vcpu(struct pkvm_hyp_vm *hyp_vm,
+                             struct pkvm_hyp_vcpu *hyp_vcpu)
+{
+        unsigned int idx = hyp_vcpu->vcpu.vcpu_idx;
+
+        if (idx >= hyp_vm->kvm.created_vcpus)
+                return -EINVAL;
+
+        if (hyp_vm->vcpus[idx])
+                return -EINVAL;
+
+        /*
+         * Ensure the hyp_vcpu is initialised before publishing it to
+         * the vCPU-load path via 'hyp_vm->vcpus[]'.
+         */
+        smp_store_release(&hyp_vm->vcpus[idx], hyp_vcpu);
+        return 0;
+}
+
 int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
                      unsigned long vcpu_hva)
 {
         struct pkvm_hyp_vcpu *hyp_vcpu;
         struct pkvm_hyp_vm *hyp_vm;
-        unsigned int idx;
         int ret;
 
         hyp_vcpu = map_donated_memory(vcpu_hva, sizeof(*hyp_vcpu));
···
         if (ret)
                 goto unlock;
 
-        idx = hyp_vcpu->vcpu.vcpu_idx;
-        if (idx >= hyp_vm->kvm.created_vcpus) {
-                ret = -EINVAL;
-                goto unlock;
+        ret = register_hyp_vcpu(hyp_vm, hyp_vcpu);
+        if (ret) {
+                unpin_host_vcpu(host_vcpu);
+                unpin_host_sve_state(hyp_vcpu);
         }
-
-        if (hyp_vm->vcpus[idx]) {
-                ret = -EINVAL;
-                goto unlock;
-        }
-
-        hyp_vm->vcpus[idx] = hyp_vcpu;
 unlock:
         hyp_spin_unlock(&vm_table_lock);
 
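
register_hyp_vcpu() publishes the vCPU pointer with release semantics and the load path reads it back with acquire semantics, so observing a non-NULL slot implies the structure behind it is fully initialised. A userspace analogue using C11 atomics (the kernel's smp_store_release()/smp_load_acquire() are not available here, so this only sketches the pattern):

#include <stdatomic.h>

struct demo_vcpu { int state; };

static _Atomic(struct demo_vcpu *) slot;

void publish(struct demo_vcpu *v)
{
        v->state = 1;   /* initialise everything first */
        /* Release: prior writes become visible to whoever sees the pointer. */
        atomic_store_explicit(&slot, v, memory_order_release);
}

struct demo_vcpu *lookup(void)
{
        /* Acquire: pairs with the release store in publish(). */
        return atomic_load_explicit(&slot, memory_order_acquire);
}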
+4 -4
arch/arm64/kvm/hyp/nvhe/setup.c
···
         };
         pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops;
 
-        ret = fix_host_ownership();
-        if (ret)
-                goto out;
-
         ret = fix_hyp_pgtable_refcnt();
         if (ret)
                 goto out;
 
         ret = hyp_create_fixmap();
+        if (ret)
+                goto out;
+
+        ret = fix_host_ownership();
         if (ret)
                 goto out;
 
+1 -1
arch/arm64/kvm/vgic/vgic-mmio-v2.c
···
          * migration from old kernels to new kernels with legacy
          * userspace.
          */
-        reg = FIELD_GET(GICD_IIDR_REVISION_MASK, reg);
+        reg = FIELD_GET(GICD_IIDR_REVISION_MASK, val);
         switch (reg) {
         case KVM_VGIC_IMP_REV_2:
         case KVM_VGIC_IMP_REV_3:
+1 -1
arch/arm64/kvm/vgic/vgic-mmio-v3.c
···
         if ((reg ^ val) & ~GICD_IIDR_REVISION_MASK)
                 return -EINVAL;
 
-        reg = FIELD_GET(GICD_IIDR_REVISION_MASK, reg);
+        reg = FIELD_GET(GICD_IIDR_REVISION_MASK, val);
         switch (reg) {
         case KVM_VGIC_IMP_REV_2:
         case KVM_VGIC_IMP_REV_3:
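
Both vgic hunks fix the same slip: the revision must be extracted from val, the value userspace is writing, rather than from reg, the currently programmed IIDR; otherwise the switch only ever sees the old revision and the write can never take effect. A minimal model of the extraction, with a mask and shift standing in for FIELD_GET (Revision is bits [15:12] of GICD_IIDR in the GIC architecture):

#include <stdint.h>

#define GICD_IIDR_REVISION_SHIFT 12
#define GICD_IIDR_REVISION_MASK  (0xfu << GICD_IIDR_REVISION_SHIFT)

static inline uint32_t iidr_revision(uint32_t iidr)
{
        return (iidr & GICD_IIDR_REVISION_MASK) >> GICD_IIDR_REVISION_SHIFT;
}

/* The write handler must decode the incoming value, not the stored register: */
static inline uint32_t revision_being_written(uint32_t reg, uint32_t val)
{
        (void)reg;                /* the old code mistakenly decoded this */
        return iidr_revision(val);
}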
+22 -22
tools/testing/selftests/kvm/access_tracking_perf_test.c
··· 101 101 enum vm_mem_backing_src_type backing_src; 102 102 103 103 /* The amount of memory to allocate for each vCPU. */ 104 - uint64_t vcpu_memory_bytes; 104 + u64 vcpu_memory_bytes; 105 105 106 106 /* The number of vCPUs to create in the VM. */ 107 107 int nr_vcpus; 108 108 }; 109 109 110 - static uint64_t pread_uint64(int fd, const char *filename, uint64_t index) 110 + static u64 pread_u64(int fd, const char *filename, u64 index) 111 111 { 112 - uint64_t value; 112 + u64 value; 113 113 off_t offset = index * sizeof(value); 114 114 115 115 TEST_ASSERT(pread(fd, &value, sizeof(value), offset) == sizeof(value), ··· 123 123 #define PAGEMAP_PRESENT (1ULL << 63) 124 124 #define PAGEMAP_PFN_MASK ((1ULL << 55) - 1) 125 125 126 - static uint64_t lookup_pfn(int pagemap_fd, struct kvm_vm *vm, uint64_t gva) 126 + static u64 lookup_pfn(int pagemap_fd, struct kvm_vm *vm, gva_t gva) 127 127 { 128 - uint64_t hva = (uint64_t) addr_gva2hva(vm, gva); 129 - uint64_t entry; 130 - uint64_t pfn; 128 + u64 hva = (u64)addr_gva2hva(vm, gva); 129 + u64 entry; 130 + u64 pfn; 131 131 132 - entry = pread_uint64(pagemap_fd, "pagemap", hva / getpagesize()); 132 + entry = pread_u64(pagemap_fd, "pagemap", hva / getpagesize()); 133 133 if (!(entry & PAGEMAP_PRESENT)) 134 134 return 0; 135 135 ··· 139 139 return pfn; 140 140 } 141 141 142 - static bool is_page_idle(int page_idle_fd, uint64_t pfn) 142 + static bool is_page_idle(int page_idle_fd, u64 pfn) 143 143 { 144 - uint64_t bits = pread_uint64(page_idle_fd, "page_idle", pfn / 64); 144 + u64 bits = pread_u64(page_idle_fd, "page_idle", pfn / 64); 145 145 146 146 return !!((bits >> (pfn % 64)) & 1); 147 147 } 148 148 149 - static void mark_page_idle(int page_idle_fd, uint64_t pfn) 149 + static void mark_page_idle(int page_idle_fd, u64 pfn) 150 150 { 151 - uint64_t bits = 1ULL << (pfn % 64); 151 + u64 bits = 1ULL << (pfn % 64); 152 152 153 153 TEST_ASSERT(pwrite(page_idle_fd, &bits, 8, 8 * (pfn / 64)) == 8, 154 154 "Set page_idle bits for PFN 0x%" PRIx64, pfn); ··· 174 174 struct memstress_vcpu_args *vcpu_args) 175 175 { 176 176 int vcpu_idx = vcpu_args->vcpu_idx; 177 - uint64_t base_gva = vcpu_args->gva; 178 - uint64_t pages = vcpu_args->pages; 179 - uint64_t page; 180 - uint64_t still_idle = 0; 181 - uint64_t no_pfn = 0; 177 + gva_t base_gva = vcpu_args->gva; 178 + u64 pages = vcpu_args->pages; 179 + u64 page; 180 + u64 still_idle = 0; 181 + u64 no_pfn = 0; 182 182 int page_idle_fd; 183 183 int pagemap_fd; 184 184 ··· 193 193 TEST_ASSERT(pagemap_fd > 0, "Failed to open pagemap."); 194 194 195 195 for (page = 0; page < pages; page++) { 196 - uint64_t gva = base_gva + page * memstress_args.guest_page_size; 197 - uint64_t pfn = lookup_pfn(pagemap_fd, vm, gva); 196 + gva_t gva = base_gva + page * memstress_args.guest_page_size; 197 + u64 pfn = lookup_pfn(pagemap_fd, vm, gva); 198 198 199 199 if (!pfn) { 200 200 no_pfn++; ··· 297 297 lru_gen_last_gen = new_gen; 298 298 } 299 299 300 - static void assert_ucall(struct kvm_vcpu *vcpu, uint64_t expected_ucall) 300 + static void assert_ucall(struct kvm_vcpu *vcpu, u64 expected_ucall) 301 301 { 302 302 struct ucall uc; 303 - uint64_t actual_ucall = get_ucall(vcpu, &uc); 303 + u64 actual_ucall = get_ucall(vcpu, &uc); 304 304 305 305 TEST_ASSERT(expected_ucall == actual_ucall, 306 306 "Guest exited unexpectedly (expected ucall %" PRIu64 ··· 417 417 */ 418 418 test_pages = params->nr_vcpus * params->vcpu_memory_bytes / 419 419 max(memstress_args.guest_page_size, 420 - (uint64_t)getpagesize()); 420 + (u64)getpagesize()); 421 421 
422 422 memstress_start_vcpu_threads(nr_vcpus, vcpu_thread_main); 423 423
+3 -3
tools/testing/selftests/kvm/arch_timer.c
··· 78 78 return NULL; 79 79 } 80 80 81 - static uint32_t test_get_pcpu(void) 81 + static u32 test_get_pcpu(void) 82 82 { 83 - uint32_t pcpu; 83 + u32 pcpu; 84 84 unsigned int nproc_conf; 85 85 cpu_set_t online_cpuset; 86 86 ··· 98 98 static int test_migrate_vcpu(unsigned int vcpu_idx) 99 99 { 100 100 int ret; 101 - uint32_t new_pcpu = test_get_pcpu(); 101 + u32 new_pcpu = test_get_pcpu(); 102 102 103 103 pr_debug("Migrating vCPU: %u to pCPU: %u\n", vcpu_idx, new_pcpu); 104 104
+7 -7
tools/testing/selftests/kvm/arm64/aarch32_id_regs.c
··· 66 66 } 67 67 } 68 68 69 - static uint64_t raz_wi_reg_ids[] = { 69 + static u64 raz_wi_reg_ids[] = { 70 70 KVM_ARM64_SYS_REG(SYS_ID_PFR0_EL1), 71 71 KVM_ARM64_SYS_REG(SYS_ID_PFR1_EL1), 72 72 KVM_ARM64_SYS_REG(SYS_ID_DFR0_EL1), ··· 94 94 int i; 95 95 96 96 for (i = 0; i < ARRAY_SIZE(raz_wi_reg_ids); i++) { 97 - uint64_t reg_id = raz_wi_reg_ids[i]; 98 - uint64_t val; 97 + u64 reg_id = raz_wi_reg_ids[i]; 98 + u64 val; 99 99 100 100 val = vcpu_get_reg(vcpu, reg_id); 101 101 TEST_ASSERT_EQ(val, 0); ··· 111 111 } 112 112 } 113 113 114 - static uint64_t raz_invariant_reg_ids[] = { 114 + static u64 raz_invariant_reg_ids[] = { 115 115 KVM_ARM64_SYS_REG(SYS_ID_AFR0_EL1), 116 116 KVM_ARM64_SYS_REG(sys_reg(3, 0, 0, 3, 3)), 117 117 KVM_ARM64_SYS_REG(SYS_ID_DFR1_EL1), ··· 123 123 int i, r; 124 124 125 125 for (i = 0; i < ARRAY_SIZE(raz_invariant_reg_ids); i++) { 126 - uint64_t reg_id = raz_invariant_reg_ids[i]; 127 - uint64_t val; 126 + u64 reg_id = raz_invariant_reg_ids[i]; 127 + u64 val; 128 128 129 129 val = vcpu_get_reg(vcpu, reg_id); 130 130 TEST_ASSERT_EQ(val, 0); ··· 142 142 143 143 static bool vcpu_aarch64_only(struct kvm_vcpu *vcpu) 144 144 { 145 - uint64_t val, el0; 145 + u64 val, el0; 146 146 147 147 val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1)); 148 148
+4 -4
tools/testing/selftests/kvm/arm64/arch_timer.c
··· 56 56 struct test_vcpu_shared_data *shared_data) 57 57 { 58 58 enum guest_stage stage = shared_data->guest_stage; 59 - uint64_t xcnt = 0, xcnt_diff_us, cval = 0; 59 + u64 xcnt = 0, xcnt_diff_us, cval = 0; 60 60 unsigned long xctl = 0; 61 61 unsigned int timer_irq = 0; 62 62 unsigned int accessor; ··· 105 105 static void guest_irq_handler(struct ex_regs *regs) 106 106 { 107 107 unsigned int intid = gic_get_and_ack_irq(); 108 - uint32_t cpu = guest_get_vcpuid(); 108 + u32 cpu = guest_get_vcpuid(); 109 109 struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu]; 110 110 111 111 guest_validate_irq(intid, shared_data); ··· 116 116 static void guest_run_stage(struct test_vcpu_shared_data *shared_data, 117 117 enum guest_stage stage) 118 118 { 119 - uint32_t irq_iter, config_iter; 119 + u32 irq_iter, config_iter; 120 120 121 121 shared_data->guest_stage = stage; 122 122 shared_data->nr_iter = 0; ··· 140 140 141 141 static void guest_code(void) 142 142 { 143 - uint32_t cpu = guest_get_vcpuid(); 143 + u32 cpu = guest_get_vcpuid(); 144 144 struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu]; 145 145 146 146 local_irq_disable();
+80 -81
tools/testing/selftests/kvm/arm64/arch_timer_edge_cases.c
··· 23 23 #include "vgic.h" 24 24 25 25 /* Depends on counter width. */ 26 - static uint64_t CVAL_MAX; 26 + static u64 CVAL_MAX; 27 27 /* tval is a signed 32-bit int. */ 28 - static const int32_t TVAL_MAX = INT32_MAX; 29 - static const int32_t TVAL_MIN = INT32_MIN; 28 + static const s32 TVAL_MAX = INT32_MAX; 29 + static const s32 TVAL_MIN = INT32_MIN; 30 30 31 31 /* After how much time we say there is no IRQ. */ 32 - static const uint32_t TIMEOUT_NO_IRQ_US = 50000; 32 + static const u32 TIMEOUT_NO_IRQ_US = 50000; 33 33 34 34 /* Counter value to use as the starting one for most tests. Set to CVAL_MAX/2 */ 35 - static uint64_t DEF_CNT; 35 + static u64 DEF_CNT; 36 36 37 37 /* Number of runs. */ 38 - static const uint32_t NR_TEST_ITERS_DEF = 5; 38 + static const u32 NR_TEST_ITERS_DEF = 5; 39 39 40 40 /* Default wait test time in ms. */ 41 - static const uint32_t WAIT_TEST_MS = 10; 41 + static const u32 WAIT_TEST_MS = 10; 42 42 43 43 /* Default "long" wait test time in ms. */ 44 - static const uint32_t LONG_WAIT_TEST_MS = 100; 44 + static const u32 LONG_WAIT_TEST_MS = 100; 45 45 46 46 /* Shared with IRQ handler. */ 47 47 struct test_vcpu_shared_data { ··· 53 53 /* Virtual or physical timer and counter tests. */ 54 54 enum arch_timer timer; 55 55 /* Delay used for most timer tests. */ 56 - uint64_t wait_ms; 56 + u64 wait_ms; 57 57 /* Delay used in the test_long_timer_delays test. */ 58 - uint64_t long_wait_ms; 58 + u64 long_wait_ms; 59 59 /* Number of iterations. */ 60 60 int iterations; 61 61 /* Whether to test the physical timer. */ ··· 82 82 NO_USERSPACE_CMD, 83 83 }; 84 84 85 - typedef void (*sleep_method_t)(enum arch_timer timer, uint64_t usec); 85 + typedef void (*sleep_method_t)(enum arch_timer timer, u64 usec); 86 86 87 - static void sleep_poll(enum arch_timer timer, uint64_t usec); 88 - static void sleep_sched_poll(enum arch_timer timer, uint64_t usec); 89 - static void sleep_in_userspace(enum arch_timer timer, uint64_t usec); 90 - static void sleep_migrate(enum arch_timer timer, uint64_t usec); 87 + static void sleep_poll(enum arch_timer timer, u64 usec); 88 + static void sleep_sched_poll(enum arch_timer timer, u64 usec); 89 + static void sleep_in_userspace(enum arch_timer timer, u64 usec); 90 + static void sleep_migrate(enum arch_timer timer, u64 usec); 91 91 92 92 sleep_method_t sleep_method[] = { 93 93 sleep_poll, ··· 115 115 TIMER_TVAL, 116 116 }; 117 117 118 - static void assert_irqs_handled(uint32_t n) 118 + static void assert_irqs_handled(u32 n) 119 119 { 120 120 int h = atomic_read(&shared_data.handled); 121 121 122 122 __GUEST_ASSERT(h == n, "Handled %d IRQS but expected %d", h, n); 123 123 } 124 124 125 - static void userspace_cmd(uint64_t cmd) 125 + static void userspace_cmd(u64 cmd) 126 126 { 127 127 GUEST_SYNC_ARGS(cmd, 0, 0, 0, 0); 128 128 } ··· 132 132 userspace_cmd(USERSPACE_MIGRATE_SELF); 133 133 } 134 134 135 - static void userspace_sleep(uint64_t usecs) 135 + static void userspace_sleep(u64 usecs) 136 136 { 137 137 GUEST_SYNC_ARGS(USERSPACE_USLEEP, usecs, 0, 0, 0); 138 138 } 139 139 140 - static void set_counter(enum arch_timer timer, uint64_t counter) 140 + static void set_counter(enum arch_timer timer, u64 counter) 141 141 { 142 142 GUEST_SYNC_ARGS(SET_COUNTER_VALUE, counter, timer, 0, 0); 143 143 } ··· 146 146 { 147 147 unsigned int intid = gic_get_and_ack_irq(); 148 148 enum arch_timer timer; 149 - uint64_t cnt, cval; 150 - uint32_t ctl; 149 + u64 cnt, cval; 150 + u32 ctl; 151 151 bool timer_condition, istatus; 152 152 153 153 if (intid == IAR_SPURIOUS) { ··· 178 
178 gic_set_eoi(intid); 179 179 } 180 180 181 - static void set_cval_irq(enum arch_timer timer, uint64_t cval_cycles, 182 - uint32_t ctl) 181 + static void set_cval_irq(enum arch_timer timer, u64 cval_cycles, 182 + u32 ctl) 183 183 { 184 184 atomic_set(&shared_data.handled, 0); 185 185 atomic_set(&shared_data.spurious, 0); ··· 187 187 timer_set_ctl(timer, ctl); 188 188 } 189 189 190 - static void set_tval_irq(enum arch_timer timer, uint64_t tval_cycles, 191 - uint32_t ctl) 190 + static void set_tval_irq(enum arch_timer timer, u64 tval_cycles, 191 + u32 ctl) 192 192 { 193 193 atomic_set(&shared_data.handled, 0); 194 194 atomic_set(&shared_data.spurious, 0); ··· 196 196 timer_set_ctl(timer, ctl); 197 197 } 198 198 199 - static void set_xval_irq(enum arch_timer timer, uint64_t xval, uint32_t ctl, 199 + static void set_xval_irq(enum arch_timer timer, u64 xval, u32 ctl, 200 200 enum timer_view tv) 201 201 { 202 202 switch (tv) { ··· 275 275 * Sleep for usec microseconds by polling in the guest or in 276 276 * userspace (e.g. userspace_cmd=USERSPACE_SCHEDULE). 277 277 */ 278 - static void guest_poll(enum arch_timer test_timer, uint64_t usec, 278 + static void guest_poll(enum arch_timer test_timer, u64 usec, 279 279 enum sync_cmd usp_cmd) 280 280 { 281 - uint64_t cycles = usec_to_cycles(usec); 281 + u64 cycles = usec_to_cycles(usec); 282 282 /* Whichever timer we are testing with, sleep with the other. */ 283 283 enum arch_timer sleep_timer = 1 - test_timer; 284 - uint64_t start = timer_get_cntct(sleep_timer); 284 + u64 start = timer_get_cntct(sleep_timer); 285 285 286 286 while ((timer_get_cntct(sleep_timer) - start) < cycles) { 287 287 if (usp_cmd == NO_USERSPACE_CMD) ··· 291 291 } 292 292 } 293 293 294 - static void sleep_poll(enum arch_timer timer, uint64_t usec) 294 + static void sleep_poll(enum arch_timer timer, u64 usec) 295 295 { 296 296 guest_poll(timer, usec, NO_USERSPACE_CMD); 297 297 } 298 298 299 - static void sleep_sched_poll(enum arch_timer timer, uint64_t usec) 299 + static void sleep_sched_poll(enum arch_timer timer, u64 usec) 300 300 { 301 301 guest_poll(timer, usec, USERSPACE_SCHED_YIELD); 302 302 } 303 303 304 - static void sleep_migrate(enum arch_timer timer, uint64_t usec) 304 + static void sleep_migrate(enum arch_timer timer, u64 usec) 305 305 { 306 306 guest_poll(timer, usec, USERSPACE_MIGRATE_SELF); 307 307 } 308 308 309 - static void sleep_in_userspace(enum arch_timer timer, uint64_t usec) 309 + static void sleep_in_userspace(enum arch_timer timer, u64 usec) 310 310 { 311 311 userspace_sleep(usec); 312 312 } ··· 315 315 * Reset the timer state to some nice values like the counter not being close 316 316 * to the edge, and the control register masked and disabled. 317 317 */ 318 - static void reset_timer_state(enum arch_timer timer, uint64_t cnt) 318 + static void reset_timer_state(enum arch_timer timer, u64 cnt) 319 319 { 320 320 set_counter(timer, cnt); 321 321 timer_set_ctl(timer, CTL_IMASK); 322 322 } 323 323 324 - static void test_timer_xval(enum arch_timer timer, uint64_t xval, 324 + static void test_timer_xval(enum arch_timer timer, u64 xval, 325 325 enum timer_view tv, irq_wait_method_t wm, bool reset_state, 326 - uint64_t reset_cnt) 326 + u64 reset_cnt) 327 327 { 328 328 local_irq_disable(); 329 329 ··· 348 348 * the "runner", like: tools/testing/selftests/kselftest/runner.sh. 
349 349 */ 350 350 351 - static void test_timer_cval(enum arch_timer timer, uint64_t cval, 351 + static void test_timer_cval(enum arch_timer timer, u64 cval, 352 352 irq_wait_method_t wm, bool reset_state, 353 - uint64_t reset_cnt) 353 + u64 reset_cnt) 354 354 { 355 355 test_timer_xval(timer, cval, TIMER_CVAL, wm, reset_state, reset_cnt); 356 356 } 357 357 358 - static void test_timer_tval(enum arch_timer timer, int32_t tval, 358 + static void test_timer_tval(enum arch_timer timer, s32 tval, 359 359 irq_wait_method_t wm, bool reset_state, 360 - uint64_t reset_cnt) 360 + u64 reset_cnt) 361 361 { 362 - test_timer_xval(timer, (uint64_t) tval, TIMER_TVAL, wm, reset_state, 362 + test_timer_xval(timer, (u64)tval, TIMER_TVAL, wm, reset_state, 363 363 reset_cnt); 364 364 } 365 365 366 - static void test_xval_check_no_irq(enum arch_timer timer, uint64_t xval, 367 - uint64_t usec, enum timer_view timer_view, 366 + static void test_xval_check_no_irq(enum arch_timer timer, u64 xval, 367 + u64 usec, enum timer_view timer_view, 368 368 sleep_method_t guest_sleep) 369 369 { 370 370 local_irq_disable(); ··· 379 379 assert_irqs_handled(0); 380 380 } 381 381 382 - static void test_cval_no_irq(enum arch_timer timer, uint64_t cval, 383 - uint64_t usec, sleep_method_t wm) 382 + static void test_cval_no_irq(enum arch_timer timer, u64 cval, 383 + u64 usec, sleep_method_t wm) 384 384 { 385 385 test_xval_check_no_irq(timer, cval, usec, TIMER_CVAL, wm); 386 386 } 387 387 388 - static void test_tval_no_irq(enum arch_timer timer, int32_t tval, uint64_t usec, 388 + static void test_tval_no_irq(enum arch_timer timer, s32 tval, u64 usec, 389 389 sleep_method_t wm) 390 390 { 391 - /* tval will be cast to an int32_t in test_xval_check_no_irq */ 392 - test_xval_check_no_irq(timer, (uint64_t) tval, usec, TIMER_TVAL, wm); 391 + /* tval will be cast to an s32 in test_xval_check_no_irq */ 392 + test_xval_check_no_irq(timer, (u64)tval, usec, TIMER_TVAL, wm); 393 393 } 394 394 395 395 /* Test masking/unmasking a timer using the timer mask (not the IRQ mask). */ ··· 463 463 * timeout for the wait: we use the wfi instruction. 
464 464 */ 465 465 static void test_reprogramming_timer(enum arch_timer timer, irq_wait_method_t wm, 466 - int32_t delta_1_ms, int32_t delta_2_ms) 466 + s32 delta_1_ms, s32 delta_2_ms) 467 467 { 468 468 local_irq_disable(); 469 469 reset_timer_state(timer, DEF_CNT); ··· 488 488 static void test_reprogram_timers(enum arch_timer timer) 489 489 { 490 490 int i; 491 - uint64_t base_wait = test_args.wait_ms; 491 + u64 base_wait = test_args.wait_ms; 492 492 493 493 for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) { 494 494 /* ··· 504 504 505 505 static void test_basic_functionality(enum arch_timer timer) 506 506 { 507 - int32_t tval = (int32_t) msec_to_cycles(test_args.wait_ms); 508 - uint64_t cval = DEF_CNT + msec_to_cycles(test_args.wait_ms); 507 + s32 tval = (s32)msec_to_cycles(test_args.wait_ms); 508 + u64 cval = DEF_CNT + msec_to_cycles(test_args.wait_ms); 509 509 int i; 510 510 511 511 for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) { ··· 593 593 reset_timer_state(timer, DEF_CNT); 594 594 595 595 set_cval_irq(timer, 596 - (uint64_t) TVAL_MAX + 596 + (u64)TVAL_MAX + 597 597 msec_to_cycles(test_args.wait_ms) / 2, CTL_ENABLE); 598 598 599 599 set_counter(timer, TVAL_MAX); ··· 608 608 /* Test timers set for: cval = now + TVAL_MAX + wait_ms / 2 */ 609 609 static void test_timers_above_tval_max(enum arch_timer timer) 610 610 { 611 - uint64_t cval; 611 + u64 cval; 612 612 int i; 613 613 614 614 /* ··· 638 638 * sets the counter to cnt_1, the [c|t]val, the counter to cnt_2, and 639 639 * then waits for an IRQ. 640 640 */ 641 - static void test_set_cnt_after_xval(enum arch_timer timer, uint64_t cnt_1, 642 - uint64_t xval, uint64_t cnt_2, 641 + static void test_set_cnt_after_xval(enum arch_timer timer, u64 cnt_1, 642 + u64 xval, u64 cnt_2, 643 643 irq_wait_method_t wm, enum timer_view tv) 644 644 { 645 645 local_irq_disable(); ··· 662 662 * then waits for an IRQ. 
663 663 */ 664 664 static void test_set_cnt_after_xval_no_irq(enum arch_timer timer, 665 - uint64_t cnt_1, uint64_t xval, 666 - uint64_t cnt_2, 665 + u64 cnt_1, u64 xval, 666 + u64 cnt_2, 667 667 sleep_method_t guest_sleep, 668 668 enum timer_view tv) 669 669 { ··· 684 684 timer_set_ctl(timer, CTL_IMASK); 685 685 } 686 686 687 - static void test_set_cnt_after_tval(enum arch_timer timer, uint64_t cnt_1, 688 - int32_t tval, uint64_t cnt_2, 687 + static void test_set_cnt_after_tval(enum arch_timer timer, u64 cnt_1, 688 + s32 tval, u64 cnt_2, 689 689 irq_wait_method_t wm) 690 690 { 691 691 test_set_cnt_after_xval(timer, cnt_1, tval, cnt_2, wm, TIMER_TVAL); 692 692 } 693 693 694 - static void test_set_cnt_after_cval(enum arch_timer timer, uint64_t cnt_1, 695 - uint64_t cval, uint64_t cnt_2, 694 + static void test_set_cnt_after_cval(enum arch_timer timer, u64 cnt_1, 695 + u64 cval, u64 cnt_2, 696 696 irq_wait_method_t wm) 697 697 { 698 698 test_set_cnt_after_xval(timer, cnt_1, cval, cnt_2, wm, TIMER_CVAL); 699 699 } 700 700 701 701 static void test_set_cnt_after_tval_no_irq(enum arch_timer timer, 702 - uint64_t cnt_1, int32_t tval, 703 - uint64_t cnt_2, sleep_method_t wm) 702 + u64 cnt_1, s32 tval, 703 + u64 cnt_2, sleep_method_t wm) 704 704 { 705 705 test_set_cnt_after_xval_no_irq(timer, cnt_1, tval, cnt_2, wm, 706 706 TIMER_TVAL); 707 707 } 708 708 709 709 static void test_set_cnt_after_cval_no_irq(enum arch_timer timer, 710 - uint64_t cnt_1, uint64_t cval, 711 - uint64_t cnt_2, sleep_method_t wm) 710 + u64 cnt_1, u64 cval, 711 + u64 cnt_2, sleep_method_t wm) 712 712 { 713 713 test_set_cnt_after_xval_no_irq(timer, cnt_1, cval, cnt_2, wm, 714 714 TIMER_CVAL); ··· 718 718 static void test_move_counters_ahead_of_timers(enum arch_timer timer) 719 719 { 720 720 int i; 721 - int32_t tval; 721 + s32 tval; 722 722 723 723 for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) { 724 724 irq_wait_method_t wm = irq_wait_method[i]; ··· 730 730 test_set_cnt_after_tval(timer, 0, -1, DEF_CNT + 1, wm); 731 731 test_set_cnt_after_tval(timer, 0, -1, TVAL_MAX, wm); 732 732 tval = TVAL_MAX; 733 - test_set_cnt_after_tval(timer, 0, tval, (uint64_t) tval + 1, 734 - wm); 733 + test_set_cnt_after_tval(timer, 0, tval, (u64)tval + 1, wm); 735 734 } 736 735 } 737 736 ··· 753 754 754 755 static void test_timers_in_the_past(enum arch_timer timer) 755 756 { 756 - int32_t tval = -1 * (int32_t) msec_to_cycles(test_args.wait_ms); 757 - uint64_t cval; 757 + s32 tval = -1 * (s32)msec_to_cycles(test_args.wait_ms); 758 + u64 cval; 758 759 int i; 759 760 760 761 for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) { ··· 789 790 790 791 static void test_long_timer_delays(enum arch_timer timer) 791 792 { 792 - int32_t tval = (int32_t) msec_to_cycles(test_args.long_wait_ms); 793 - uint64_t cval = DEF_CNT + msec_to_cycles(test_args.long_wait_ms); 793 + s32 tval = (s32)msec_to_cycles(test_args.long_wait_ms); 794 + u64 cval = DEF_CNT + msec_to_cycles(test_args.long_wait_ms); 794 795 int i; 795 796 796 797 for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) { ··· 845 846 846 847 static cpu_set_t default_cpuset; 847 848 848 - static uint32_t next_pcpu(void) 849 + static u32 next_pcpu(void) 849 850 { 850 - uint32_t max = get_nprocs(); 851 - uint32_t cur = sched_getcpu(); 852 - uint32_t next = cur; 851 + u32 max = get_nprocs(); 852 + u32 cur = sched_getcpu(); 853 + u32 next = cur; 853 854 cpu_set_t cpuset = default_cpuset; 854 855 855 856 TEST_ASSERT(max > 1, "Need at least two physical cpus"); ··· 861 862 return next; 862 863 } 863 864 864 - static 
void kvm_set_cntxct(struct kvm_vcpu *vcpu, uint64_t cnt, 865 + static void kvm_set_cntxct(struct kvm_vcpu *vcpu, u64 cnt, 865 866 enum arch_timer timer) 866 867 { 867 868 if (timer == PHYSICAL) ··· 873 874 static void handle_sync(struct kvm_vcpu *vcpu, struct ucall *uc) 874 875 { 875 876 enum sync_cmd cmd = uc->args[1]; 876 - uint64_t val = uc->args[2]; 877 + u64 val = uc->args[2]; 877 878 enum arch_timer timer = uc->args[3]; 878 879 879 880 switch (cmd) { ··· 1017 1018 1018 1019 static void set_counter_defaults(void) 1019 1020 { 1020 - const uint64_t MIN_ROLLOVER_SECS = 40ULL * 365 * 24 * 3600; 1021 - uint64_t freq = read_sysreg(CNTFRQ_EL0); 1021 + const u64 MIN_ROLLOVER_SECS = 40ULL * 365 * 24 * 3600; 1022 + u64 freq = read_sysreg(CNTFRQ_EL0); 1022 1023 int width = ilog2(MIN_ROLLOVER_SECS * freq); 1023 1024 1024 1025 width = clamp(width, 56, 64);
+35 -37
tools/testing/selftests/kvm/arm64/debug-exceptions.c
··· 31 31 32 32 extern unsigned char sw_bp, sw_bp2, hw_bp, hw_bp2, bp_svc, bp_brk, hw_wp, ss_start, hw_bp_ctx; 33 33 extern unsigned char iter_ss_begin, iter_ss_end; 34 - static volatile uint64_t sw_bp_addr, hw_bp_addr; 35 - static volatile uint64_t wp_addr, wp_data_addr; 36 - static volatile uint64_t svc_addr; 37 - static volatile uint64_t ss_addr[4], ss_idx; 38 - #define PC(v) ((uint64_t)&(v)) 34 + static volatile u64 sw_bp_addr, hw_bp_addr; 35 + static volatile u64 wp_addr, wp_data_addr; 36 + static volatile u64 svc_addr; 37 + static volatile u64 ss_addr[4], ss_idx; 38 + #define PC(v) ((u64)&(v)) 39 39 40 40 #define GEN_DEBUG_WRITE_REG(reg_name) \ 41 - static void write_##reg_name(int num, uint64_t val) \ 41 + static void write_##reg_name(int num, u64 val) \ 42 42 { \ 43 43 switch (num) { \ 44 44 case 0: \ ··· 102 102 103 103 static void reset_debug_state(void) 104 104 { 105 - uint8_t brps, wrps, i; 106 - uint64_t dfr0; 105 + u8 brps, wrps, i; 106 + u64 dfr0; 107 107 108 108 asm volatile("msr daifset, #8"); 109 109 ··· 140 140 141 141 static void enable_monitor_debug_exceptions(void) 142 142 { 143 - uint64_t mdscr; 143 + u64 mdscr; 144 144 145 145 asm volatile("msr daifclr, #8"); 146 146 ··· 149 149 isb(); 150 150 } 151 151 152 - static void install_wp(uint8_t wpn, uint64_t addr) 152 + static void install_wp(u8 wpn, u64 addr) 153 153 { 154 - uint32_t wcr; 154 + u32 wcr; 155 155 156 156 wcr = DBGWCR_LEN8 | DBGWCR_RD | DBGWCR_WR | DBGWCR_EL1 | DBGWCR_E; 157 157 write_dbgwcr(wpn, wcr); ··· 162 162 enable_monitor_debug_exceptions(); 163 163 } 164 164 165 - static void install_hw_bp(uint8_t bpn, uint64_t addr) 165 + static void install_hw_bp(u8 bpn, u64 addr) 166 166 { 167 - uint32_t bcr; 167 + u32 bcr; 168 168 169 169 bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E; 170 170 write_dbgbcr(bpn, bcr); ··· 174 174 enable_monitor_debug_exceptions(); 175 175 } 176 176 177 - static void install_wp_ctx(uint8_t addr_wp, uint8_t ctx_bp, uint64_t addr, 178 - uint64_t ctx) 177 + static void install_wp_ctx(u8 addr_wp, u8 ctx_bp, u64 addr, u64 ctx) 179 178 { 180 - uint32_t wcr; 181 - uint64_t ctx_bcr; 179 + u32 wcr; 180 + u64 ctx_bcr; 182 181 183 182 /* Setup a context-aware breakpoint for Linked Context ID Match */ 184 183 ctx_bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E | ··· 187 188 188 189 /* Setup a linked watchpoint (linked to the context-aware breakpoint) */ 189 190 wcr = DBGWCR_LEN8 | DBGWCR_RD | DBGWCR_WR | DBGWCR_EL1 | DBGWCR_E | 190 - DBGWCR_WT_LINK | ((uint32_t)ctx_bp << DBGWCR_LBN_SHIFT); 191 + DBGWCR_WT_LINK | ((u32)ctx_bp << DBGWCR_LBN_SHIFT); 191 192 write_dbgwcr(addr_wp, wcr); 192 193 write_dbgwvr(addr_wp, addr); 193 194 isb(); ··· 195 196 enable_monitor_debug_exceptions(); 196 197 } 197 198 198 - void install_hw_bp_ctx(uint8_t addr_bp, uint8_t ctx_bp, uint64_t addr, 199 - uint64_t ctx) 199 + void install_hw_bp_ctx(u8 addr_bp, u8 ctx_bp, u64 addr, u64 ctx) 200 200 { 201 - uint32_t addr_bcr, ctx_bcr; 201 + u32 addr_bcr, ctx_bcr; 202 202 203 203 /* Setup a context-aware breakpoint for Linked Context ID Match */ 204 204 ctx_bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E | ··· 211 213 */ 212 214 addr_bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E | 213 215 DBGBCR_BT_ADDR_LINK_CTX | 214 - ((uint32_t)ctx_bp << DBGBCR_LBN_SHIFT); 216 + ((u32)ctx_bp << DBGBCR_LBN_SHIFT); 215 217 write_dbgbcr(addr_bp, addr_bcr); 216 218 write_dbgbvr(addr_bp, addr); 217 219 isb(); ··· 221 223 222 224 static void install_ss(void) 223 225 { 224 - uint64_t mdscr; 226 + u64 mdscr; 
225 227 226 228 asm volatile("msr daifclr, #8"); 227 229 ··· 232 234 233 235 static volatile char write_data; 234 236 235 - static void guest_code(uint8_t bpn, uint8_t wpn, uint8_t ctx_bpn) 237 + static void guest_code(u8 bpn, u8 wpn, u8 ctx_bpn) 236 238 { 237 - uint64_t ctx = 0xabcdef; /* a random context number */ 239 + u64 ctx = 0xabcdef; /* a random context number */ 238 240 239 241 /* Software-breakpoint */ 240 242 reset_debug_state(); ··· 375 377 376 378 static void guest_code_ss(int test_cnt) 377 379 { 378 - uint64_t i; 379 - uint64_t bvr, wvr, w_bvr, w_wvr; 380 + u64 i; 381 + u64 bvr, wvr, w_bvr, w_wvr; 380 382 381 383 for (i = 0; i < test_cnt; i++) { 382 384 /* Bits [1:0] of dbg{b,w}vr are RES0 */ ··· 414 416 GUEST_DONE(); 415 417 } 416 418 417 - static int debug_version(uint64_t id_aa64dfr0) 419 + static int debug_version(u64 id_aa64dfr0) 418 420 { 419 421 return FIELD_GET(ID_AA64DFR0_EL1_DebugVer, id_aa64dfr0); 420 422 } 421 423 422 - static void test_guest_debug_exceptions(uint8_t bpn, uint8_t wpn, uint8_t ctx_bpn) 424 + static void test_guest_debug_exceptions(u8 bpn, u8 wpn, u8 ctx_bpn) 423 425 { 424 426 struct kvm_vcpu *vcpu; 425 427 struct kvm_vm *vm; ··· 466 468 struct kvm_vm *vm; 467 469 struct ucall uc; 468 470 struct kvm_run *run; 469 - uint64_t pc, cmd; 470 - uint64_t test_pc = 0; 471 + u64 pc, cmd; 472 + u64 test_pc = 0; 471 473 bool ss_enable = false; 472 474 struct kvm_guest_debug debug = {}; 473 475 ··· 504 506 "Unexpected pc 0x%lx (expected 0x%lx)", 505 507 pc, test_pc); 506 508 507 - if ((pc + 4) == (uint64_t)&iter_ss_end) { 509 + if ((pc + 4) == (u64)&iter_ss_end) { 508 510 test_pc = 0; 509 511 debug.control = KVM_GUESTDBG_ENABLE; 510 512 ss_enable = false; ··· 517 519 * iter_ss_end, the pc for the next KVM_EXIT_DEBUG should 518 520 * be the current pc + 4. 519 521 */ 520 - if ((pc >= (uint64_t)&iter_ss_begin) && 521 - (pc < (uint64_t)&iter_ss_end)) 522 + if ((pc >= (u64)&iter_ss_begin) && 523 + (pc < (u64)&iter_ss_end)) 522 524 test_pc = pc + 4; 523 525 else 524 526 test_pc = 0; ··· 531 533 * Run debug testing using the various breakpoint#, watchpoint# and 532 534 * context-aware breakpoint# with the given ID_AA64DFR0_EL1 configuration. 533 535 */ 534 - void test_guest_debug_exceptions_all(uint64_t aa64dfr0) 536 + void test_guest_debug_exceptions_all(u64 aa64dfr0) 535 537 { 536 - uint8_t brp_num, wrp_num, ctx_brp_num, normal_brp_num, ctx_brp_base; 538 + u8 brp_num, wrp_num, ctx_brp_num, normal_brp_num, ctx_brp_base; 537 539 int b, w, c; 538 540 539 541 /* Number of breakpoints */ ··· 578 580 struct kvm_vm *vm; 579 581 int opt; 580 582 int ss_iteration = 10000; 581 - uint64_t aa64dfr0; 583 + u64 aa64dfr0; 582 584 583 585 vm = vm_create_with_one_vcpu(&vcpu, guest_code); 584 586 aa64dfr0 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1));
+12 -12
tools/testing/selftests/kvm/arm64/hypercalls.c
··· 29 29 #define KVM_REG_ARM_VENDOR_HYP_BMAP_2_RESET_VAL 0 30 30 31 31 struct kvm_fw_reg_info { 32 - uint64_t reg; /* Register definition */ 33 - uint64_t max_feat_bit; /* Bit that represents the upper limit of the feature-map */ 34 - uint64_t reset_val; /* Reset value for the register */ 32 + u64 reg; /* Register definition */ 33 + u64 max_feat_bit; /* Bit that represents the upper limit of the feature-map */ 34 + u64 reset_val; /* Reset value for the register */ 35 35 }; 36 36 37 37 #define FW_REG_INFO(r) \ ··· 59 59 static int stage = TEST_STAGE_REG_IFACE; 60 60 61 61 struct test_hvc_info { 62 - uint32_t func_id; 63 - uint64_t arg1; 62 + u32 func_id; 63 + u64 arg1; 64 64 }; 65 65 66 66 #define TEST_HVC_INFO(f, a1) \ ··· 152 152 } 153 153 154 154 struct st_time { 155 - uint32_t rev; 156 - uint32_t attr; 157 - uint64_t st_time; 155 + u32 rev; 156 + u32 attr; 157 + u64 st_time; 158 158 }; 159 159 160 160 #define STEAL_TIME_SIZE ((sizeof(struct st_time) + 63) & ~63) ··· 162 162 163 163 static void steal_time_init(struct kvm_vcpu *vcpu) 164 164 { 165 - uint64_t st_ipa = (ulong)ST_GPA_BASE; 165 + u64 st_ipa = (ulong)ST_GPA_BASE; 166 166 unsigned int gpages; 167 167 168 168 gpages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, STEAL_TIME_SIZE); ··· 174 174 175 175 static void test_fw_regs_before_vm_start(struct kvm_vcpu *vcpu) 176 176 { 177 - uint64_t val; 177 + u64 val; 178 178 unsigned int i; 179 179 int ret; 180 180 181 181 for (i = 0; i < ARRAY_SIZE(fw_reg_info); i++) { 182 182 const struct kvm_fw_reg_info *reg_info = &fw_reg_info[i]; 183 - uint64_t set_val; 183 + u64 set_val; 184 184 185 185 /* First 'read' should be the reset value for the reg */ 186 186 val = vcpu_get_reg(vcpu, reg_info->reg); ··· 229 229 230 230 static void test_fw_regs_after_vm_start(struct kvm_vcpu *vcpu) 231 231 { 232 - uint64_t val; 232 + u64 val; 233 233 unsigned int i; 234 234 int ret; 235 235
+2 -2
tools/testing/selftests/kvm/arm64/idreg-idst.c
··· 13 13 14 14 #define __check_sr_read(r) \ 15 15 ({ \ 16 - uint64_t val; \ 16 + u64 val; \ 17 17 \ 18 18 sys64 = false; \ 19 19 undef = false; \ ··· 101 101 { 102 102 struct kvm_vcpu *vcpu; 103 103 struct kvm_vm *vm; 104 - uint64_t mmfr2; 104 + u64 mmfr2; 105 105 106 106 test_disable_default_vgic(); 107 107
+4 -4
tools/testing/selftests/kvm/arm64/no-vgic.c
··· 15 15 16 16 #define __check_sr_read(r) \ 17 17 ({ \ 18 - uint64_t val; \ 18 + u64 val; \ 19 19 \ 20 20 handled = false; \ 21 21 dsb(sy); \ ··· 33 33 34 34 #define __check_gicv5_gicr_op(r) \ 35 35 ({ \ 36 - uint64_t val; \ 36 + u64 val; \ 37 37 \ 38 38 handled = false; \ 39 39 dsb(sy); \ ··· 82 82 83 83 static void guest_code_gicv3(void) 84 84 { 85 - uint64_t val; 85 + u64 val; 86 86 87 87 /* 88 88 * Check that we advertise that ID_AA64PFR0_EL1.GIC == 0, having ··· 262 262 struct kvm_vcpu *vcpu; 263 263 struct kvm_vm *vm; 264 264 bool has_v3, has_v5; 265 - uint64_t pfr; 265 + u64 pfr; 266 266 267 267 test_disable_default_vgic(); 268 268
+41 -41
tools/testing/selftests/kvm/arm64/page_fault_test.c
··· 23 23 #define TEST_PTE_GVA 0xb0000000 24 24 #define TEST_DATA 0x0123456789ABCDEF 25 25 26 - static uint64_t *guest_test_memory = (uint64_t *)TEST_GVA; 26 + static u64 *guest_test_memory = (u64 *)TEST_GVA; 27 27 28 28 #define CMD_NONE (0) 29 29 #define CMD_SKIP_TEST (1ULL << 1) ··· 48 48 49 49 struct test_desc { 50 50 const char *name; 51 - uint64_t mem_mark_cmd; 51 + u64 mem_mark_cmd; 52 52 /* Skip the test if any prepare function returns false */ 53 53 bool (*guest_prepare[PREPARE_FN_NR])(void); 54 54 void (*guest_test)(void); ··· 59 59 void (*iabt_handler)(struct ex_regs *regs); 60 60 void (*mmio_handler)(struct kvm_vm *vm, struct kvm_run *run); 61 61 void (*fail_vcpu_run_handler)(int ret); 62 - uint32_t pt_memslot_flags; 63 - uint32_t data_memslot_flags; 62 + u32 pt_memslot_flags; 63 + u32 data_memslot_flags; 64 64 bool skip; 65 65 struct event_cnt expected_events; 66 66 }; ··· 70 70 struct test_desc *test_desc; 71 71 }; 72 72 73 - static inline void flush_tlb_page(uint64_t vaddr) 73 + static inline void flush_tlb_page(gva_t gva) 74 74 { 75 - uint64_t page = vaddr >> 12; 75 + gva_t page = gva >> 12; 76 76 77 77 dsb(ishst); 78 78 asm volatile("tlbi vaae1is, %0" :: "r" (page)); ··· 82 82 83 83 static void guest_write64(void) 84 84 { 85 - uint64_t val; 85 + u64 val; 86 86 87 87 WRITE_ONCE(*guest_test_memory, TEST_DATA); 88 88 val = READ_ONCE(*guest_test_memory); ··· 92 92 /* Check the system for atomic instructions. */ 93 93 static bool guest_check_lse(void) 94 94 { 95 - uint64_t isar0 = read_sysreg(id_aa64isar0_el1); 96 - uint64_t atomic; 95 + u64 isar0 = read_sysreg(id_aa64isar0_el1); 96 + u64 atomic; 97 97 98 98 atomic = FIELD_GET(ID_AA64ISAR0_EL1_ATOMIC, isar0); 99 99 return atomic >= 2; ··· 101 101 102 102 static bool guest_check_dc_zva(void) 103 103 { 104 - uint64_t dczid = read_sysreg(dczid_el0); 105 - uint64_t dzp = FIELD_GET(DCZID_EL0_DZP, dczid); 104 + u64 dczid = read_sysreg(dczid_el0); 105 + u64 dzp = FIELD_GET(DCZID_EL0_DZP, dczid); 106 106 107 107 return dzp == 0; 108 108 } ··· 110 110 /* Compare and swap instruction. */ 111 111 static void guest_cas(void) 112 112 { 113 - uint64_t val; 113 + u64 val; 114 114 115 115 GUEST_ASSERT(guest_check_lse()); 116 116 asm volatile(".arch_extension lse\n" ··· 122 122 123 123 static void guest_read64(void) 124 124 { 125 - uint64_t val; 125 + u64 val; 126 126 127 127 val = READ_ONCE(*guest_test_memory); 128 128 GUEST_ASSERT_EQ(val, 0); ··· 131 131 /* Address translation instruction */ 132 132 static void guest_at(void) 133 133 { 134 - uint64_t par; 134 + u64 par; 135 135 136 136 asm volatile("at s1e1r, %0" :: "r" (guest_test_memory)); 137 137 isb(); ··· 148 148 */ 149 149 static void guest_dc_zva(void) 150 150 { 151 - uint16_t val; 151 + u16 val; 152 152 153 153 asm volatile("dc zva, %0" :: "r" (guest_test_memory)); 154 154 dsb(ish); ··· 164 164 */ 165 165 static void guest_ld_preidx(void) 166 166 { 167 - uint64_t val; 168 - uint64_t addr = TEST_GVA - 8; 167 + u64 val; 168 + u64 addr = TEST_GVA - 8; 169 169 170 170 /* 171 171 * This ends up accessing "TEST_GVA + 8 - 8", where "TEST_GVA - 8" is ··· 179 179 180 180 static void guest_st_preidx(void) 181 181 { 182 - uint64_t val = TEST_DATA; 183 - uint64_t addr = TEST_GVA - 8; 182 + u64 val = TEST_DATA; 183 + u64 addr = TEST_GVA - 8; 184 184 185 185 asm volatile("str %0, [%1, #8]!" 
186 186 : "+r" (val), "+r" (addr)); ··· 191 191 192 192 static bool guest_set_ha(void) 193 193 { 194 - uint64_t mmfr1 = read_sysreg(id_aa64mmfr1_el1); 195 - uint64_t hadbs, tcr; 194 + u64 mmfr1 = read_sysreg(id_aa64mmfr1_el1); 195 + u64 hadbs, tcr; 196 196 197 197 /* Skip if HA is not supported. */ 198 198 hadbs = FIELD_GET(ID_AA64MMFR1_EL1_HAFDBS, mmfr1); ··· 208 208 209 209 static bool guest_clear_pte_af(void) 210 210 { 211 - *((uint64_t *)TEST_PTE_GVA) &= ~PTE_AF; 211 + *((u64 *)TEST_PTE_GVA) &= ~PTE_AF; 212 212 flush_tlb_page(TEST_GVA); 213 213 214 214 return true; ··· 217 217 static void guest_check_pte_af(void) 218 218 { 219 219 dsb(ish); 220 - GUEST_ASSERT_EQ(*((uint64_t *)TEST_PTE_GVA) & PTE_AF, PTE_AF); 220 + GUEST_ASSERT_EQ(*((u64 *)TEST_PTE_GVA) & PTE_AF, PTE_AF); 221 221 } 222 222 223 223 static void guest_check_write_in_dirty_log(void) ··· 302 302 static struct uffd_args { 303 303 char *copy; 304 304 void *hva; 305 - uint64_t paging_size; 305 + u64 paging_size; 306 306 } pt_args, data_args; 307 307 308 308 /* Returns true to continue the test, and false if it should be skipped. */ 309 309 static int uffd_generic_handler(int uffd_mode, int uffd, struct uffd_msg *msg, 310 310 struct uffd_args *args) 311 311 { 312 - uint64_t addr = msg->arg.pagefault.address; 313 - uint64_t flags = msg->arg.pagefault.flags; 312 + u64 addr = msg->arg.pagefault.address; 313 + u64 flags = msg->arg.pagefault.flags; 314 314 struct uffdio_copy copy; 315 315 int ret; 316 316 317 317 TEST_ASSERT(uffd_mode == UFFDIO_REGISTER_MODE_MISSING, 318 318 "The only expected UFFD mode is MISSING"); 319 - TEST_ASSERT_EQ(addr, (uint64_t)args->hva); 319 + TEST_ASSERT_EQ(addr, (u64)args->hva); 320 320 321 321 pr_debug("uffd fault: addr=%p write=%d\n", 322 322 (void *)addr, !!(flags & UFFD_PAGEFAULT_FLAG_WRITE)); 323 323 324 - copy.src = (uint64_t)args->copy; 324 + copy.src = (u64)args->copy; 325 325 copy.dst = addr; 326 326 copy.len = args->paging_size; 327 327 copy.mode = 0; ··· 407 407 struct userspace_mem_region *region) 408 408 { 409 409 void *hva = (void *)region->region.userspace_addr; 410 - uint64_t paging_size = region->region.memory_size; 410 + u64 paging_size = region->region.memory_size; 411 411 int ret, fd = region->fd; 412 412 413 413 if (fd != -1) { ··· 438 438 439 439 static void mmio_no_handler(struct kvm_vm *vm, struct kvm_run *run) 440 440 { 441 - uint64_t data; 441 + u64 data; 442 442 443 443 memcpy(&data, run->mmio.data, sizeof(data)); 444 444 pr_debug("addr=%lld len=%d w=%d data=%lx\n", ··· 449 449 450 450 static bool check_write_in_dirty_log(struct kvm_vm *vm, 451 451 struct userspace_mem_region *region, 452 - uint64_t host_pg_nr) 452 + u64 host_pg_nr) 453 453 { 454 454 unsigned long *bmap; 455 455 bool first_page_dirty; 456 - uint64_t size = region->region.memory_size; 456 + u64 size = region->region.memory_size; 457 457 458 458 /* getpage_size() is not always equal to vm->page_size */ 459 459 bmap = bitmap_zalloc(size / getpagesize()); ··· 468 468 { 469 469 struct userspace_mem_region *data_region, *pt_region; 470 470 bool continue_test = true; 471 - uint64_t pte_gpa, pte_pg; 471 + u64 pte_gpa, pte_pg; 472 472 473 473 data_region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA); 474 474 pt_region = vm_get_mem_region(vm, MEM_REGION_PT); ··· 510 510 events.fail_vcpu_runs += 1; 511 511 } 512 512 513 - typedef uint32_t aarch64_insn_t; 513 + typedef u32 aarch64_insn_t; 514 514 extern aarch64_insn_t __exec_test[2]; 515 515 516 516 noinline void __return_0x77(void) ··· 525 525 */ 526 526 static void 
load_exec_code_for_test(struct kvm_vm *vm) 527 527 { 528 - uint64_t *code; 528 + u64 *code; 529 529 struct userspace_mem_region *region; 530 530 void *hva; 531 531 ··· 552 552 static void setup_gva_maps(struct kvm_vm *vm) 553 553 { 554 554 struct userspace_mem_region *region; 555 - uint64_t pte_gpa; 555 + u64 pte_gpa; 556 556 557 557 region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA); 558 558 /* Map TEST_GVA first. This will install a new PTE. */ ··· 574 574 */ 575 575 static void setup_memslots(struct kvm_vm *vm, struct test_params *p) 576 576 { 577 - uint64_t backing_src_pagesz = get_backing_src_pagesz(p->src_type); 578 - uint64_t guest_page_size = vm->page_size; 579 - uint64_t max_gfn = vm_compute_max_gfn(vm); 577 + u64 backing_src_pagesz = get_backing_src_pagesz(p->src_type); 578 + u64 guest_page_size = vm->page_size; 579 + u64 max_gfn = vm_compute_max_gfn(vm); 580 580 /* Enough for 2M of code when using 4K guest pages. */ 581 - uint64_t code_npages = 512; 582 - uint64_t pt_size, data_size, data_gpa; 581 + u64 code_npages = 512; 582 + u64 pt_size, data_size, data_gpa; 583 583 584 584 /* 585 585 * This test requires 1 pgd, 2 pud, 4 pmd, and 6 pte pages when using
+12 -14
tools/testing/selftests/kvm/arm64/psci_test.c
··· 22 22 #define CPU_ON_ENTRY_ADDR 0xfeedf00dul 23 23 #define CPU_ON_CONTEXT_ID 0xdeadc0deul 24 24 25 - static uint64_t psci_cpu_on(uint64_t target_cpu, uint64_t entry_addr, 26 - uint64_t context_id) 25 + static u64 psci_cpu_on(u64 target_cpu, u64 entry_addr, u64 context_id) 27 26 { 28 27 struct arm_smccc_res res; 29 28 ··· 32 33 return res.a0; 33 34 } 34 35 35 - static uint64_t psci_affinity_info(uint64_t target_affinity, 36 - uint64_t lowest_affinity_level) 36 + static u64 psci_affinity_info(u64 target_affinity, u64 lowest_affinity_level) 37 37 { 38 38 struct arm_smccc_res res; 39 39 ··· 42 44 return res.a0; 43 45 } 44 46 45 - static uint64_t psci_system_suspend(uint64_t entry_addr, uint64_t context_id) 47 + static u64 psci_system_suspend(u64 entry_addr, u64 context_id) 46 48 { 47 49 struct arm_smccc_res res; 48 50 ··· 52 54 return res.a0; 53 55 } 54 56 55 - static uint64_t psci_system_off2(uint64_t type, uint64_t cookie) 57 + static u64 psci_system_off2(u64 type, u64 cookie) 56 58 { 57 59 struct arm_smccc_res res; 58 60 ··· 61 63 return res.a0; 62 64 } 63 65 64 - static uint64_t psci_features(uint32_t func_id) 66 + static u64 psci_features(u32 func_id) 65 67 { 66 68 struct arm_smccc_res res; 67 69 ··· 108 110 109 111 static void assert_vcpu_reset(struct kvm_vcpu *vcpu) 110 112 { 111 - uint64_t obs_pc, obs_x0; 113 + u64 obs_pc, obs_x0; 112 114 113 115 obs_pc = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc)); 114 116 obs_x0 = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.regs[0])); ··· 121 123 obs_x0, CPU_ON_CONTEXT_ID); 122 124 } 123 125 124 - static void guest_test_cpu_on(uint64_t target_cpu) 126 + static void guest_test_cpu_on(u64 target_cpu) 125 127 { 126 - uint64_t target_state; 128 + u64 target_state; 127 129 128 130 GUEST_ASSERT(!psci_cpu_on(target_cpu, CPU_ON_ENTRY_ADDR, CPU_ON_CONTEXT_ID)); 129 131 ··· 140 142 static void host_test_cpu_on(void) 141 143 { 142 144 struct kvm_vcpu *source, *target; 143 - uint64_t target_mpidr; 145 + u64 target_mpidr; 144 146 struct kvm_vm *vm; 145 147 struct ucall uc; 146 148 ··· 164 166 165 167 static void guest_test_system_suspend(void) 166 168 { 167 - uint64_t ret; 169 + u64 ret; 168 170 169 171 /* assert that SYSTEM_SUSPEND is discoverable */ 170 172 GUEST_ASSERT(!psci_features(PSCI_1_0_FN_SYSTEM_SUSPEND)); ··· 198 200 199 201 static void guest_test_system_off2(void) 200 202 { 201 - uint64_t ret; 203 + u64 ret; 202 204 203 205 /* assert that SYSTEM_OFF2 is discoverable */ 204 206 GUEST_ASSERT(psci_features(PSCI_1_3_FN_SYSTEM_OFF2) & ··· 236 238 { 237 239 struct kvm_vcpu *source, *target; 238 240 struct kvm_mp_state mps; 239 - uint64_t psci_version = 0; 241 + u64 psci_version = 0; 240 242 int nr_shutdowns = 0; 241 243 struct kvm_run *run; 242 244 struct ucall uc;
+19 -22
tools/testing/selftests/kvm/arm64/sea_to_user.c
··· 51 51 #define EINJ_OFFSET 0x01234badUL 52 52 #define EINJ_GVA ((START_GVA) + (EINJ_OFFSET)) 53 53 54 - static vm_paddr_t einj_gpa; 54 + static gpa_t einj_gpa; 55 55 static void *einj_hva; 56 - static uint64_t einj_hpa; 56 + static u64 einj_hpa; 57 57 static bool far_invalid; 58 58 59 - static uint64_t translate_to_host_paddr(unsigned long vaddr) 59 + static u64 translate_hva_to_hpa(unsigned long hva) 60 60 { 61 - uint64_t pinfo; 62 - int64_t offset = vaddr / getpagesize() * sizeof(pinfo); 61 + u64 pinfo; 62 + s64 offset = hva / getpagesize() * sizeof(pinfo); 63 63 int fd; 64 - uint64_t page_addr; 65 - uint64_t paddr; 66 64 67 65 fd = open("/proc/self/pagemap", O_RDONLY); 68 66 if (fd < 0) ··· 75 77 if ((pinfo & PAGE_PRESENT) == 0) 76 78 ksft_exit_fail_perror("Page not present"); 77 79 78 - page_addr = (pinfo & PAGE_PHYSICAL) << MIN_PAGE_SHIFT; 79 - paddr = page_addr + (vaddr & (getpagesize() - 1)); 80 - return paddr; 80 + return ((pinfo & PAGE_PHYSICAL) << MIN_PAGE_SHIFT) + 81 + (hva & (getpagesize() - 1)); 81 82 } 82 83 83 - static void write_einj_entry(const char *einj_path, uint64_t val) 84 + static void write_einj_entry(const char *einj_path, u64 val) 84 85 { 85 86 char cmd[256] = {0}; 86 87 FILE *cmdfile = NULL; ··· 93 96 ksft_exit_fail_perror("Failed to write EINJ entry"); 94 97 } 95 98 96 - static void inject_uer(uint64_t paddr) 99 + static void inject_uer(u64 hpa) 97 100 { 98 101 if (access("/sys/firmware/acpi/tables/EINJ", R_OK) == -1) 99 102 ksft_test_result_skip("EINJ table no available in firmware"); ··· 103 106 104 107 write_einj_entry(EINJ_ETYPE, ERROR_TYPE_MEMORY_UER); 105 108 write_einj_entry(EINJ_FLAGS, MASK_MEMORY_UER); 106 - write_einj_entry(EINJ_ADDR, paddr); 109 + write_einj_entry(EINJ_ADDR, hpa); 107 110 write_einj_entry(EINJ_MASK, ~0x0UL); 108 111 write_einj_entry(EINJ_NOTRIGGER, 1); 109 112 write_einj_entry(EINJ_DOIT, 1); ··· 142 145 143 146 static void guest_code(void) 144 147 { 145 - uint64_t guest_data; 148 + u64 guest_data; 146 149 147 150 /* Consumes error will cause a SEA. */ 148 - guest_data = *(uint64_t *)EINJ_GVA; 151 + guest_data = *(u64 *)EINJ_GVA; 149 152 150 153 GUEST_FAIL("Poison not protected by SEA: gva=%#lx, guest_data=%#lx\n", 151 154 EINJ_GVA, guest_data); ··· 250 253 size_t backing_page_size; 251 254 size_t guest_page_size; 252 255 size_t alignment; 253 - uint64_t num_guest_pages; 254 - vm_paddr_t start_gpa; 256 + u64 num_guest_pages; 257 + gpa_t start_gpa; 255 258 enum vm_mem_backing_src_type src_type = VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB; 256 259 struct kvm_vm *vm; 257 260 ··· 275 278 vm_userspace_mem_region_add( 276 279 /*vm=*/vm, 277 280 /*src_type=*/src_type, 278 - /*guest_paddr=*/start_gpa, 281 + /*gpa=*/start_gpa, 279 282 /*slot=*/1, 280 283 /*npages=*/num_guest_pages, 281 284 /*flags=*/0); ··· 289 292 290 293 static void vm_inject_memory_uer(struct kvm_vm *vm) 291 294 { 292 - uint64_t guest_data; 295 + u64 guest_data; 293 296 294 297 einj_gpa = addr_gva2gpa(vm, EINJ_GVA); 295 298 einj_hva = addr_gva2hva(vm, EINJ_GVA); 296 299 297 300 /* Populate certain data before injecting UER. 
*/ 298 - *(uint64_t *)einj_hva = 0xBAADCAFE; 299 - guest_data = *(uint64_t *)einj_hva; 301 + *(u64 *)einj_hva = 0xBAADCAFE; 302 + guest_data = *(u64 *)einj_hva; 300 303 ksft_print_msg("Before EINJect: data=%#lx\n", 301 304 guest_data); 302 305 303 - einj_hpa = translate_to_host_paddr((unsigned long)einj_hva); 306 + einj_hpa = translate_hva_to_hpa((unsigned long)einj_hva); 304 307 305 308 ksft_print_msg("EINJ_GVA=%#lx, einj_gpa=%#lx, einj_hva=%p, einj_hpa=%#lx\n", 306 309 EINJ_GVA, einj_gpa, einj_hva, einj_hpa);
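translate_hva_to_hpa() above is a thin wrapper around /proc/self/pagemap, which exposes one 64-bit entry per virtual page: bits 0-54 hold the page frame number and bit 63 the present flag (see Documentation/admin-guide/mm/pagemap.rst). A standalone sketch of the same lookup; it needs CAP_SYS_ADMIN (otherwise the PFN field reads back as zero) and keeps error handling minimal.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define PM_PFN_MASK	((1ULL << 55) - 1)	/* bits 0-54: page frame number */
#define PM_PRESENT	(1ULL << 63)		/* bit 63: page is present in RAM */

static uint64_t hva_to_hpa(uintptr_t hva)
{
	long psize = sysconf(_SC_PAGESIZE);
	uint64_t entry;
	int fd;

	fd = open("/proc/self/pagemap", O_RDONLY);
	if (fd < 0) {
		perror("open(/proc/self/pagemap)");
		exit(1);
	}

	/* One 8-byte entry per virtual page, indexed by virtual page number. */
	if (pread(fd, &entry, sizeof(entry),
		  (hva / psize) * sizeof(entry)) != sizeof(entry)) {
		perror("pread");
		exit(1);
	}
	close(fd);

	if (!(entry & PM_PRESENT)) {
		fprintf(stderr, "page not present\n");
		exit(1);
	}

	/* Physical address = PFN * page size + offset within the page. */
	return (entry & PM_PFN_MASK) * psize + (hva & (psize - 1));
}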
+35 -35
tools/testing/selftests/kvm/arm64/set_id_regs.c
··· 30 30 char *name; 31 31 bool sign; 32 32 enum ftr_type type; 33 - uint8_t shift; 34 - uint64_t mask; 33 + u8 shift; 34 + u64 mask; 35 35 /* 36 36 * For FTR_EXACT, safe_val is used as the exact safe value. 37 37 * For FTR_LOWER_SAFE, safe_val is used as the minimal safe value. 38 38 */ 39 - int64_t safe_val; 39 + s64 safe_val; 40 40 41 41 /* Allowed to be changed by the host after run */ 42 42 bool mutable; 43 43 }; 44 44 45 45 struct test_feature_reg { 46 - uint32_t reg; 46 + u32 reg; 47 47 const struct reg_ftr_bits *ftr_bits; 48 48 }; 49 49 ··· 275 275 } 276 276 277 277 /* Return a safe value to a given ftr_bits an ftr value */ 278 - uint64_t get_safe_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr) 278 + u64 get_safe_value(const struct reg_ftr_bits *ftr_bits, u64 ftr) 279 279 { 280 - uint64_t ftr_max = ftr_bits->mask >> ftr_bits->shift; 280 + u64 ftr_max = ftr_bits->mask >> ftr_bits->shift; 281 281 282 282 TEST_ASSERT(ftr_max > 1, "This test doesn't support single bit features"); 283 283 ··· 329 329 } 330 330 331 331 /* Return an invalid value to a given ftr_bits an ftr value */ 332 - uint64_t get_invalid_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr) 332 + u64 get_invalid_value(const struct reg_ftr_bits *ftr_bits, u64 ftr) 333 333 { 334 - uint64_t ftr_max = ftr_bits->mask >> ftr_bits->shift; 334 + u64 ftr_max = ftr_bits->mask >> ftr_bits->shift; 335 335 336 336 TEST_ASSERT(ftr_max > 1, "This test doesn't support single bit features"); 337 337 338 338 if (ftr_bits->sign == FTR_UNSIGNED) { 339 339 switch (ftr_bits->type) { 340 340 case FTR_EXACT: 341 - ftr = max((uint64_t)ftr_bits->safe_val + 1, ftr + 1); 341 + ftr = max((u64)ftr_bits->safe_val + 1, ftr + 1); 342 342 break; 343 343 case FTR_LOWER_SAFE: 344 344 ftr++; ··· 358 358 } else if (ftr != ftr_max) { 359 359 switch (ftr_bits->type) { 360 360 case FTR_EXACT: 361 - ftr = max((uint64_t)ftr_bits->safe_val + 1, ftr + 1); 361 + ftr = max((u64)ftr_bits->safe_val + 1, ftr + 1); 362 362 break; 363 363 case FTR_LOWER_SAFE: 364 364 ftr++; ··· 382 382 return ftr; 383 383 } 384 384 385 - static uint64_t test_reg_set_success(struct kvm_vcpu *vcpu, uint64_t reg, 386 - const struct reg_ftr_bits *ftr_bits) 385 + static u64 test_reg_set_success(struct kvm_vcpu *vcpu, u64 reg, 386 + const struct reg_ftr_bits *ftr_bits) 387 387 { 388 - uint8_t shift = ftr_bits->shift; 389 - uint64_t mask = ftr_bits->mask; 390 - uint64_t val, new_val, ftr; 388 + u8 shift = ftr_bits->shift; 389 + u64 mask = ftr_bits->mask; 390 + u64 val, new_val, ftr; 391 391 392 392 val = vcpu_get_reg(vcpu, reg); 393 393 ftr = (val & mask) >> shift; ··· 405 405 return new_val; 406 406 } 407 407 408 - static void test_reg_set_fail(struct kvm_vcpu *vcpu, uint64_t reg, 408 + static void test_reg_set_fail(struct kvm_vcpu *vcpu, u64 reg, 409 409 const struct reg_ftr_bits *ftr_bits) 410 410 { 411 - uint8_t shift = ftr_bits->shift; 412 - uint64_t mask = ftr_bits->mask; 413 - uint64_t val, old_val, ftr; 411 + u8 shift = ftr_bits->shift; 412 + u64 mask = ftr_bits->mask; 413 + u64 val, old_val, ftr; 414 414 int r; 415 415 416 416 val = vcpu_get_reg(vcpu, reg); ··· 431 431 TEST_ASSERT_EQ(val, old_val); 432 432 } 433 433 434 - static uint64_t test_reg_vals[KVM_ARM_FEATURE_ID_RANGE_SIZE]; 434 + static u64 test_reg_vals[KVM_ARM_FEATURE_ID_RANGE_SIZE]; 435 435 436 436 #define encoding_to_range_idx(encoding) \ 437 437 KVM_ARM_FEATURE_ID_RANGE_IDX(sys_reg_Op0(encoding), sys_reg_Op1(encoding), \ ··· 441 441 442 442 static void test_vm_ftr_id_regs(struct kvm_vcpu *vcpu, bool 
aarch64_only) 443 443 { 444 - uint64_t masks[KVM_ARM_FEATURE_ID_RANGE_SIZE]; 444 + u64 masks[KVM_ARM_FEATURE_ID_RANGE_SIZE]; 445 445 struct reg_mask_range range = { 446 446 .addr = (__u64)masks, 447 447 }; ··· 458 458 459 459 for (int i = 0; i < ARRAY_SIZE(test_regs); i++) { 460 460 const struct reg_ftr_bits *ftr_bits = test_regs[i].ftr_bits; 461 - uint32_t reg_id = test_regs[i].reg; 462 - uint64_t reg = KVM_ARM64_SYS_REG(reg_id); 461 + u32 reg_id = test_regs[i].reg; 462 + u64 reg = KVM_ARM64_SYS_REG(reg_id); 463 463 int idx; 464 464 465 465 /* Get the index to masks array for the idreg */ ··· 489 489 #define MPAM_IDREG_TEST 6 490 490 static void test_user_set_mpam_reg(struct kvm_vcpu *vcpu) 491 491 { 492 - uint64_t masks[KVM_ARM_FEATURE_ID_RANGE_SIZE]; 492 + u64 masks[KVM_ARM_FEATURE_ID_RANGE_SIZE]; 493 493 struct reg_mask_range range = { 494 494 .addr = (__u64)masks, 495 495 }; 496 - uint64_t val; 496 + u64 val; 497 497 int idx, err; 498 498 499 499 /* ··· 584 584 #define MTE_IDREG_TEST 1 585 585 static void test_user_set_mte_reg(struct kvm_vcpu *vcpu) 586 586 { 587 - uint64_t masks[KVM_ARM_FEATURE_ID_RANGE_SIZE]; 587 + u64 masks[KVM_ARM_FEATURE_ID_RANGE_SIZE]; 588 588 struct reg_mask_range range = { 589 589 .addr = (__u64)masks, 590 590 }; 591 - uint64_t val; 592 - uint64_t mte; 593 - uint64_t mte_frac; 591 + u64 val; 592 + u64 mte; 593 + u64 mte_frac; 594 594 int idx, err; 595 595 596 596 val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1)); ··· 644 644 ksft_test_result_pass("ID_AA64PFR1_EL1.MTE_frac no longer 0xF\n"); 645 645 } 646 646 647 - static uint64_t reset_mutable_bits(uint32_t id, uint64_t val) 647 + static u64 reset_mutable_bits(u32 id, u64 val) 648 648 { 649 649 struct test_feature_reg *reg = NULL; 650 650 ··· 674 674 struct ucall uc; 675 675 676 676 while (!done) { 677 - uint64_t val; 677 + u64 val; 678 678 679 679 vcpu_run(vcpu); 680 680 ··· 707 707 708 708 static void test_clidr(struct kvm_vcpu *vcpu) 709 709 { 710 - uint64_t clidr; 710 + u64 clidr; 711 711 int level; 712 712 713 713 clidr = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CLIDR_EL1)); ··· 772 772 ksft_test_result_pass("%s\n", __func__); 773 773 } 774 774 775 - static void test_assert_id_reg_unchanged(struct kvm_vcpu *vcpu, uint32_t encoding) 775 + static void test_assert_id_reg_unchanged(struct kvm_vcpu *vcpu, u32 encoding) 776 776 { 777 777 size_t idx = encoding_to_range_idx(encoding); 778 - uint64_t observed; 778 + u64 observed; 779 779 780 780 observed = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(encoding)); 781 781 TEST_ASSERT_EQ(reset_mutable_bits(encoding, test_reg_vals[idx]), ··· 808 808 struct kvm_vcpu *vcpu; 809 809 struct kvm_vm *vm; 810 810 bool aarch64_only; 811 - uint64_t val, el0; 811 + u64 val, el0; 812 812 int test_cnt, i, j; 813 813 814 814 TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES));
+5 -5
tools/testing/selftests/kvm/arm64/smccc_filter.c
··· 37 37 for (conduit = test_runs_at_el2() ? SMC_INSN : HVC_INSN; \ 38 38 conduit <= SMC_INSN; conduit++) 39 39 40 - static void guest_main(uint32_t func_id, enum smccc_conduit conduit) 40 + static void guest_main(u32 func_id, enum smccc_conduit conduit) 41 41 { 42 42 struct arm_smccc_res res; 43 43 ··· 49 49 GUEST_SYNC(res.a0); 50 50 } 51 51 52 - static int __set_smccc_filter(struct kvm_vm *vm, uint32_t start, uint32_t nr_functions, 52 + static int __set_smccc_filter(struct kvm_vm *vm, u32 start, u32 nr_functions, 53 53 enum kvm_smccc_filter_action action) 54 54 { 55 55 struct kvm_smccc_filter filter = { ··· 62 62 KVM_ARM_VM_SMCCC_FILTER, &filter); 63 63 } 64 64 65 - static void set_smccc_filter(struct kvm_vm *vm, uint32_t start, uint32_t nr_functions, 65 + static void set_smccc_filter(struct kvm_vm *vm, u32 start, u32 nr_functions, 66 66 enum kvm_smccc_filter_action action) 67 67 { 68 68 int ret = __set_smccc_filter(vm, start, nr_functions, action); ··· 112 112 { 113 113 struct kvm_vcpu *vcpu; 114 114 struct kvm_vm *vm = setup_vm(&vcpu); 115 - uint32_t smc64_fn; 115 + u32 smc64_fn; 116 116 int r; 117 117 118 118 r = __set_smccc_filter(vm, ARM_SMCCC_ARCH_WORKAROUND_1, ··· 217 217 } 218 218 } 219 219 220 - static void expect_call_fwd_to_user(struct kvm_vcpu *vcpu, uint32_t func_id, 220 + static void expect_call_fwd_to_user(struct kvm_vcpu *vcpu, u32 func_id, 221 221 enum smccc_conduit conduit) 222 222 { 223 223 struct kvm_run *run = vcpu->run;
+28 -28
tools/testing/selftests/kvm/arm64/vgic_init.c
··· 19 19 20 20 #define NR_VCPUS 4 21 21 22 - #define REG_OFFSET(vcpu, offset) (((uint64_t)vcpu << 32) | offset) 22 + #define REG_OFFSET(vcpu, offset) (((u64)vcpu << 32) | offset) 23 23 24 24 #define VGIC_DEV_IS_V2(_d) ((_d) == KVM_DEV_TYPE_ARM_VGIC_V2) 25 25 #define VGIC_DEV_IS_V3(_d) ((_d) == KVM_DEV_TYPE_ARM_VGIC_V3) ··· 27 27 struct vm_gic { 28 28 struct kvm_vm *vm; 29 29 int gic_fd; 30 - uint32_t gic_dev_type; 30 + u32 gic_dev_type; 31 31 }; 32 32 33 - static uint64_t max_phys_size; 33 + static u64 max_phys_size; 34 34 35 35 /* 36 36 * Helpers to access a redistributor register and verify the ioctl() failed or ··· 39 39 static void v3_redist_reg_get_errno(int gicv3_fd, int vcpu, int offset, 40 40 int want, const char *msg) 41 41 { 42 - uint32_t ignored_val; 42 + u32 ignored_val; 43 43 int ret = __kvm_device_attr_get(gicv3_fd, KVM_DEV_ARM_VGIC_GRP_REDIST_REGS, 44 44 REG_OFFSET(vcpu, offset), &ignored_val); 45 45 46 46 TEST_ASSERT(ret && errno == want, "%s; want errno = %d", msg, want); 47 47 } 48 48 49 - static void v3_redist_reg_get(int gicv3_fd, int vcpu, int offset, uint32_t want, 49 + static void v3_redist_reg_get(int gicv3_fd, int vcpu, int offset, u32 want, 50 50 const char *msg) 51 51 { 52 - uint32_t val; 52 + u32 val; 53 53 54 54 kvm_device_attr_get(gicv3_fd, KVM_DEV_ARM_VGIC_GRP_REDIST_REGS, 55 55 REG_OFFSET(vcpu, offset), &val); ··· 71 71 return __vcpu_run(vcpu) ? -errno : 0; 72 72 } 73 73 74 - static struct vm_gic vm_gic_create_with_vcpus(uint32_t gic_dev_type, 75 - uint32_t nr_vcpus, 74 + static struct vm_gic vm_gic_create_with_vcpus(u32 gic_dev_type, 75 + u32 nr_vcpus, 76 76 struct kvm_vcpu *vcpus[]) 77 77 { 78 78 struct vm_gic v; ··· 84 84 return v; 85 85 } 86 86 87 - static struct vm_gic vm_gic_create_barebones(uint32_t gic_dev_type) 87 + static struct vm_gic vm_gic_create_barebones(u32 gic_dev_type) 88 88 { 89 89 struct vm_gic v; 90 90 ··· 103 103 } 104 104 105 105 struct vgic_region_attr { 106 - uint64_t attr; 107 - uint64_t size; 108 - uint64_t alignment; 106 + u64 attr; 107 + u64 size; 108 + u64 alignment; 109 109 }; 110 110 111 111 struct vgic_region_attr gic_v3_dist_region = { ··· 143 143 static void subtest_dist_rdist(struct vm_gic *v) 144 144 { 145 145 int ret; 146 - uint64_t addr; 146 + u64 addr; 147 147 struct vgic_region_attr rdist; /* CPU interface in GICv2*/ 148 148 struct vgic_region_attr dist; 149 149 ··· 223 223 /* Test the new REDIST region API */ 224 224 static void subtest_v3_redist_regions(struct vm_gic *v) 225 225 { 226 - uint64_t addr, expected_addr; 226 + u64 addr, expected_addr; 227 227 int ret; 228 228 229 229 ret = __kvm_has_device_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, ··· 332 332 * VGIC KVM device is created and initialized before the secondary CPUs 333 333 * get created 334 334 */ 335 - static void test_vgic_then_vcpus(uint32_t gic_dev_type) 335 + static void test_vgic_then_vcpus(u32 gic_dev_type) 336 336 { 337 337 struct kvm_vcpu *vcpus[NR_VCPUS]; 338 338 struct vm_gic v; ··· 353 353 } 354 354 355 355 /* All the VCPUs are created before the VGIC KVM device gets initialized */ 356 - static void test_vcpus_then_vgic(uint32_t gic_dev_type) 356 + static void test_vcpus_then_vgic(u32 gic_dev_type) 357 357 { 358 358 struct kvm_vcpu *vcpus[NR_VCPUS]; 359 359 struct vm_gic v; ··· 408 408 struct kvm_vcpu *vcpus[NR_VCPUS]; 409 409 void *dummy = NULL; 410 410 struct vm_gic v; 411 - uint64_t addr; 411 + u64 addr; 412 412 int ret; 413 413 414 414 v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, NR_VCPUS, vcpus); ··· 460 460 static void 
test_v3_typer_accesses(void) 461 461 { 462 462 struct vm_gic v; 463 - uint64_t addr; 463 + u64 addr; 464 464 int ret, i; 465 465 466 466 v.vm = vm_create(NR_VCPUS); ··· 518 518 } 519 519 520 520 static struct vm_gic vm_gic_v3_create_with_vcpuids(int nr_vcpus, 521 - uint32_t vcpuids[]) 521 + u32 vcpuids[]) 522 522 { 523 523 struct vm_gic v; 524 524 int i; ··· 544 544 */ 545 545 static void test_v3_last_bit_redist_regions(void) 546 546 { 547 - uint32_t vcpuids[] = { 0, 3, 5, 4, 1, 2 }; 547 + u32 vcpuids[] = { 0, 3, 5, 4, 1, 2 }; 548 548 struct vm_gic v; 549 - uint64_t addr; 549 + u64 addr; 550 550 551 551 v = vm_gic_v3_create_with_vcpuids(ARRAY_SIZE(vcpuids), vcpuids); 552 552 ··· 578 578 /* Test last bit with legacy region */ 579 579 static void test_v3_last_bit_single_rdist(void) 580 580 { 581 - uint32_t vcpuids[] = { 0, 3, 5, 4, 1, 2 }; 581 + u32 vcpuids[] = { 0, 3, 5, 4, 1, 2 }; 582 582 struct vm_gic v; 583 - uint64_t addr; 583 + u64 addr; 584 584 585 585 v = vm_gic_v3_create_with_vcpuids(ARRAY_SIZE(vcpuids), vcpuids); 586 586 ··· 606 606 struct kvm_vcpu *vcpus[NR_VCPUS]; 607 607 struct vm_gic v; 608 608 int ret, i; 609 - uint64_t addr; 609 + u64 addr; 610 610 611 611 v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, 1, vcpus); 612 612 ··· 638 638 { 639 639 struct kvm_vcpu *vcpus[NR_VCPUS]; 640 640 struct vm_gic v; 641 - uint64_t addr; 641 + u64 addr; 642 642 int its_fd, ret; 643 643 644 644 v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, NR_VCPUS, vcpus); ··· 717 717 /* 718 718 * Returns 0 if it's possible to create GIC device of a given type (V2 or V3). 719 719 */ 720 - int test_kvm_device(uint32_t gic_dev_type) 720 + int test_kvm_device(u32 gic_dev_type) 721 721 { 722 722 struct kvm_vcpu *vcpus[NR_VCPUS]; 723 723 struct vm_gic v; 724 - uint32_t other; 724 + u32 other; 725 725 int ret; 726 726 727 727 v.vm = vm_create_with_vcpus(NR_VCPUS, guest_code, vcpus); ··· 968 968 kvm_vm_free(vm); 969 969 } 970 970 971 - void run_tests(uint32_t gic_dev_type) 971 + void run_tests(u32 gic_dev_type) 972 972 { 973 973 test_vcpus_then_vgic(gic_dev_type); 974 974 test_vgic_then_vcpus(gic_dev_type);
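Most of the address-space subtests above reduce to programming distributor and redistributor bases through KVM device attributes. A sketch of the raw ioctl sequence behind the selftest helpers, assuming the arm64 uapi definitions (KVM_DEV_ARM_VGIC_GRP_ADDR and friends) come in via <linux/kvm.h>; dist_gpa is a placeholder GPA and error handling is omitted.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int create_vgic_v3(int vm_fd, uint64_t dist_gpa)
{
	struct kvm_create_device create = {
		.type = KVM_DEV_TYPE_ARM_VGIC_V3,
	};
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_ADDR,
		.attr  = KVM_VGIC_V3_ADDR_TYPE_DIST,
		.addr  = (uint64_t)(uintptr_t)&dist_gpa,	/* pointer to the GPA */
	};

	/* KVM returns the new device in create.fd. */
	ioctl(vm_fd, KVM_CREATE_DEVICE, &create);

	/* Place the distributor; the alignment/size rules are what the test pokes at. */
	ioctl(create.fd, KVM_SET_DEVICE_ATTR, &attr);

	return create.fd;
}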
+69 -68
tools/testing/selftests/kvm/arm64/vgic_irq.c
··· 24 24 * function. 25 25 */ 26 26 struct test_args { 27 - uint32_t nr_irqs; /* number of KVM supported IRQs. */ 27 + u32 nr_irqs; /* number of KVM supported IRQs. */ 28 28 bool eoi_split; /* 1 is eoir+dir, 0 is eoir only */ 29 29 bool level_sensitive; /* 1 is level, 0 is edge */ 30 30 int kvm_max_routes; /* output of KVM_CAP_IRQ_ROUTING */ 31 31 bool kvm_supports_irqfd; /* output of KVM_CAP_IRQFD */ 32 - uint32_t shared_data; 32 + u32 shared_data; 33 33 }; 34 34 35 35 /* ··· 64 64 65 65 struct kvm_inject_args { 66 66 kvm_inject_cmd cmd; 67 - uint32_t first_intid; 68 - uint32_t num; 67 + u32 first_intid; 68 + u32 num; 69 69 int level; 70 70 bool expect_failure; 71 71 }; 72 72 73 73 /* Used on the guest side to perform the hypercall. */ 74 - static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid, 75 - uint32_t num, int level, bool expect_failure); 74 + static void kvm_inject_call(kvm_inject_cmd cmd, u32 first_intid, 75 + u32 num, int level, bool expect_failure); 76 76 77 77 /* Used on the host side to get the hypercall info. */ 78 78 static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc, ··· 133 133 for_each_supported_inject_fn((args), (t), (f)) 134 134 135 135 /* Shared between the guest main thread and the IRQ handlers. */ 136 - volatile uint64_t irq_handled; 137 - volatile uint32_t irqnr_received[MAX_SPI + 1]; 136 + volatile u64 irq_handled; 137 + volatile u32 irqnr_received[MAX_SPI + 1]; 138 138 139 139 static void reset_stats(void) 140 140 { ··· 145 145 irqnr_received[i] = 0; 146 146 } 147 147 148 - static uint64_t gic_read_ap1r0(void) 148 + static u64 gic_read_ap1r0(void) 149 149 { 150 - uint64_t reg = read_sysreg_s(SYS_ICC_AP1R0_EL1); 150 + u64 reg = read_sysreg_s(SYS_ICC_AP1R0_EL1); 151 151 152 152 dsb(sy); 153 153 return reg; 154 154 } 155 155 156 - static void gic_write_ap1r0(uint64_t val) 156 + static void gic_write_ap1r0(u64 val) 157 157 { 158 158 write_sysreg_s(val, SYS_ICC_AP1R0_EL1); 159 159 isb(); 160 160 } 161 161 162 - static void guest_set_irq_line(uint32_t intid, uint32_t level); 162 + static void guest_set_irq_line(u32 intid, u32 level); 163 163 164 164 static void guest_irq_generic_handler(bool eoi_split, bool level_sensitive) 165 165 { 166 - uint32_t intid = gic_get_and_ack_irq(); 166 + u32 intid = gic_get_and_ack_irq(); 167 167 168 168 if (intid == IAR_SPURIOUS) 169 169 return; ··· 189 189 GUEST_ASSERT(!gic_irq_get_pending(intid)); 190 190 } 191 191 192 - static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid, 193 - uint32_t num, int level, bool expect_failure) 192 + static void kvm_inject_call(kvm_inject_cmd cmd, u32 first_intid, 193 + u32 num, int level, bool expect_failure) 194 194 { 195 195 struct kvm_inject_args args = { 196 196 .cmd = cmd, ··· 204 204 205 205 #define GUEST_ASSERT_IAR_EMPTY() \ 206 206 do { \ 207 - uint32_t _intid; \ 207 + u32 _intid; \ 208 208 _intid = gic_get_and_ack_irq(); \ 209 209 GUEST_ASSERT(_intid == IAR_SPURIOUS); \ 210 210 } while (0) ··· 237 237 gic_set_priority(i, IRQ_DEFAULT_PRIO_REG); 238 238 } 239 239 240 - static void guest_set_irq_line(uint32_t intid, uint32_t level) 240 + static void guest_set_irq_line(u32 intid, u32 level) 241 241 { 242 242 kvm_inject_call(KVM_SET_IRQ_LINE, intid, 1, level, false); 243 243 } 244 244 245 245 static void test_inject_fail(struct test_args *args, 246 - uint32_t intid, kvm_inject_cmd cmd) 246 + u32 intid, kvm_inject_cmd cmd) 247 247 { 248 248 reset_stats(); 249 249 ··· 255 255 } 256 256 257 257 static void guest_inject(struct test_args *args, 258 - 
uint32_t first_intid, uint32_t num, 259 - kvm_inject_cmd cmd) 258 + u32 first_intid, u32 num, 259 + kvm_inject_cmd cmd) 260 260 { 261 - uint32_t i; 261 + u32 i; 262 262 263 263 reset_stats(); 264 264 ··· 292 292 * deactivated yet. 293 293 */ 294 294 static void guest_restore_active(struct test_args *args, 295 - uint32_t first_intid, uint32_t num, 296 - kvm_inject_cmd cmd) 295 + u32 first_intid, u32 num, 296 + kvm_inject_cmd cmd) 297 297 { 298 - uint32_t prio, intid, ap1r; 298 + u32 prio, intid, ap1r; 299 299 int i; 300 300 301 301 /* ··· 342 342 * This function should only be used in test_inject_preemption (with IRQs 343 343 * masked). 344 344 */ 345 - static uint32_t wait_for_and_activate_irq(void) 345 + static u32 wait_for_and_activate_irq(void) 346 346 { 347 - uint32_t intid; 347 + u32 intid; 348 348 349 349 do { 350 350 asm volatile("wfi" : : : "memory"); ··· 360 360 * interrupts for the whole test. 361 361 */ 362 362 static void test_inject_preemption(struct test_args *args, 363 - uint32_t first_intid, int num, 363 + u32 first_intid, int num, 364 364 const unsigned long *exclude, 365 365 kvm_inject_cmd cmd) 366 366 { 367 - uint32_t intid, prio, step = KVM_PRIO_STEPS; 367 + u32 intid, prio, step = KVM_PRIO_STEPS; 368 368 int i; 369 369 370 370 /* Set the priorities of the first (KVM_NUM_PRIOS - 1) IRQs ··· 379 379 local_irq_disable(); 380 380 381 381 for (i = 0; i < num; i++) { 382 - uint32_t tmp; 382 + u32 tmp; 383 383 intid = i + first_intid; 384 384 385 385 if (exclude && test_bit(i, exclude)) ··· 431 431 432 432 static void test_injection(struct test_args *args, struct kvm_inject_desc *f) 433 433 { 434 - uint32_t nr_irqs = args->nr_irqs; 434 + u32 nr_irqs = args->nr_irqs; 435 435 436 436 if (f->sgi) { 437 437 guest_inject(args, MIN_SGI, 1, f->cmd); ··· 451 451 static void test_injection_failure(struct test_args *args, 452 452 struct kvm_inject_desc *f) 453 453 { 454 - uint32_t bad_intid[] = { args->nr_irqs, 1020, 1024, 1120, 5120, ~0U, }; 454 + u32 bad_intid[] = { args->nr_irqs, 1020, 1024, 1120, 5120, ~0U, }; 455 455 int i; 456 456 457 457 for (i = 0; i < ARRAY_SIZE(bad_intid); i++) ··· 490 490 491 491 static void guest_code(struct test_args *args) 492 492 { 493 - uint32_t i, nr_irqs = args->nr_irqs; 493 + u32 i, nr_irqs = args->nr_irqs; 494 494 bool level_sensitive = args->level_sensitive; 495 495 struct kvm_inject_desc *f, *inject_fns; 496 496 ··· 529 529 GUEST_DONE(); 530 530 } 531 531 532 - static void kvm_irq_line_check(struct kvm_vm *vm, uint32_t intid, int level, 533 - struct test_args *test_args, bool expect_failure) 532 + static void kvm_irq_line_check(struct kvm_vm *vm, u32 intid, int level, 533 + struct test_args *test_args, bool expect_failure) 534 534 { 535 535 int ret; 536 536 ··· 548 548 } 549 549 } 550 550 551 - void kvm_irq_set_level_info_check(int gic_fd, uint32_t intid, int level, 552 - bool expect_failure) 551 + void kvm_irq_set_level_info_check(int gic_fd, u32 intid, int level, 552 + bool expect_failure) 553 553 { 554 554 if (!expect_failure) { 555 555 kvm_irq_set_level_info(gic_fd, intid, level); ··· 573 573 } 574 574 575 575 static void kvm_set_gsi_routing_irqchip_check(struct kvm_vm *vm, 576 - uint32_t intid, uint32_t num, uint32_t kvm_max_routes, 577 - bool expect_failure) 576 + u32 intid, u32 num, 577 + u32 kvm_max_routes, 578 + bool expect_failure) 578 579 { 579 580 struct kvm_irq_routing *routing; 580 581 int ret; 581 - uint64_t i; 582 + u64 i; 582 583 583 584 assert(num <= kvm_max_routes && kvm_max_routes <= KVM_MAX_IRQ_ROUTES); 584 585 585 586 routing 
= kvm_gsi_routing_create(); 586 - for (i = intid; i < (uint64_t)intid + num; i++) 587 + for (i = intid; i < (u64)intid + num; i++) 587 588 kvm_gsi_routing_irqchip_add(routing, i - MIN_SPI, i - MIN_SPI); 588 589 589 590 if (!expect_failure) { ··· 592 591 } else { 593 592 ret = _kvm_gsi_routing_write(vm, routing); 594 593 /* The kernel only checks e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS */ 595 - if (((uint64_t)intid + num - 1 - MIN_SPI) >= KVM_IRQCHIP_NUM_PINS) 594 + if (((u64)intid + num - 1 - MIN_SPI) >= KVM_IRQCHIP_NUM_PINS) 596 595 TEST_ASSERT(ret != 0 && errno == EINVAL, 597 596 "Bad intid %u did not cause KVM_SET_GSI_ROUTING " 598 597 "error: rc: %i errno: %i", intid, ret, errno); ··· 603 602 } 604 603 } 605 604 606 - static void kvm_irq_write_ispendr_check(int gic_fd, uint32_t intid, 605 + static void kvm_irq_write_ispendr_check(int gic_fd, u32 intid, 607 606 struct kvm_vcpu *vcpu, 608 607 bool expect_failure) 609 608 { ··· 619 618 } 620 619 621 620 static void kvm_routing_and_irqfd_check(struct kvm_vm *vm, 622 - uint32_t intid, uint32_t num, uint32_t kvm_max_routes, 623 - bool expect_failure) 621 + u32 intid, u32 num, u32 kvm_max_routes, 622 + bool expect_failure) 624 623 { 625 624 int fd[MAX_SPI]; 626 - uint64_t val; 625 + u64 val; 627 626 int ret, f; 628 - uint64_t i; 627 + u64 i; 629 628 630 629 /* 631 630 * There is no way to try injecting an SGI or PPI as the interface ··· 644 643 * that no actual interrupt was injected for those cases. 645 644 */ 646 645 647 - for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) 646 + for (f = 0, i = intid; i < (u64)intid + num; i++, f++) 648 647 fd[f] = kvm_new_eventfd(); 649 648 650 - for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) { 651 - assert(i <= (uint64_t)UINT_MAX); 649 + for (f = 0, i = intid; i < (u64)intid + num; i++, f++) { 650 + assert(i <= (u64)UINT_MAX); 652 651 kvm_assign_irqfd(vm, i - MIN_SPI, fd[f]); 653 652 } 654 653 655 - for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) { 654 + for (f = 0, i = intid; i < (u64)intid + num; i++, f++) { 656 655 val = 1; 657 - ret = write(fd[f], &val, sizeof(uint64_t)); 658 - TEST_ASSERT(ret == sizeof(uint64_t), 656 + ret = write(fd[f], &val, sizeof(u64)); 657 + TEST_ASSERT(ret == sizeof(u64), 659 658 __KVM_SYSCALL_ERROR("write()", ret)); 660 659 } 661 660 662 - for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) 661 + for (f = 0, i = intid; i < (u64)intid + num; i++, f++) 663 662 kvm_close(fd[f]); 664 663 } 665 664 666 665 /* handles the valid case: intid=0xffffffff num=1 */ 667 666 #define for_each_intid(first, num, tmp, i) \ 668 667 for ((tmp) = (i) = (first); \ 669 - (tmp) < (uint64_t)(first) + (uint64_t)(num); \ 668 + (tmp) < (u64)(first) + (u64)(num); \ 670 669 (tmp)++, (i)++) 671 670 672 671 static void run_guest_cmd(struct kvm_vcpu *vcpu, int gic_fd, ··· 674 673 struct test_args *test_args) 675 674 { 676 675 kvm_inject_cmd cmd = inject_args->cmd; 677 - uint32_t intid = inject_args->first_intid; 678 - uint32_t num = inject_args->num; 676 + u32 intid = inject_args->first_intid; 677 + u32 num = inject_args->num; 679 678 int level = inject_args->level; 680 679 bool expect_failure = inject_args->expect_failure; 681 680 struct kvm_vm *vm = vcpu->vm; 682 - uint64_t tmp; 683 - uint32_t i; 681 + u64 tmp; 682 + u32 i; 684 683 685 684 /* handles the valid case: intid=0xffffffff num=1 */ 686 685 assert(intid < UINT_MAX - num || num == 1); ··· 732 731 struct kvm_inject_args *args) 733 732 { 734 733 struct kvm_inject_args *kvm_args_hva; 735 - vm_vaddr_t 
kvm_args_gva; 734 + gva_t kvm_args_gva; 736 735 737 736 kvm_args_gva = uc->args[1]; 738 737 kvm_args_hva = (struct kvm_inject_args *)addr_gva2hva(vm, kvm_args_gva); ··· 746 745 args->eoi_split); 747 746 } 748 747 749 - static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split) 748 + static void test_vgic(u32 nr_irqs, bool level_sensitive, bool eoi_split) 750 749 { 751 750 struct ucall uc; 752 751 int gic_fd; 753 752 struct kvm_vcpu *vcpu; 754 753 struct kvm_vm *vm; 755 754 struct kvm_inject_args inject_args; 756 - vm_vaddr_t args_gva; 755 + gva_t args_gva; 757 756 758 757 struct test_args args = { 759 758 .nr_irqs = nr_irqs, ··· 771 770 vcpu_init_descriptor_tables(vcpu); 772 771 773 772 /* Setup the guest args page (so it gets the args). */ 774 - args_gva = vm_vaddr_alloc_page(vm); 773 + args_gva = vm_alloc_page(vm); 775 774 memcpy(addr_gva2hva(vm, args_gva), &args, sizeof(args)); 776 775 vcpu_args_set(vcpu, 1, args_gva); 777 776 ··· 811 810 gic_set_priority_mask(CPU_PRIO_MASK); 812 811 813 812 if (cpuid == 0) { 814 - uint32_t intid; 813 + u32 intid; 815 814 816 815 local_irq_disable(); 817 816 ··· 849 848 850 849 static void guest_code_group_en(struct test_args *args, int cpuid) 851 850 { 852 - uint32_t intid; 851 + u32 intid; 853 852 854 853 gic_init(GIC_V3, 2); 855 854 ··· 897 896 898 897 static void guest_code_timer_spi(struct test_args *args, int cpuid) 899 898 { 900 - uint32_t intid; 899 + u32 intid; 901 900 u64 val; 902 901 903 902 gic_init(GIC_V3, 2); ··· 987 986 struct kvm_vcpu *vcpus[2]; 988 987 struct test_args args = {}; 989 988 struct kvm_vm *vm; 990 - vm_vaddr_t args_gva; 989 + gva_t args_gva; 991 990 int gic_fd, ret; 992 991 993 992 vm = vm_create_with_vcpus(2, gcode, vcpus); ··· 997 996 vcpu_init_descriptor_tables(vcpus[1]); 998 997 999 998 /* Setup the guest args page (so it gets the args). */ 1000 - args_gva = vm_vaddr_alloc_page(vm); 999 + args_gva = vm_alloc_page(vm); 1001 1000 memcpy(addr_gva2hva(vm, args_gva), &args, sizeof(args)); 1002 1001 vcpu_args_set(vcpus[0], 2, args_gva, 0); 1003 1002 vcpu_args_set(vcpus[1], 2, args_gva, 1); ··· 1034 1033 1035 1034 int main(int argc, char **argv) 1036 1035 { 1037 - uint32_t nr_irqs = 64; 1036 + u32 nr_irqs = 64; 1038 1037 bool default_args = true; 1039 1038 bool level_sensitive = false; 1040 1039 int opt;
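kvm_routing_and_irqfd_check() above leans on plain eventfd semantics: the counter is always written as a single 8-byte value, and KVM injects the GSI bound to that eventfd each time the counter is kicked. A minimal userspace sketch using the raw KVM_IRQFD ioctl; vm_fd and gsi are placeholders and error handling is omitted.

#include <stdint.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void kick_gsi_once(int vm_fd, uint32_t gsi)
{
	struct kvm_irqfd irqfd = { .gsi = gsi };
	uint64_t one = 1;

	irqfd.fd = eventfd(0, 0);

	/* Bind the eventfd to the GSI; KVM now listens for counter writes. */
	ioctl(vm_fd, KVM_IRQFD, &irqfd);

	/* Each 8-byte write asserts the routed interrupt once. */
	write(irqfd.fd, &one, sizeof(one));

	close(irqfd.fd);
}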
+10 -10
tools/testing/selftests/kvm/arm64/vgic_lpi_stress.c
··· 23 23 #define GIC_LPI_OFFSET 8192 24 24 25 25 static size_t nr_iterations = 1000; 26 - static vm_paddr_t gpa_base; 26 + static gpa_t gpa_base; 27 27 28 28 static struct kvm_vm *vm; 29 29 static struct kvm_vcpu **vcpus; ··· 35 35 u32 nr_devices; 36 36 u32 nr_event_ids; 37 37 38 - vm_paddr_t device_table; 39 - vm_paddr_t collection_table; 40 - vm_paddr_t cmdq_base; 38 + gpa_t device_table; 39 + gpa_t collection_table; 40 + gpa_t cmdq_base; 41 41 void *cmdq_base_va; 42 - vm_paddr_t itt_tables; 42 + gpa_t itt_tables; 43 43 44 - vm_paddr_t lpi_prop_table; 45 - vm_paddr_t lpi_pend_tables; 44 + gpa_t lpi_prop_table; 45 + gpa_t lpi_pend_tables; 46 46 } test_data = { 47 47 .nr_cpus = 1, 48 48 .nr_devices = 1, ··· 73 73 /* Round-robin the LPIs to all of the vCPUs in the VM */ 74 74 coll_id = 0; 75 75 for (device_id = 0; device_id < nr_devices; device_id++) { 76 - vm_paddr_t itt_base = test_data.itt_tables + (device_id * SZ_64K); 76 + gpa_t itt_base = test_data.itt_tables + (device_id * SZ_64K); 77 77 78 78 its_send_mapd_cmd(test_data.cmdq_base_va, device_id, 79 79 itt_base, SZ_64K, true); ··· 188 188 size_t pages_per_64k = vm_calc_num_guest_pages(vm->mode, SZ_64K); 189 189 u32 nr_devices = test_data.nr_devices; 190 190 u32 nr_cpus = test_data.nr_cpus; 191 - vm_paddr_t cmdq_base; 191 + gpa_t cmdq_base; 192 192 193 193 test_data.device_table = vm_phy_pages_alloc(vm, pages_per_64k, 194 194 gpa_base, ··· 224 224 225 225 static void signal_lpi(u32 device_id, u32 event_id) 226 226 { 227 - vm_paddr_t db_addr = GITS_BASE_GPA + GITS_TRANSLATER; 227 + gpa_t db_addr = GITS_BASE_GPA + GITS_TRANSLATER; 228 228 229 229 struct kvm_msi msi = { 230 230 .address_lo = db_addr,
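signal_lpi() above points a kvm_msi at the ITS doorbell (GITS_TRANSLATER); the rest of the structure and the injection ioctl fall outside the hunk. A sketch of the complete call, assuming the usual GICv3 ITS convention that devid carries the device ID and data the event ID; db_gpa is a placeholder doorbell address.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int signal_its_lpi(int vm_fd, uint64_t db_gpa, uint32_t device_id,
			  uint32_t event_id)
{
	struct kvm_msi msi = {
		.address_lo = (uint32_t)db_gpa,		/* doorbell GPA, low half */
		.address_hi = (uint32_t)(db_gpa >> 32),	/* doorbell GPA, high half */
		.data       = event_id,			/* event ID for this device */
		.devid      = device_id,		/* device ID routed by the ITS */
		.flags      = KVM_MSI_VALID_DEVID,
	};

	/* Returns >0 when delivered, 0 when masked/blocked, <0 on error. */
	return ioctl(vm_fd, KVM_SIGNAL_MSI, &msi);
}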
+5 -5
tools/testing/selftests/kvm/arm64/vgic_v5.c
··· 17 17 struct vm_gic { 18 18 struct kvm_vm *vm; 19 19 int gic_fd; 20 - uint32_t gic_dev_type; 20 + u32 gic_dev_type; 21 21 }; 22 22 23 - static uint64_t max_phys_size; 23 + static u64 max_phys_size; 24 24 25 25 #define GUEST_CMD_IRQ_CDIA 10 26 26 #define GUEST_CMD_IRQ_DIEOI 11 ··· 96 96 kvm_vm_free(v->vm); 97 97 } 98 98 99 - static void test_vgic_v5_ppis(uint32_t gic_dev_type) 99 + static void test_vgic_v5_ppis(u32 gic_dev_type) 100 100 { 101 101 struct kvm_vcpu *vcpus[NR_VCPUS]; 102 102 struct ucall uc; ··· 173 173 /* 174 174 * Returns 0 if it's possible to create GIC device of a given type (V5). 175 175 */ 176 - int test_kvm_device(uint32_t gic_dev_type) 176 + int test_kvm_device(u32 gic_dev_type) 177 177 { 178 178 struct kvm_vcpu *vcpus[NR_VCPUS]; 179 179 struct vm_gic v; ··· 199 199 return 0; 200 200 } 201 201 202 - void run_tests(uint32_t gic_dev_type) 202 + void run_tests(u32 gic_dev_type) 203 203 { 204 204 pr_info("Test VGICv5 PPIs\n"); 205 205 test_vgic_v5_ppis(gic_dev_type);
+28 -28
tools/testing/selftests/kvm/arm64/vpmu_counter_access.c
··· 33 33 static struct vpmu_vm vpmu_vm; 34 34 35 35 struct pmreg_sets { 36 - uint64_t set_reg_id; 37 - uint64_t clr_reg_id; 36 + u64 set_reg_id; 37 + u64 clr_reg_id; 38 38 }; 39 39 40 40 #define PMREG_SET(set, clr) {.set_reg_id = set, .clr_reg_id = clr} 41 41 42 - static uint64_t get_pmcr_n(uint64_t pmcr) 42 + static u64 get_pmcr_n(u64 pmcr) 43 43 { 44 44 return FIELD_GET(ARMV8_PMU_PMCR_N, pmcr); 45 45 } 46 46 47 - static uint64_t get_counters_mask(uint64_t n) 47 + static u64 get_counters_mask(u64 n) 48 48 { 49 - uint64_t mask = BIT(ARMV8_PMU_CYCLE_IDX); 49 + u64 mask = BIT(ARMV8_PMU_CYCLE_IDX); 50 50 51 51 if (n) 52 52 mask |= GENMASK(n - 1, 0); ··· 89 89 90 90 static void pmu_disable_reset(void) 91 91 { 92 - uint64_t pmcr = read_sysreg(pmcr_el0); 92 + u64 pmcr = read_sysreg(pmcr_el0); 93 93 94 94 /* Reset all counters, disabling them */ 95 95 pmcr &= ~ARMV8_PMU_PMCR_E; ··· 169 169 170 170 #define GUEST_ASSERT_BITMAP_REG(regname, mask, set_expected) \ 171 171 { \ 172 - uint64_t _tval = read_sysreg(regname); \ 172 + u64 _tval = read_sysreg(regname); \ 173 173 \ 174 174 if (set_expected) \ 175 175 __GUEST_ASSERT((_tval & mask), \ ··· 185 185 * Check if @mask bits in {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers 186 186 * are set or cleared as specified in @set_expected. 187 187 */ 188 - static void check_bitmap_pmu_regs(uint64_t mask, bool set_expected) 188 + static void check_bitmap_pmu_regs(u64 mask, bool set_expected) 189 189 { 190 190 GUEST_ASSERT_BITMAP_REG(pmcntenset_el0, mask, set_expected); 191 191 GUEST_ASSERT_BITMAP_REG(pmcntenclr_el0, mask, set_expected); ··· 207 207 */ 208 208 static void test_bitmap_pmu_regs(int pmc_idx, bool set_op) 209 209 { 210 - uint64_t pmcr_n, test_bit = BIT(pmc_idx); 210 + u64 pmcr_n, test_bit = BIT(pmc_idx); 211 211 bool set_expected = false; 212 212 213 213 if (set_op) { ··· 232 232 */ 233 233 static void test_access_pmc_regs(struct pmc_accessor *acc, int pmc_idx) 234 234 { 235 - uint64_t write_data, read_data; 235 + u64 write_data, read_data; 236 236 237 237 /* Disable all PMCs and reset all PMCs to zero. */ 238 238 pmu_disable_reset(); ··· 287 287 } 288 288 289 289 #define INVALID_EC (-1ul) 290 - uint64_t expected_ec = INVALID_EC; 290 + u64 expected_ec = INVALID_EC; 291 291 292 292 static void guest_sync_handler(struct ex_regs *regs) 293 293 { 294 - uint64_t esr, ec; 294 + u64 esr, ec; 295 295 296 296 esr = read_sysreg(esr_el1); 297 297 ec = ESR_ELx_EC(esr); ··· 351 351 * if reading/writing PMU registers for implemented or unimplemented 352 352 * counters works as expected. 353 353 */ 354 - static void guest_code(uint64_t expected_pmcr_n) 354 + static void guest_code(u64 expected_pmcr_n) 355 355 { 356 - uint64_t pmcr, pmcr_n, unimp_mask; 356 + u64 pmcr, pmcr_n, unimp_mask; 357 357 int i, pmc; 358 358 359 359 __GUEST_ASSERT(expected_pmcr_n <= ARMV8_PMU_MAX_GENERAL_COUNTERS, ··· 402 402 static void create_vpmu_vm(void *guest_code) 403 403 { 404 404 struct kvm_vcpu_init init; 405 - uint8_t pmuver, ec; 406 - uint64_t dfr0, irq = 23; 405 + u8 pmuver, ec; 406 + u64 dfr0, irq = 23; 407 407 struct kvm_device_attr irq_attr = { 408 408 .group = KVM_ARM_VCPU_PMU_V3_CTRL, 409 409 .attr = KVM_ARM_VCPU_PMU_V3_IRQ, 410 - .addr = (uint64_t)&irq, 410 + .addr = (u64)&irq, 411 411 }; 412 412 413 413 /* The test creates the vpmu_vm multiple times. 
Ensure a clean state */ ··· 443 443 kvm_vm_free(vpmu_vm.vm); 444 444 } 445 445 446 - static void run_vcpu(struct kvm_vcpu *vcpu, uint64_t pmcr_n) 446 + static void run_vcpu(struct kvm_vcpu *vcpu, u64 pmcr_n) 447 447 { 448 448 struct ucall uc; 449 449 ··· 489 489 * Create a guest with one vCPU, set the PMCR_EL0.N for the vCPU to @pmcr_n, 490 490 * and run the test. 491 491 */ 492 - static void run_access_test(uint64_t pmcr_n) 492 + static void run_access_test(u64 pmcr_n) 493 493 { 494 - uint64_t sp; 494 + u64 sp; 495 495 struct kvm_vcpu *vcpu; 496 496 struct kvm_vcpu_init init; 497 497 ··· 514 514 aarch64_vcpu_setup(vcpu, &init); 515 515 vcpu_init_descriptor_tables(vcpu); 516 516 vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_SP_EL1), sp); 517 - vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code); 517 + vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (u64)guest_code); 518 518 519 519 run_vcpu(vcpu, pmcr_n); 520 520 ··· 531 531 * Create a VM, and check if KVM handles the userspace accesses of 532 532 * the PMU register sets in @validity_check_reg_sets[] correctly. 533 533 */ 534 - static void run_pmregs_validity_test(uint64_t pmcr_n) 534 + static void run_pmregs_validity_test(u64 pmcr_n) 535 535 { 536 536 int i; 537 537 struct kvm_vcpu *vcpu; 538 - uint64_t set_reg_id, clr_reg_id, reg_val; 539 - uint64_t valid_counters_mask, max_counters_mask; 538 + u64 set_reg_id, clr_reg_id, reg_val; 539 + u64 valid_counters_mask, max_counters_mask; 540 540 541 541 test_create_vpmu_vm_with_nr_counters(pmcr_n, false); 542 542 vcpu = vpmu_vm.vcpu; ··· 588 588 * the vCPU to @pmcr_n, which is larger than the host value. 589 589 * The attempt should fail as @pmcr_n is too big to set for the vCPU. 590 590 */ 591 - static void run_error_test(uint64_t pmcr_n) 591 + static void run_error_test(u64 pmcr_n) 592 592 { 593 593 pr_debug("Error test with pmcr_n %lu (larger than the host)\n", pmcr_n); 594 594 ··· 600 600 * Return the default number of implemented PMU event counters excluding 601 601 * the cycle counter (i.e. PMCR_EL0.N value) for the guest. 602 602 */ 603 - static uint64_t get_pmcr_n_limit(void) 603 + static u64 get_pmcr_n_limit(void) 604 604 { 605 - uint64_t pmcr; 605 + u64 pmcr; 606 606 607 607 create_vpmu_vm(guest_code); 608 608 pmcr = vcpu_get_reg(vpmu_vm.vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0)); ··· 624 624 625 625 int main(void) 626 626 { 627 - uint64_t i, pmcr_n; 627 + u64 i, pmcr_n; 628 628 629 629 TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_PMU_V3)); 630 630 TEST_REQUIRE(kvm_supports_vgic_v3());
+19 -19
tools/testing/selftests/kvm/coalesced_io_test.c
··· 14 14 15 15 struct kvm_coalesced_io { 16 16 struct kvm_coalesced_mmio_ring *ring; 17 - uint32_t ring_size; 18 - uint64_t mmio_gpa; 19 - uint64_t *mmio; 17 + u32 ring_size; 18 + u64 mmio_gpa; 19 + u64 *mmio; 20 20 21 21 /* 22 22 * x86-only, but define pio_port for all architectures to minimize the 23 23 * amount of #ifdeffery and complexity, without having to sacrifice 24 24 * verbose error messages. 25 25 */ 26 - uint8_t pio_port; 26 + u8 pio_port; 27 27 }; 28 28 29 29 static struct kvm_coalesced_io kvm_builtin_io_ring; ··· 70 70 71 71 static void vcpu_run_and_verify_io_exit(struct kvm_vcpu *vcpu, 72 72 struct kvm_coalesced_io *io, 73 - uint32_t ring_start, 74 - uint32_t expected_exit) 73 + u32 ring_start, 74 + u32 expected_exit) 75 75 { 76 76 const bool want_pio = expected_exit == KVM_EXIT_IO; 77 77 struct kvm_coalesced_mmio_ring *ring = io->ring; 78 78 struct kvm_run *run = vcpu->run; 79 - uint32_t pio_value; 79 + u32 pio_value; 80 80 81 81 WRITE_ONCE(ring->first, ring_start); 82 82 WRITE_ONCE(ring->last, ring_start); ··· 88 88 * data_offset is garbage, e.g. an MMIO gpa. 89 89 */ 90 90 if (run->exit_reason == KVM_EXIT_IO) 91 - pio_value = *(uint32_t *)((void *)run + run->io.data_offset); 91 + pio_value = *(u32 *)((void *)run + run->io.data_offset); 92 92 else 93 93 pio_value = 0; 94 94 95 95 TEST_ASSERT((!want_pio && (run->exit_reason == KVM_EXIT_MMIO && run->mmio.is_write && 96 96 run->mmio.phys_addr == io->mmio_gpa && run->mmio.len == 8 && 97 - *(uint64_t *)run->mmio.data == io->mmio_gpa + io->ring_size - 1)) || 97 + *(u64 *)run->mmio.data == io->mmio_gpa + io->ring_size - 1)) || 98 98 (want_pio && (run->exit_reason == KVM_EXIT_IO && run->io.port == io->pio_port && 99 99 run->io.direction == KVM_EXIT_IO_OUT && run->io.count == 1 && 100 100 pio_value == io->pio_port + io->ring_size - 1)), ··· 105 105 want_pio ? (unsigned long long)io->pio_port : io->mmio_gpa, 106 106 (want_pio ? io->pio_port : io->mmio_gpa) + io->ring_size - 1, run->exit_reason, 107 107 run->exit_reason == KVM_EXIT_MMIO ? "MMIO" : run->exit_reason == KVM_EXIT_IO ? "PIO" : "other", 108 - run->mmio.phys_addr, run->mmio.is_write, run->mmio.len, *(uint64_t *)run->mmio.data, 108 + run->mmio.phys_addr, run->mmio.is_write, run->mmio.len, *(u64 *)run->mmio.data, 109 109 run->io.port, run->io.direction, run->io.size, run->io.count, pio_value); 110 110 } 111 111 112 112 static void vcpu_run_and_verify_coalesced_io(struct kvm_vcpu *vcpu, 113 113 struct kvm_coalesced_io *io, 114 - uint32_t ring_start, 115 - uint32_t expected_exit) 114 + u32 ring_start, 115 + u32 expected_exit) 116 116 { 117 117 struct kvm_coalesced_mmio_ring *ring = io->ring; 118 118 int i; ··· 124 124 ring->first, ring->last, io->ring_size, ring_start); 125 125 126 126 for (i = 0; i < io->ring_size - 1; i++) { 127 - uint32_t idx = (ring->first + i) % io->ring_size; 127 + u32 idx = (ring->first + i) % io->ring_size; 128 128 struct kvm_coalesced_mmio *entry = &ring->coalesced_mmio[idx]; 129 129 130 130 #ifdef __x86_64__ 131 131 if (i & 1) 132 132 TEST_ASSERT(entry->phys_addr == io->pio_port && 133 133 entry->len == 4 && entry->pio && 134 - *(uint32_t *)entry->data == io->pio_port + i, 134 + *(u32 *)entry->data == io->pio_port + i, 135 135 "Wanted 4-byte port I/O 0x%x = 0x%x in entry %u, got %u-byte %s 0x%llx = 0x%x", 136 136 io->pio_port, io->pio_port + i, i, 137 137 entry->len, entry->pio ? 
"PIO" : "MMIO", 138 - entry->phys_addr, *(uint32_t *)entry->data); 138 + entry->phys_addr, *(u32 *)entry->data); 139 139 else 140 140 #endif 141 141 TEST_ASSERT(entry->phys_addr == io->mmio_gpa && ··· 143 143 "Wanted 8-byte MMIO to 0x%lx = %lx in entry %u, got %u-byte %s 0x%llx = 0x%lx", 144 144 io->mmio_gpa, io->mmio_gpa + i, i, 145 145 entry->len, entry->pio ? "PIO" : "MMIO", 146 - entry->phys_addr, *(uint64_t *)entry->data); 146 + entry->phys_addr, *(u64 *)entry->data); 147 147 } 148 148 } 149 149 150 150 static void test_coalesced_io(struct kvm_vcpu *vcpu, 151 - struct kvm_coalesced_io *io, uint32_t ring_start) 151 + struct kvm_coalesced_io *io, u32 ring_start) 152 152 { 153 153 struct kvm_coalesced_mmio_ring *ring = io->ring; 154 154 ··· 219 219 * the MMIO GPA identity mapped in the guest. 220 220 */ 221 221 .mmio_gpa = 4ull * SZ_1G, 222 - .mmio = (uint64_t *)(4ull * SZ_1G), 222 + .mmio = (u64 *)(4ull * SZ_1G), 223 223 .pio_port = 0x80, 224 224 }; 225 225 226 - virt_map(vm, (uint64_t)kvm_builtin_io_ring.mmio, kvm_builtin_io_ring.mmio_gpa, 1); 226 + virt_map(vm, (u64)kvm_builtin_io_ring.mmio, kvm_builtin_io_ring.mmio_gpa, 1); 227 227 228 228 sync_global_to_guest(vm, kvm_builtin_io_ring); 229 229 vcpu_args_set(vcpu, 1, &kvm_builtin_io_ring);
+5 -5
tools/testing/selftests/kvm/demand_paging_test.c
··· 24 24 #ifdef __NR_userfaultfd 25 25 26 26 static int nr_vcpus = 1; 27 - static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE; 27 + static u64 guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE; 28 28 29 29 static size_t demand_paging_size; 30 30 static char *guest_data_prototype; ··· 58 58 struct uffd_msg *msg) 59 59 { 60 60 pid_t tid = syscall(__NR_gettid); 61 - uint64_t addr = msg->arg.pagefault.address; 61 + u64 addr = msg->arg.pagefault.address; 62 62 struct timespec start; 63 63 struct timespec ts_diff; 64 64 int r; ··· 68 68 if (uffd_mode == UFFDIO_REGISTER_MODE_MISSING) { 69 69 struct uffdio_copy copy; 70 70 71 - copy.src = (uint64_t)guest_data_prototype; 71 + copy.src = (u64)guest_data_prototype; 72 72 copy.dst = addr; 73 73 copy.len = demand_paging_size; 74 74 copy.mode = 0; ··· 138 138 bool partition_vcpu_memory_access; 139 139 }; 140 140 141 - static void prefault_mem(void *alias, uint64_t len) 141 + static void prefault_mem(void *alias, u64 len) 142 142 { 143 143 size_t p; 144 144 ··· 154 154 struct memstress_vcpu_args *vcpu_args; 155 155 struct test_params *p = arg; 156 156 struct uffd_desc **uffd_descs = NULL; 157 - uint64_t uffd_region_size; 157 + u64 uffd_region_size; 158 158 struct timespec start; 159 159 struct timespec ts_diff; 160 160 double vcpu_paging_rate;
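The missing-fault path above is completed with UFFDIO_COPY; the ioctl itself sits outside the hunk. A minimal sketch of servicing one fault address, assuming uffd was registered with UFFDIO_REGISTER_MODE_MISSING and src points at page-sized, initialised backing data.

#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

static int resolve_missing_fault(int uffd, uint64_t fault_addr,
				 void *src, size_t page_size)
{
	struct uffdio_copy copy = {
		/* Fault address rounded down to the start of its page. */
		.dst  = fault_addr & ~(uint64_t)(page_size - 1),
		.src  = (uint64_t)(uintptr_t)src,
		.len  = page_size,
		.mode = 0,	/* 0 = wake the faulting thread once the copy lands */
	};

	/* On success, copy.copy reports the number of bytes installed. */
	return ioctl(uffd, UFFDIO_COPY, &copy);
}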
+7 -7
tools/testing/selftests/kvm/dirty_log_perf_test.c
··· 24 24 #define TEST_HOST_LOOP_N 2UL 25 25 26 26 static int nr_vcpus = 1; 27 - static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE; 27 + static u64 guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE; 28 28 static bool run_vcpus_while_disabling_dirty_logging; 29 29 30 30 /* Host variables */ ··· 37 37 { 38 38 struct kvm_vcpu *vcpu = vcpu_args->vcpu; 39 39 int vcpu_idx = vcpu_args->vcpu_idx; 40 - uint64_t pages_count = 0; 40 + u64 pages_count = 0; 41 41 struct kvm_run *run; 42 42 struct timespec start; 43 43 struct timespec ts_diff; ··· 93 93 94 94 struct test_params { 95 95 unsigned long iterations; 96 - uint64_t phys_offset; 96 + u64 phys_offset; 97 97 bool partition_vcpu_memory_access; 98 98 enum vm_mem_backing_src_type backing_src; 99 99 int slots; 100 - uint32_t write_percent; 100 + u32 write_percent; 101 101 bool random_access; 102 102 }; 103 103 ··· 106 106 struct test_params *p = arg; 107 107 struct kvm_vm *vm; 108 108 unsigned long **bitmaps; 109 - uint64_t guest_num_pages; 110 - uint64_t host_num_pages; 111 - uint64_t pages_per_slot; 109 + u64 guest_num_pages; 110 + u64 host_num_pages; 111 + u64 pages_per_slot; 112 112 struct timespec start; 113 113 struct timespec ts_diff; 114 114 struct timespec get_dirty_log_total = (struct timespec){0};
+41 -41
tools/testing/selftests/kvm/dirty_log_test.c
··· 74 74 * the host. READ/WRITE_ONCE() should also be used with anything 75 75 * that may change. 76 76 */ 77 - static uint64_t host_page_size; 78 - static uint64_t guest_page_size; 79 - static uint64_t guest_num_pages; 80 - static uint64_t iteration; 81 - static uint64_t nr_writes; 77 + static u64 host_page_size; 78 + static u64 guest_page_size; 79 + static u64 guest_num_pages; 80 + static u64 iteration; 81 + static u64 nr_writes; 82 82 static bool vcpu_stop; 83 83 84 84 /* ··· 86 86 * This will be set to the topmost valid physical address minus 87 87 * the test memory size. 88 88 */ 89 - static uint64_t guest_test_phys_mem; 89 + static u64 guest_test_phys_mem; 90 90 91 91 /* 92 92 * Guest virtual memory offset of the testing memory slot. 93 93 * Must not conflict with identity mapped test code. 94 94 */ 95 - static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM; 95 + static u64 guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM; 96 96 97 97 /* 98 98 * Continuously write to the first 8 bytes of a random pages within ··· 100 100 */ 101 101 static void guest_code(void) 102 102 { 103 - uint64_t addr; 103 + u64 addr; 104 104 105 105 #ifdef __s390x__ 106 - uint64_t i; 106 + u64 i; 107 107 108 108 /* 109 109 * On s390x, all pages of a 1M segment are initially marked as dirty ··· 113 113 */ 114 114 for (i = 0; i < guest_num_pages; i++) { 115 115 addr = guest_test_virt_mem + i * guest_page_size; 116 - vcpu_arch_put_guest(*(uint64_t *)addr, READ_ONCE(iteration)); 116 + vcpu_arch_put_guest(*(u64 *)addr, READ_ONCE(iteration)); 117 117 nr_writes++; 118 118 } 119 119 #endif ··· 125 125 * guest_page_size; 126 126 addr = align_down(addr, host_page_size); 127 127 128 - vcpu_arch_put_guest(*(uint64_t *)addr, READ_ONCE(iteration)); 128 + vcpu_arch_put_guest(*(u64 *)addr, READ_ONCE(iteration)); 129 129 nr_writes++; 130 130 } 131 131 ··· 138 138 139 139 /* Points to the test VM memory region on which we track dirty logs */ 140 140 static void *host_test_mem; 141 - static uint64_t host_num_pages; 141 + static u64 host_num_pages; 142 142 143 143 /* For statistics only */ 144 - static uint64_t host_dirty_count; 145 - static uint64_t host_clear_count; 144 + static u64 host_dirty_count; 145 + static u64 host_clear_count; 146 146 147 147 /* Whether dirty ring reset is requested, or finished */ 148 148 static sem_t sem_vcpu_stop; ··· 169 169 * dirty gfn we've collected, so that if a mismatch of data found later in the 170 170 * verifying process, we let it pass. 171 171 */ 172 - static uint64_t dirty_ring_last_page = -1ULL; 172 + static u64 dirty_ring_last_page = -1ULL; 173 173 174 174 /* 175 175 * In addition to the above, it is possible (especially if this ··· 213 213 * and also don't fail when it is reported in the next iteration, together with 214 214 * an outdated iteration count. 
215 215 */ 216 - static uint64_t dirty_ring_prev_iteration_last_page; 216 + static u64 dirty_ring_prev_iteration_last_page; 217 217 218 218 enum log_mode_t { 219 219 /* Only use KVM_GET_DIRTY_LOG for logging */ ··· 236 236 /* Logging mode for current run */ 237 237 static enum log_mode_t host_log_mode; 238 238 static pthread_t vcpu_thread; 239 - static uint32_t test_dirty_ring_count = TEST_DIRTY_RING_COUNT; 239 + static u32 test_dirty_ring_count = TEST_DIRTY_RING_COUNT; 240 240 241 241 static bool clear_log_supported(void) 242 242 { ··· 255 255 } 256 256 257 257 static void dirty_log_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot, 258 - void *bitmap, uint32_t num_pages, 259 - uint32_t *unused) 258 + void *bitmap, u32 num_pages, 259 + u32 *unused) 260 260 { 261 261 kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap); 262 262 } 263 263 264 264 static void clear_log_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot, 265 - void *bitmap, uint32_t num_pages, 266 - uint32_t *unused) 265 + void *bitmap, u32 num_pages, 266 + u32 *unused) 267 267 { 268 268 kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap); 269 269 kvm_vm_clear_dirty_log(vcpu->vm, slot, bitmap, 0, num_pages); ··· 297 297 298 298 static void dirty_ring_create_vm_done(struct kvm_vm *vm) 299 299 { 300 - uint64_t pages; 301 - uint32_t limit; 300 + u64 pages; 301 + u32 limit; 302 302 303 303 /* 304 304 * We rely on vcpu exit due to full dirty ring state. Adjust ··· 333 333 smp_store_release(&gfn->flags, KVM_DIRTY_GFN_F_RESET); 334 334 } 335 335 336 - static uint32_t dirty_ring_collect_one(struct kvm_dirty_gfn *dirty_gfns, 337 - int slot, void *bitmap, 338 - uint32_t num_pages, uint32_t *fetch_index) 336 + static u32 dirty_ring_collect_one(struct kvm_dirty_gfn *dirty_gfns, 337 + int slot, void *bitmap, 338 + u32 num_pages, u32 *fetch_index) 339 339 { 340 340 struct kvm_dirty_gfn *cur; 341 - uint32_t count = 0; 341 + u32 count = 0; 342 342 343 343 while (true) { 344 344 cur = &dirty_gfns[*fetch_index % test_dirty_ring_count]; ··· 359 359 } 360 360 361 361 static void dirty_ring_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot, 362 - void *bitmap, uint32_t num_pages, 363 - uint32_t *ring_buf_idx) 362 + void *bitmap, u32 num_pages, 363 + u32 *ring_buf_idx) 364 364 { 365 - uint32_t count, cleared; 365 + u32 count, cleared; 366 366 367 367 /* Only have one vcpu */ 368 368 count = dirty_ring_collect_one(vcpu_map_dirty_ring(vcpu), ··· 404 404 void (*create_vm_done)(struct kvm_vm *vm); 405 405 /* Hook to collect the dirty pages into the bitmap provided */ 406 406 void (*collect_dirty_pages) (struct kvm_vcpu *vcpu, int slot, 407 - void *bitmap, uint32_t num_pages, 408 - uint32_t *ring_buf_idx); 407 + void *bitmap, u32 num_pages, 408 + u32 *ring_buf_idx); 409 409 /* Hook to call when after each vcpu run */ 410 410 void (*after_vcpu_run)(struct kvm_vcpu *vcpu); 411 411 } log_modes[LOG_MODE_NUM] = { ··· 459 459 } 460 460 461 461 static void log_mode_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot, 462 - void *bitmap, uint32_t num_pages, 463 - uint32_t *ring_buf_idx) 462 + void *bitmap, u32 num_pages, 463 + u32 *ring_buf_idx) 464 464 { 465 465 struct log_mode *mode = &log_modes[host_log_mode]; 466 466 ··· 494 494 495 495 static void vm_dirty_log_verify(enum vm_guest_mode mode, unsigned long **bmap) 496 496 { 497 - uint64_t page, nr_dirty_pages = 0, nr_clean_pages = 0; 498 - uint64_t step = vm_num_host_pages(mode, 1); 497 + u64 page, nr_dirty_pages = 0, nr_clean_pages = 0; 498 + u64 step = vm_num_host_pages(mode, 1); 499 499 500 500 for (page = 0; 
page < host_num_pages; page += step) { 501 - uint64_t val = *(uint64_t *)(host_test_mem + page * host_page_size); 501 + u64 val = *(u64 *)(host_test_mem + page * host_page_size); 502 502 bool bmap0_dirty = __test_and_clear_bit_le(page, bmap[0]); 503 503 504 504 /* ··· 575 575 } 576 576 577 577 static struct kvm_vm *create_vm(enum vm_guest_mode mode, struct kvm_vcpu **vcpu, 578 - uint64_t extra_mem_pages, void *guest_code) 578 + u64 extra_mem_pages, void *guest_code) 579 579 { 580 580 struct kvm_vm *vm; 581 581 ··· 592 592 struct test_params { 593 593 unsigned long iterations; 594 594 unsigned long interval; 595 - uint64_t phys_offset; 595 + u64 phys_offset; 596 596 }; 597 597 598 598 static void run_test(enum vm_guest_mode mode, void *arg) ··· 601 601 struct kvm_vcpu *vcpu; 602 602 struct kvm_vm *vm; 603 603 unsigned long *bmap[2]; 604 - uint32_t ring_buf_idx = 0; 604 + u32 ring_buf_idx = 0; 605 605 int sem_val; 606 606 607 607 if (!log_mode_supported()) { ··· 667 667 virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages); 668 668 669 669 /* Cache the HVA pointer of the region */ 670 - host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem); 670 + host_test_mem = addr_gpa2hva(vm, (gpa_t)guest_test_phys_mem); 671 671 672 672 /* Export the shared variables to the guest */ 673 673 sync_global_to_guest(vm, host_page_size);
+1 -1
tools/testing/selftests/kvm/get-reg-list.c
··· 216 216 * since we don't know the capabilities of any new registers. 217 217 */ 218 218 for_each_present_blessed_reg(i) { 219 - uint8_t addr[2048 / 8]; 219 + u8 addr[2048 / 8]; 220 220 struct kvm_one_reg reg = { 221 221 .id = reg_list->reg[i], 222 222 .addr = (__u64)&addr,
+9 -9
tools/testing/selftests/kvm/guest_memfd_test.c
··· 171 171 kvm_munmap(mem, total_size); 172 172 } 173 173 174 - static void test_collapse(int fd, uint64_t flags) 174 + static void test_collapse(int fd, u64 flags) 175 175 { 176 176 const size_t pmd_size = get_trans_hugepagesz(); 177 177 void *reserved_addr; ··· 346 346 } 347 347 348 348 static void test_create_guest_memfd_invalid_sizes(struct kvm_vm *vm, 349 - uint64_t guest_memfd_flags) 349 + u64 guest_memfd_flags) 350 350 { 351 351 size_t size; 352 352 int fd; ··· 389 389 390 390 static void test_guest_memfd_flags(struct kvm_vm *vm) 391 391 { 392 - uint64_t valid_flags = vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS); 393 - uint64_t flag; 392 + u64 valid_flags = vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS); 393 + u64 flag; 394 394 int fd; 395 395 396 396 for (flag = BIT(0); flag; flag <<= 1) { ··· 419 419 #define gmem_test(__test, __vm, __flags) \ 420 420 __gmem_test(__test, __vm, __flags, page_size * 4) 421 421 422 - static void __test_guest_memfd(struct kvm_vm *vm, uint64_t flags) 422 + static void __test_guest_memfd(struct kvm_vm *vm, u64 flags) 423 423 { 424 424 test_create_guest_memfd_multiple(vm); 425 425 test_create_guest_memfd_invalid_sizes(vm, flags); ··· 452 452 static void test_guest_memfd(unsigned long vm_type) 453 453 { 454 454 struct kvm_vm *vm = vm_create_barebones_type(vm_type); 455 - uint64_t flags; 455 + u64 flags; 456 456 457 457 test_guest_memfd_flags(vm); 458 458 ··· 470 470 kvm_vm_free(vm); 471 471 } 472 472 473 - static void guest_code(uint8_t *mem, uint64_t size) 473 + static void guest_code(u8 *mem, u64 size) 474 474 { 475 475 size_t i; 476 476 ··· 489 489 * the guest's code, stack, and page tables, and low memory contains 490 490 * the PCI hole and other MMIO regions that need to be avoided. 491 491 */ 492 - const uint64_t gpa = SZ_4G; 492 + const gpa_t gpa = SZ_4G; 493 493 const int slot = 1; 494 494 495 495 struct kvm_vcpu *vcpu; 496 496 struct kvm_vm *vm; 497 - uint8_t *mem; 497 + u8 *mem; 498 498 size_t size; 499 499 int fd, i; 500 500
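The flag-probing loop above asks KVM_CAP_GUEST_MEMFD_FLAGS which bits the VM accepts, then creates one guest_memfd per flag through the selftest wrappers. A sketch of the raw creation ioctl those wrappers sit on, assuming the uapi struct kvm_create_guest_memfd layout (size, flags, reserved space); error handling is omitted.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int create_guest_memfd(int vm_fd, uint64_t size, uint64_t flags)
{
	struct kvm_create_guest_memfd gmem = {
		.size  = size,	/* must be a multiple of the page size */
		.flags = flags,	/* subset of the KVM_CAP_GUEST_MEMFD_FLAGS bits */
	};

	/* Returns a new file descriptor backing guest memory for this VM. */
	return ioctl(vm_fd, KVM_CREATE_GUEST_MEMFD, &gmem);
}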
+11 -11
tools/testing/selftests/kvm/guest_print_test.c
··· 16 16 #include "ucall_common.h" 17 17 18 18 struct guest_vals { 19 - uint64_t a; 20 - uint64_t b; 21 - uint64_t type; 19 + u64 a; 20 + u64 b; 21 + u64 type; 22 22 }; 23 23 24 24 static struct guest_vals vals; 25 25 26 26 /* GUEST_PRINTF()/GUEST_ASSERT_FMT() does not support float or double. */ 27 27 #define TYPE_LIST \ 28 - TYPE(test_type_i64, I64, "%ld", int64_t) \ 29 - TYPE(test_type_u64, U64u, "%lu", uint64_t) \ 30 - TYPE(test_type_x64, U64x, "0x%lx", uint64_t) \ 31 - TYPE(test_type_X64, U64X, "0x%lX", uint64_t) \ 32 - TYPE(test_type_u32, U32u, "%u", uint32_t) \ 33 - TYPE(test_type_x32, U32x, "0x%x", uint32_t) \ 34 - TYPE(test_type_X32, U32X, "0x%X", uint32_t) \ 28 + TYPE(test_type_i64, I64, "%ld", s64) \ 29 + TYPE(test_type_u64, U64u, "%lu", u64) \ 30 + TYPE(test_type_x64, U64x, "0x%lx", u64) \ 31 + TYPE(test_type_X64, U64X, "0x%lX", u64) \ 32 + TYPE(test_type_u32, U32u, "%u", u32) \ 33 + TYPE(test_type_x32, U32x, "0x%x", u32) \ 34 + TYPE(test_type_X32, U32X, "0x%X", u32) \ 35 35 TYPE(test_type_int, INT, "%d", int) \ 36 36 TYPE(test_type_char, CHAR, "%c", char) \ 37 37 TYPE(test_type_str, STR, "'%s'", const char *) \ ··· 56 56 \ 57 57 snprintf(expected_printf, UCALL_BUFFER_LEN, PRINTF_FMT_##ext, a, b); \ 58 58 snprintf(expected_assert, UCALL_BUFFER_LEN, ASSERT_FMT_##ext, a, b); \ 59 - vals = (struct guest_vals){ (uint64_t)a, (uint64_t)b, TYPE_##ext }; \ 59 + vals = (struct guest_vals){ (u64)a, (u64)b, TYPE_##ext }; \ 60 60 sync_global_to_guest(vcpu->vm, vals); \ 61 61 run_test(vcpu, expected_printf, expected_assert); \ 62 62 }
+3 -3
tools/testing/selftests/kvm/hardware_disable_test.c
··· 80 80 TEST_ASSERT(r == 0, "%s: failed to join thread", __func__); 81 81 } 82 82 83 - static void run_test(uint32_t run) 83 + static void run_test(u32 run) 84 84 { 85 85 struct kvm_vcpu *vcpu; 86 86 struct kvm_vm *vm; ··· 88 88 pthread_t threads[VCPU_NUM]; 89 89 pthread_t throw_away; 90 90 void *b; 91 - uint32_t i, j; 91 + u32 i, j; 92 92 93 93 CPU_ZERO(&cpu_set); 94 94 for (i = 0; i < VCPU_NUM; i++) ··· 149 149 150 150 int main(int argc, char **argv) 151 151 { 152 - uint32_t i; 152 + u32 i; 153 153 int s, r; 154 154 pid_t pid; 155 155
+15 -15
tools/testing/selftests/kvm/include/arm64/arch_timer.h
··· 18 18 #define CTL_ISTATUS (1 << 2) 19 19 20 20 #define msec_to_cycles(msec) \ 21 - (timer_get_cntfrq() * (uint64_t)(msec) / 1000) 21 + (timer_get_cntfrq() * (u64)(msec) / 1000) 22 22 23 23 #define usec_to_cycles(usec) \ 24 - (timer_get_cntfrq() * (uint64_t)(usec) / 1000000) 24 + (timer_get_cntfrq() * (u64)(usec) / 1000000) 25 25 26 26 #define cycles_to_usec(cycles) \ 27 - ((uint64_t)(cycles) * 1000000 / timer_get_cntfrq()) 27 + ((u64)(cycles) * 1000000 / timer_get_cntfrq()) 28 28 29 - static inline uint32_t timer_get_cntfrq(void) 29 + static inline u32 timer_get_cntfrq(void) 30 30 { 31 31 return read_sysreg(cntfrq_el0); 32 32 } 33 33 34 - static inline uint64_t timer_get_cntct(enum arch_timer timer) 34 + static inline u64 timer_get_cntct(enum arch_timer timer) 35 35 { 36 36 isb(); 37 37 ··· 48 48 return 0; 49 49 } 50 50 51 - static inline void timer_set_cval(enum arch_timer timer, uint64_t cval) 51 + static inline void timer_set_cval(enum arch_timer timer, u64 cval) 52 52 { 53 53 switch (timer) { 54 54 case VIRTUAL: ··· 64 64 isb(); 65 65 } 66 66 67 - static inline uint64_t timer_get_cval(enum arch_timer timer) 67 + static inline u64 timer_get_cval(enum arch_timer timer) 68 68 { 69 69 switch (timer) { 70 70 case VIRTUAL: ··· 79 79 return 0; 80 80 } 81 81 82 - static inline void timer_set_tval(enum arch_timer timer, int32_t tval) 82 + static inline void timer_set_tval(enum arch_timer timer, s32 tval) 83 83 { 84 84 switch (timer) { 85 85 case VIRTUAL: ··· 95 95 isb(); 96 96 } 97 97 98 - static inline int32_t timer_get_tval(enum arch_timer timer) 98 + static inline s32 timer_get_tval(enum arch_timer timer) 99 99 { 100 100 isb(); 101 101 switch (timer) { ··· 111 111 return 0; 112 112 } 113 113 114 - static inline void timer_set_ctl(enum arch_timer timer, uint32_t ctl) 114 + static inline void timer_set_ctl(enum arch_timer timer, u32 ctl) 115 115 { 116 116 switch (timer) { 117 117 case VIRTUAL: ··· 127 127 isb(); 128 128 } 129 129 130 - static inline uint32_t timer_get_ctl(enum arch_timer timer) 130 + static inline u32 timer_get_ctl(enum arch_timer timer) 131 131 { 132 132 switch (timer) { 133 133 case VIRTUAL: ··· 142 142 return 0; 143 143 } 144 144 145 - static inline void timer_set_next_cval_ms(enum arch_timer timer, uint32_t msec) 145 + static inline void timer_set_next_cval_ms(enum arch_timer timer, u32 msec) 146 146 { 147 - uint64_t now_ct = timer_get_cntct(timer); 148 - uint64_t next_ct = now_ct + msec_to_cycles(msec); 147 + u64 now_ct = timer_get_cntct(timer); 148 + u64 next_ct = now_ct + msec_to_cycles(msec); 149 149 150 150 timer_set_cval(timer, next_ct); 151 151 } 152 152 153 - static inline void timer_set_next_tval_ms(enum arch_timer timer, uint32_t msec) 153 + static inline void timer_set_next_tval_ms(enum arch_timer timer, u32 msec) 154 154 { 155 155 timer_set_tval(timer, msec_to_cycles(msec)); 156 156 }
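The arm64 timer helpers above are frequency-relative: msec_to_cycles() scales by CNTFRQ, and timer_set_next_cval_ms() programs CVAL that many cycles past the current count. A hedged guest-side sketch that arms the virtual timer 10 ms out using only these helpers (CTL_ENABLE and cpu_relax() are assumed from the surrounding selftest headers, not from this hunk):

/* Guest code sketch; assumes the selftest's arch_timer.h and processor.h. */
static void guest_arm_vtimer_10ms(void)
{
	u64 deadline = timer_get_cntct(VIRTUAL) + msec_to_cycles(10);

	timer_set_cval(VIRTUAL, deadline);	/* absolute compare value */
	timer_set_ctl(VIRTUAL, CTL_ENABLE);	/* enable, interrupt unmasked */

	/* Poll ISTATUS instead of taking the interrupt, to keep the sketch short. */
	while (!(timer_get_ctl(VIRTUAL) & CTL_ISTATUS))
		cpu_relax();
}

The same deadline could be set relative to "now" in one call with timer_set_next_cval_ms(VIRTUAL, 10); the open-coded form just makes the cycle arithmetic visible.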
+2 -2
tools/testing/selftests/kvm/include/arm64/delay.h
··· 8 8 9 9 #include "arch_timer.h" 10 10 11 - static inline void __delay(uint64_t cycles) 11 + static inline void __delay(u64 cycles) 12 12 { 13 13 enum arch_timer timer = VIRTUAL; 14 - uint64_t start = timer_get_cntct(timer); 14 + u64 start = timer_get_cntct(timer); 15 15 16 16 while ((timer_get_cntct(timer) - start) < cycles) 17 17 cpu_relax();
+4 -4
tools/testing/selftests/kvm/include/arm64/gic.h
··· 48 48 * split is true, EOI drops the priority and deactivates the interrupt. 49 49 */ 50 50 void gic_set_eoi_split(bool split); 51 - void gic_set_priority_mask(uint64_t mask); 52 - void gic_set_priority(uint32_t intid, uint32_t prio); 51 + void gic_set_priority_mask(u64 mask); 52 + void gic_set_priority(u32 intid, u32 prio); 53 53 void gic_irq_set_active(unsigned int intid); 54 54 void gic_irq_clear_active(unsigned int intid); 55 55 bool gic_irq_get_active(unsigned int intid); ··· 59 59 void gic_irq_set_config(unsigned int intid, bool is_edge); 60 60 void gic_irq_set_group(unsigned int intid, bool group); 61 61 62 - void gic_rdist_enable_lpis(vm_paddr_t cfg_table, size_t cfg_table_size, 63 - vm_paddr_t pend_table); 62 + void gic_rdist_enable_lpis(gpa_t cfg_table, size_t cfg_table_size, 63 + gpa_t pend_table); 64 64 65 65 #endif /* SELFTEST_KVM_GIC_H */
+3 -4
tools/testing/selftests/kvm/include/arm64/gic_v3_its.h
··· 5 5 6 6 #include <linux/sizes.h> 7 7 8 - void its_init(vm_paddr_t coll_tbl, size_t coll_tbl_sz, 9 - vm_paddr_t device_tbl, size_t device_tbl_sz, 10 - vm_paddr_t cmdq, size_t cmdq_size); 8 + void its_init(gpa_t coll_tbl, size_t coll_tbl_sz, gpa_t device_tbl, 9 + size_t device_tbl_sz, gpa_t cmdq, size_t cmdq_size); 11 10 12 - void its_send_mapd_cmd(void *cmdq_base, u32 device_id, vm_paddr_t itt_base, 11 + void its_send_mapd_cmd(void *cmdq_base, u32 device_id, gpa_t itt_base, 13 12 size_t itt_size, bool valid); 14 13 void its_send_mapc_cmd(void *cmdq_base, u32 vcpu_id, u32 collection_id, bool valid); 15 14 void its_send_mapti_cmd(void *cmdq_base, u32 device_id, u32 event_id,
+11 -11
tools/testing/selftests/kvm/include/arm64/processor.h
··· 128 128 #define PTE_ADDR_51_50_LPA2_SHIFT 8 129 129 130 130 void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init); 131 - struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, 131 + struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, u32 vcpu_id, 132 132 struct kvm_vcpu_init *init, void *guest_code); 133 133 134 134 struct ex_regs { ··· 167 167 (v) == VECTOR_SYNC_LOWER_64 || \ 168 168 (v) == VECTOR_SYNC_LOWER_32) 169 169 170 - void aarch64_get_supported_page_sizes(uint32_t ipa, uint32_t *ipa4k, 171 - uint32_t *ipa16k, uint32_t *ipa64k); 170 + void aarch64_get_supported_page_sizes(u32 ipa, u32 *ipa4k, 171 + u32 *ipa16k, u32 *ipa64k); 172 172 173 173 void vm_init_descriptor_tables(struct kvm_vm *vm); 174 174 void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu); ··· 179 179 void vm_install_sync_handler(struct kvm_vm *vm, 180 180 int vector, int ec, handler_fn handler); 181 181 182 - uint64_t *virt_get_pte_hva_at_level(struct kvm_vm *vm, vm_vaddr_t gva, int level); 183 - uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva); 182 + u64 *virt_get_pte_hva_at_level(struct kvm_vm *vm, gva_t gva, int level); 183 + u64 *virt_get_pte_hva(struct kvm_vm *vm, gva_t gva); 184 184 185 185 static inline void cpu_relax(void) 186 186 { ··· 287 287 * @res: pointer to write the return values from registers x0-x3 288 288 * 289 289 */ 290 - void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1, 291 - uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5, 292 - uint64_t arg6, struct arm_smccc_res *res); 290 + void smccc_hvc(u32 function_id, u64 arg0, u64 arg1, 291 + u64 arg2, u64 arg3, u64 arg4, u64 arg5, 292 + u64 arg6, struct arm_smccc_res *res); 293 293 294 294 /** 295 295 * smccc_smc - Invoke a SMCCC function using the smc conduit ··· 298 298 * @res: pointer to write the return values from registers x0-x3 299 299 * 300 300 */ 301 - void smccc_smc(uint32_t function_id, uint64_t arg0, uint64_t arg1, 302 - uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5, 303 - uint64_t arg6, struct arm_smccc_res *res); 301 + void smccc_smc(u32 function_id, u64 arg0, u64 arg1, 302 + u64 arg2, u64 arg3, u64 arg4, u64 arg5, 303 + u64 arg6, struct arm_smccc_res *res); 304 304 305 305 /* Execute a Wait For Interrupt instruction. */ 306 306 void wfi(void);
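smccc_hvc() above marshals a 32-bit function ID plus seven u64 arguments into an HVC and returns x0-x3 through struct arm_smccc_res. A hedged guest-side sketch querying SMCCC_VERSION; the 0x80000000 function ID and the res.a0 field are assumptions taken from the SMCCC specification and linux/arm-smccc.h, not from this hunk:

/* Guest code sketch; assumes the selftest's processor.h and <linux/arm-smccc.h>. */
static u32 guest_smccc_version(void)
{
	struct arm_smccc_res res;

	/* SMCCC_VERSION takes no arguments, so the seven arg slots are zero. */
	smccc_hvc(0x80000000 /* SMCCC_VERSION */, 0, 0, 0, 0, 0, 0, 0, &res);

	return (u32)res.a0;	/* x0 carries the version (or a negative error code) */
}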
+2 -2
tools/testing/selftests/kvm/include/arm64/ucall.h
··· 10 10 * ucall_exit_mmio_addr holds per-VM values (global data is duplicated by each 11 11 * VM), it must not be accessed from host code. 12 12 */ 13 - extern vm_vaddr_t *ucall_exit_mmio_addr; 13 + extern gva_t *ucall_exit_mmio_addr; 14 14 15 - static inline void ucall_arch_do_ucall(vm_vaddr_t uc) 15 + static inline void ucall_arch_do_ucall(gva_t uc) 16 16 { 17 17 WRITE_ONCE(*ucall_exit_mmio_addr, uc); 18 18 }
+11 -11
tools/testing/selftests/kvm/include/arm64/vgic.h
··· 11 11 #include "kvm_util.h" 12 12 13 13 #define REDIST_REGION_ATTR_ADDR(count, base, flags, index) \ 14 - (((uint64_t)(count) << 52) | \ 15 - ((uint64_t)((base) >> 16) << 16) | \ 16 - ((uint64_t)(flags) << 12) | \ 14 + (((u64)(count) << 52) | \ 15 + ((u64)((base) >> 16) << 16) | \ 16 + ((u64)(flags) << 12) | \ 17 17 index) 18 18 19 19 bool kvm_supports_vgic_v3(void); 20 - int __vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs); 20 + int __vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, u32 nr_irqs); 21 21 void __vgic_v3_init(int fd); 22 - int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs); 22 + int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, u32 nr_irqs); 23 23 24 24 #define VGIC_MAX_RESERVED 1023 25 25 26 - void kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level); 27 - int _kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level); 26 + void kvm_irq_set_level_info(int gic_fd, u32 intid, int level); 27 + int _kvm_irq_set_level_info(int gic_fd, u32 intid, int level); 28 28 29 - void kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level); 30 - int _kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level); 29 + void kvm_arm_irq_line(struct kvm_vm *vm, u32 intid, int level); 30 + int _kvm_arm_irq_line(struct kvm_vm *vm, u32 intid, int level); 31 31 32 32 /* The vcpu arg only applies to private interrupts. */ 33 - void kvm_irq_write_ispendr(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu); 34 - void kvm_irq_write_isactiver(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu); 33 + void kvm_irq_write_ispendr(int gic_fd, u32 intid, struct kvm_vcpu *vcpu); 34 + void kvm_irq_write_isactiver(int gic_fd, u32 intid, struct kvm_vcpu *vcpu); 35 35 36 36 #define KVM_IRQCHIP_NUM_PINS (1020 - 32) 37 37
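REDIST_REGION_ATTR_ADDR() above packs four fields into a single u64 device attribute: the vCPU count at bit 52 and up, the 64 KiB-aligned base address with its low 16 bits cleared, the flags at bit 12, and the region index in the low bits. A standalone worked example (the 0x08080000 base is an arbitrary illustration):

#include <stdio.h>
#include <stdint.h>

#define REDIST_REGION_ATTR_ADDR(count, base, flags, index)	\
	(((uint64_t)(count) << 52) |				\
	 ((uint64_t)((base) >> 16) << 16) |			\
	 ((uint64_t)(flags) << 12) |				\
	 (index))

int main(void)
{
	/* One redistributor region for 1 vCPU at GPA 0x08080000, index 0. */
	uint64_t attr = REDIST_REGION_ATTR_ADDR(1, 0x08080000ULL, 0, 0);

	printf("attr = 0x%016llx\n", (unsigned long long)attr);
	/* Prints 0x0010000008080000: count in bits 63:52, base in bits 51:16. */
	return 0;
}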
+165 -179
tools/testing/selftests/kvm/include/kvm_util.h
··· 58 58 59 59 struct kvm_vcpu { 60 60 struct list_head list; 61 - uint32_t id; 61 + u32 id; 62 62 int fd; 63 63 struct kvm_vm *vm; 64 64 struct kvm_run *run; ··· 70 70 #endif 71 71 struct kvm_binary_stats stats; 72 72 struct kvm_dirty_gfn *dirty_gfns; 73 - uint32_t fetch_index; 74 - uint32_t dirty_gfns_count; 73 + u32 fetch_index; 74 + u32 dirty_gfns_count; 75 75 }; 76 76 77 77 struct userspace_mem_regions { ··· 90 90 91 91 struct kvm_mmu { 92 92 bool pgd_created; 93 - uint64_t pgd; 93 + u64 pgd; 94 94 int pgtable_levels; 95 95 96 96 struct kvm_mmu_arch arch; ··· 105 105 unsigned int page_shift; 106 106 unsigned int pa_bits; 107 107 unsigned int va_bits; 108 - uint64_t max_gfn; 108 + u64 max_gfn; 109 109 struct list_head vcpus; 110 110 struct userspace_mem_regions regions; 111 111 struct sparsebit *vpages_valid; 112 112 struct sparsebit *vpages_mapped; 113 113 bool has_irqchip; 114 - vm_paddr_t ucall_mmio_addr; 115 - vm_vaddr_t handlers; 116 - uint32_t dirty_ring_size; 117 - uint64_t gpa_tag_mask; 114 + gpa_t ucall_mmio_addr; 115 + gva_t handlers; 116 + u32 dirty_ring_size; 117 + gpa_t gpa_tag_mask; 118 118 119 119 /* 120 120 * "mmu" is the guest's stage-1, with a short name because the vast ··· 132 132 * allocators, e.g., lib/elf uses the memslots[MEM_REGION_CODE] 133 133 * memslot. 134 134 */ 135 - uint32_t memslots[NR_MEM_REGIONS]; 135 + u32 memslots[NR_MEM_REGIONS]; 136 136 }; 137 137 138 138 struct vcpu_reg_sublist { ··· 164 164 else 165 165 166 166 struct userspace_mem_region * 167 - memslot2region(struct kvm_vm *vm, uint32_t memslot); 167 + memslot2region(struct kvm_vm *vm, u32 memslot); 168 168 169 169 static inline struct userspace_mem_region *vm_get_mem_region(struct kvm_vm *vm, 170 170 enum kvm_mem_region_type type) ··· 213 213 }; 214 214 215 215 struct vm_shape { 216 - uint32_t type; 217 - uint8_t mode; 218 - uint8_t pad0; 219 - uint16_t pad1; 216 + u32 type; 217 + u8 mode; 218 + u8 pad0; 219 + u16 pad1; 220 220 }; 221 221 222 - kvm_static_assert(sizeof(struct vm_shape) == sizeof(uint64_t)); 222 + kvm_static_assert(sizeof(struct vm_shape) == sizeof(u64)); 223 223 224 224 #define VM_TYPE_DEFAULT 0 225 225 ··· 404 404 return ret; 405 405 } 406 406 407 - static inline int __vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0) 407 + static inline int __vm_enable_cap(struct kvm_vm *vm, u32 cap, u64 arg0) 408 408 { 409 409 struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } }; 410 410 411 411 return __vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap); 412 412 } 413 - static inline void vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0) 413 + 414 + static inline void vm_enable_cap(struct kvm_vm *vm, u32 cap, u64 arg0) 414 415 { 415 416 struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } }; 416 417 417 418 vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap); 418 419 } 419 420 420 - static inline void vm_set_memory_attributes(struct kvm_vm *vm, uint64_t gpa, 421 - uint64_t size, uint64_t attributes) 421 + static inline void vm_set_memory_attributes(struct kvm_vm *vm, gpa_t gpa, 422 + u64 size, u64 attributes) 422 423 { 423 424 struct kvm_memory_attributes attr = { 424 425 .attributes = attributes, ··· 439 438 } 440 439 441 440 442 - static inline void vm_mem_set_private(struct kvm_vm *vm, uint64_t gpa, 443 - uint64_t size) 441 + static inline void vm_mem_set_private(struct kvm_vm *vm, gpa_t gpa, 442 + u64 size) 444 443 { 445 444 vm_set_memory_attributes(vm, gpa, size, KVM_MEMORY_ATTRIBUTE_PRIVATE); 446 445 } 447 446 448 - static inline void 
vm_mem_set_shared(struct kvm_vm *vm, uint64_t gpa, 449 - uint64_t size) 447 + static inline void vm_mem_set_shared(struct kvm_vm *vm, gpa_t gpa, 448 + u64 size) 450 449 { 451 450 vm_set_memory_attributes(vm, gpa, size, 0); 452 451 } 453 452 454 - void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t gpa, uint64_t size, 453 + void vm_guest_mem_fallocate(struct kvm_vm *vm, gpa_t gpa, u64 size, 455 454 bool punch_hole); 456 455 457 - static inline void vm_guest_mem_punch_hole(struct kvm_vm *vm, uint64_t gpa, 458 - uint64_t size) 456 + static inline void vm_guest_mem_punch_hole(struct kvm_vm *vm, gpa_t gpa, 457 + u64 size) 459 458 { 460 459 vm_guest_mem_fallocate(vm, gpa, size, true); 461 460 } 462 461 463 - static inline void vm_guest_mem_allocate(struct kvm_vm *vm, uint64_t gpa, 464 - uint64_t size) 462 + static inline void vm_guest_mem_allocate(struct kvm_vm *vm, gpa_t gpa, 463 + u64 size) 465 464 { 466 465 vm_guest_mem_fallocate(vm, gpa, size, false); 467 466 } 468 467 469 - void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size); 470 - const char *vm_guest_mode_string(uint32_t i); 468 + void vm_enable_dirty_ring(struct kvm_vm *vm, u32 ring_size); 469 + const char *vm_guest_mode_string(u32 i); 471 470 472 471 void kvm_vm_free(struct kvm_vm *vmp); 473 472 void kvm_vm_restart(struct kvm_vm *vmp); ··· 475 474 void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename); 476 475 int kvm_memfd_alloc(size_t size, bool hugepages); 477 476 478 - void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent); 477 + void vm_dump(FILE *stream, struct kvm_vm *vm, u8 indent); 479 478 480 479 static inline void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log) 481 480 { ··· 485 484 } 486 485 487 486 static inline void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log, 488 - uint64_t first_page, uint32_t num_pages) 487 + u64 first_page, u32 num_pages) 489 488 { 490 489 struct kvm_clear_dirty_log args = { 491 490 .dirty_bitmap = log, ··· 497 496 vm_ioctl(vm, KVM_CLEAR_DIRTY_LOG, &args); 498 497 } 499 498 500 - static inline uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm) 499 + static inline u32 kvm_vm_reset_dirty_ring(struct kvm_vm *vm) 501 500 { 502 501 return __vm_ioctl(vm, KVM_RESET_DIRTY_RINGS, NULL); 503 502 } 504 503 505 504 static inline void kvm_vm_register_coalesced_io(struct kvm_vm *vm, 506 - uint64_t address, 507 - uint64_t size, bool pio) 505 + u64 address, 506 + u64 size, bool pio) 508 507 { 509 508 struct kvm_coalesced_mmio_zone zone = { 510 509 .addr = address, ··· 516 515 } 517 516 518 517 static inline void kvm_vm_unregister_coalesced_io(struct kvm_vm *vm, 519 - uint64_t address, 520 - uint64_t size, bool pio) 518 + u64 address, 519 + u64 size, bool pio) 521 520 { 522 521 struct kvm_coalesced_mmio_zone zone = { 523 522 .addr = address, ··· 536 535 return fd; 537 536 } 538 537 539 - static inline int __kvm_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd, 540 - uint32_t flags) 538 + static inline int __kvm_irqfd(struct kvm_vm *vm, u32 gsi, int eventfd, 539 + u32 flags) 541 540 { 542 541 struct kvm_irqfd irqfd = { 543 542 .fd = eventfd, ··· 549 548 return __vm_ioctl(vm, KVM_IRQFD, &irqfd); 550 549 } 551 550 552 - static inline void kvm_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd, 553 - uint32_t flags) 551 + static inline void kvm_irqfd(struct kvm_vm *vm, u32 gsi, int eventfd, u32 flags) 554 552 { 555 553 int ret = __kvm_irqfd(vm, gsi, eventfd, flags); 556 554 557 555 TEST_ASSERT_VM_VCPU_IOCTL(!ret, KVM_IRQFD, ret, vm); 558 556 } 559 557 560 - 
static inline void kvm_assign_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd) 558 + static inline void kvm_assign_irqfd(struct kvm_vm *vm, u32 gsi, int eventfd) 561 559 { 562 560 kvm_irqfd(vm, gsi, eventfd, 0); 563 561 } 564 562 565 - static inline void kvm_deassign_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd) 563 + static inline void kvm_deassign_irqfd(struct kvm_vm *vm, u32 gsi, int eventfd) 566 564 { 567 565 kvm_irqfd(vm, gsi, eventfd, KVM_IRQFD_FLAG_DEASSIGN); 568 566 } ··· 610 610 } 611 611 612 612 void read_stat_data(int stats_fd, struct kvm_stats_header *header, 613 - struct kvm_stats_desc *desc, uint64_t *data, 613 + struct kvm_stats_desc *desc, u64 *data, 614 614 size_t max_elements); 615 615 616 616 void kvm_get_stat(struct kvm_binary_stats *stats, const char *name, 617 - uint64_t *data, size_t max_elements); 617 + u64 *data, size_t max_elements); 618 618 619 619 #define __get_stat(stats, stat) \ 620 620 ({ \ 621 - uint64_t data; \ 621 + u64 data; \ 622 622 \ 623 623 kvm_get_stat(stats, #stat, &data, 1); \ 624 624 data; \ ··· 664 664 665 665 void vm_create_irqchip(struct kvm_vm *vm); 666 666 667 - static inline int __vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size, 668 - uint64_t flags) 667 + static inline int __vm_create_guest_memfd(struct kvm_vm *vm, u64 size, 668 + u64 flags) 669 669 { 670 670 struct kvm_create_guest_memfd guest_memfd = { 671 671 .size = size, ··· 675 675 return __vm_ioctl(vm, KVM_CREATE_GUEST_MEMFD, &guest_memfd); 676 676 } 677 677 678 - static inline int vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size, 679 - uint64_t flags) 678 + static inline int vm_create_guest_memfd(struct kvm_vm *vm, u64 size, 679 + u64 flags) 680 680 { 681 681 int fd = __vm_create_guest_memfd(vm, size, flags); 682 682 ··· 684 684 return fd; 685 685 } 686 686 687 - void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags, 688 - uint64_t gpa, uint64_t size, void *hva); 689 - int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags, 690 - uint64_t gpa, uint64_t size, void *hva); 691 - void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags, 692 - uint64_t gpa, uint64_t size, void *hva, 693 - uint32_t guest_memfd, uint64_t guest_memfd_offset); 694 - int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags, 695 - uint64_t gpa, uint64_t size, void *hva, 696 - uint32_t guest_memfd, uint64_t guest_memfd_offset); 687 + void vm_set_user_memory_region(struct kvm_vm *vm, u32 slot, u32 flags, 688 + gpa_t gpa, u64 size, void *hva); 689 + int __vm_set_user_memory_region(struct kvm_vm *vm, u32 slot, u32 flags, 690 + gpa_t gpa, u64 size, void *hva); 691 + void vm_set_user_memory_region2(struct kvm_vm *vm, u32 slot, u32 flags, 692 + gpa_t gpa, u64 size, void *hva, 693 + u32 guest_memfd, u64 guest_memfd_offset); 694 + int __vm_set_user_memory_region2(struct kvm_vm *vm, u32 slot, u32 flags, 695 + gpa_t gpa, u64 size, void *hva, 696 + u32 guest_memfd, u64 guest_memfd_offset); 697 697 698 698 void vm_userspace_mem_region_add(struct kvm_vm *vm, 699 699 enum vm_mem_backing_src_type src_type, 700 - uint64_t gpa, uint32_t slot, uint64_t npages, 701 - uint32_t flags); 700 + gpa_t gpa, u32 slot, u64 npages, u32 flags); 702 701 void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type, 703 - uint64_t gpa, uint32_t slot, uint64_t npages, uint32_t flags, 704 - int guest_memfd_fd, uint64_t guest_memfd_offset); 702 + gpa_t gpa, u32 slot, u64 npages, u32 flags, 703 + int 
guest_memfd_fd, u64 guest_memfd_offset); 705 704 706 705 #ifndef vm_arch_has_protected_memory 707 706 static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm) ··· 709 710 } 710 711 #endif 711 712 712 - void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags); 713 - void vm_mem_region_reload(struct kvm_vm *vm, uint32_t slot); 714 - void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa); 715 - void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot); 716 - struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id); 717 - void vm_populate_vaddr_bitmap(struct kvm_vm *vm); 718 - vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min); 719 - vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min); 720 - vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, 721 - enum kvm_mem_region_type type); 722 - vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz, 723 - vm_vaddr_t vaddr_min, 724 - enum kvm_mem_region_type type); 725 - vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages); 726 - vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm, 727 - enum kvm_mem_region_type type); 728 - vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm); 713 + void vm_mem_region_set_flags(struct kvm_vm *vm, u32 slot, u32 flags); 714 + void vm_mem_region_reload(struct kvm_vm *vm, u32 slot); 715 + void vm_mem_region_move(struct kvm_vm *vm, u32 slot, u64 new_gpa); 716 + void vm_mem_region_delete(struct kvm_vm *vm, u32 slot); 717 + struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, u32 vcpu_id); 718 + void vm_populate_gva_bitmap(struct kvm_vm *vm); 719 + gva_t vm_unused_gva_gap(struct kvm_vm *vm, size_t sz, gva_t min_gva); 720 + gva_t vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva); 721 + gva_t __vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva, 722 + enum kvm_mem_region_type type); 723 + gva_t vm_alloc_shared(struct kvm_vm *vm, size_t sz, gva_t min_gva, 724 + enum kvm_mem_region_type type); 725 + gva_t vm_alloc_pages(struct kvm_vm *vm, int nr_pages); 726 + gva_t __vm_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type); 727 + gva_t vm_alloc_page(struct kvm_vm *vm); 729 728 730 - void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, 729 + void virt_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa, 731 730 unsigned int npages); 732 - void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa); 733 - void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva); 734 - vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva); 735 - void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa); 731 + void *addr_gpa2hva(struct kvm_vm *vm, gpa_t gpa); 732 + void *addr_gva2hva(struct kvm_vm *vm, gva_t gva); 733 + gpa_t addr_hva2gpa(struct kvm_vm *vm, void *hva); 734 + void *addr_gpa2alias(struct kvm_vm *vm, gpa_t gpa); 736 735 737 736 #ifndef vcpu_arch_put_guest 738 737 #define vcpu_arch_put_guest(mem, val) do { (mem) = (val); } while (0) 739 738 #endif 740 739 741 - static inline vm_paddr_t vm_untag_gpa(struct kvm_vm *vm, vm_paddr_t gpa) 740 + static inline gpa_t vm_untag_gpa(struct kvm_vm *vm, gpa_t gpa) 742 741 { 743 742 return gpa & ~vm->gpa_tag_mask; 744 743 } ··· 752 755 void vcpu_run_complete_io(struct kvm_vcpu *vcpu); 753 756 struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu); 754 757 755 - static inline void vcpu_enable_cap(struct kvm_vcpu *vcpu, uint32_t cap, 756 - uint64_t arg0) 758 + static inline void vcpu_enable_cap(struct kvm_vcpu *vcpu, u32 cap, 759 
+ u64 arg0) 757 760 { 758 761 struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } }; 759 762 ··· 808 811 vcpu_ioctl(vcpu, KVM_SET_FPU, fpu); 809 812 } 810 813 811 - static inline int __vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr) 814 + static inline int __vcpu_get_reg(struct kvm_vcpu *vcpu, u64 id, void *addr) 812 815 { 813 - struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr }; 816 + struct kvm_one_reg reg = { .id = id, .addr = (u64)addr }; 814 817 815 818 return __vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg); 816 819 } 817 - static inline int __vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val) 820 + 821 + static inline int __vcpu_set_reg(struct kvm_vcpu *vcpu, u64 id, u64 val) 818 822 { 819 - struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val }; 823 + struct kvm_one_reg reg = { .id = id, .addr = (u64)&val }; 820 824 821 825 return __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg); 822 826 } 823 - static inline uint64_t vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id) 827 + 828 + static inline u64 vcpu_get_reg(struct kvm_vcpu *vcpu, u64 id) 824 829 { 825 - uint64_t val; 826 - struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val }; 830 + u64 val; 831 + struct kvm_one_reg reg = { .id = id, .addr = (u64)&val }; 827 832 828 833 TEST_ASSERT(KVM_REG_SIZE(id) <= sizeof(val), "Reg %lx too big", id); 829 834 830 835 vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg); 831 836 return val; 832 837 } 833 - static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val) 838 + 839 + static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u64 id, u64 val) 834 840 { 835 - struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val }; 841 + struct kvm_one_reg reg = { .id = id, .addr = (u64)&val }; 836 842 837 843 TEST_ASSERT(KVM_REG_SIZE(id) <= sizeof(val), "Reg %lx too big", id); 838 844 ··· 880 880 return fd; 881 881 } 882 882 883 - int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr); 883 + int __kvm_has_device_attr(int dev_fd, u32 group, u64 attr); 884 884 885 - static inline void kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr) 885 + static inline void kvm_has_device_attr(int dev_fd, u32 group, u64 attr) 886 886 { 887 887 int ret = __kvm_has_device_attr(dev_fd, group, attr); 888 888 889 889 TEST_ASSERT(!ret, "KVM_HAS_DEVICE_ATTR failed, rc: %i errno: %i", ret, errno); 890 890 } 891 891 892 - int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val); 892 + int __kvm_device_attr_get(int dev_fd, u32 group, u64 attr, void *val); 893 893 894 - static inline void kvm_device_attr_get(int dev_fd, uint32_t group, 895 - uint64_t attr, void *val) 894 + static inline void kvm_device_attr_get(int dev_fd, u32 group, 895 + u64 attr, void *val) 896 896 { 897 897 int ret = __kvm_device_attr_get(dev_fd, group, attr, val); 898 898 899 899 TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_GET_DEVICE_ATTR, ret)); 900 900 } 901 901 902 - int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val); 902 + int __kvm_device_attr_set(int dev_fd, u32 group, u64 attr, void *val); 903 903 904 - static inline void kvm_device_attr_set(int dev_fd, uint32_t group, 905 - uint64_t attr, void *val) 904 + static inline void kvm_device_attr_set(int dev_fd, u32 group, 905 + u64 attr, void *val) 906 906 { 907 907 int ret = __kvm_device_attr_set(dev_fd, group, attr, val); 908 908 909 909 TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_DEVICE_ATTR, ret)); 910 910 } 911 911 912 - static inline int __vcpu_has_device_attr(struct 
kvm_vcpu *vcpu, uint32_t group, 913 - uint64_t attr) 912 + static inline int __vcpu_has_device_attr(struct kvm_vcpu *vcpu, u32 group, 913 + u64 attr) 914 914 { 915 915 return __kvm_has_device_attr(vcpu->fd, group, attr); 916 916 } 917 917 918 - static inline void vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group, 919 - uint64_t attr) 918 + static inline void vcpu_has_device_attr(struct kvm_vcpu *vcpu, u32 group, 919 + u64 attr) 920 920 { 921 921 kvm_has_device_attr(vcpu->fd, group, attr); 922 922 } 923 923 924 - static inline int __vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group, 925 - uint64_t attr, void *val) 924 + static inline int __vcpu_device_attr_get(struct kvm_vcpu *vcpu, u32 group, 925 + u64 attr, void *val) 926 926 { 927 927 return __kvm_device_attr_get(vcpu->fd, group, attr, val); 928 928 } 929 929 930 - static inline void vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group, 931 - uint64_t attr, void *val) 930 + static inline void vcpu_device_attr_get(struct kvm_vcpu *vcpu, u32 group, 931 + u64 attr, void *val) 932 932 { 933 933 kvm_device_attr_get(vcpu->fd, group, attr, val); 934 934 } 935 935 936 - static inline int __vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group, 937 - uint64_t attr, void *val) 936 + static inline int __vcpu_device_attr_set(struct kvm_vcpu *vcpu, u32 group, 937 + u64 attr, void *val) 938 938 { 939 939 return __kvm_device_attr_set(vcpu->fd, group, attr, val); 940 940 } 941 941 942 - static inline void vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group, 943 - uint64_t attr, void *val) 942 + static inline void vcpu_device_attr_set(struct kvm_vcpu *vcpu, u32 group, 943 + u64 attr, void *val) 944 944 { 945 945 kvm_device_attr_set(vcpu->fd, group, attr, val); 946 946 } 947 947 948 - int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type); 949 - int __kvm_create_device(struct kvm_vm *vm, uint64_t type); 948 + int __kvm_test_create_device(struct kvm_vm *vm, u64 type); 949 + int __kvm_create_device(struct kvm_vm *vm, u64 type); 950 950 951 - static inline int kvm_create_device(struct kvm_vm *vm, uint64_t type) 951 + static inline int kvm_create_device(struct kvm_vm *vm, u64 type) 952 952 { 953 953 int fd = __kvm_create_device(vm, type); 954 954 ··· 964 964 * Input Args: 965 965 * vcpu - vCPU 966 966 * num - number of arguments 967 - * ... - arguments, each of type uint64_t 967 + * ... - arguments, each of type u64 968 968 * 969 969 * Output Args: None 970 970 * ··· 972 972 * 973 973 * Sets the first @num input parameters for the function at @vcpu's entry point, 974 974 * per the C calling convention of the architecture, to the values given as 975 - * variable args. Each of the variable args is expected to be of type uint64_t. 975 + * variable args. Each of the variable args is expected to be of type u64. 976 976 * The maximum @num can be is specific to the architecture. 
977 977 */ 978 978 void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...); 979 979 980 - void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level); 981 - int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level); 980 + void kvm_irq_line(struct kvm_vm *vm, u32 irq, int level); 981 + int _kvm_irq_line(struct kvm_vm *vm, u32 irq, int level); 982 982 983 983 #define KVM_MAX_IRQ_ROUTES 4096 984 984 985 985 struct kvm_irq_routing *kvm_gsi_routing_create(void); 986 986 void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing, 987 - uint32_t gsi, uint32_t pin); 987 + u32 gsi, u32 pin); 988 988 int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing); 989 989 void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing); 990 990 991 991 const char *exit_reason_str(unsigned int exit_reason); 992 992 993 - vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, 994 - uint32_t memslot); 995 - vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, 996 - vm_paddr_t paddr_min, uint32_t memslot, 997 - bool protected); 998 - vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm); 993 + gpa_t vm_phy_page_alloc(struct kvm_vm *vm, gpa_t min_gpa, u32 memslot); 994 + gpa_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, gpa_t min_gpa, 995 + u32 memslot, bool protected); 996 + gpa_t vm_alloc_page_table(struct kvm_vm *vm); 999 997 1000 - static inline vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, 1001 - vm_paddr_t paddr_min, uint32_t memslot) 998 + static inline gpa_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, 999 + gpa_t min_gpa, u32 memslot) 1002 1000 { 1003 1001 /* 1004 1002 * By default, allocate memory as protected for VMs that support 1005 1003 * protected memory, as the majority of memory for such VMs is 1006 1004 * protected, i.e. using shared memory is effectively opt-in. 1007 1005 */ 1008 - return __vm_phy_pages_alloc(vm, num, paddr_min, memslot, 1006 + return __vm_phy_pages_alloc(vm, num, min_gpa, memslot, 1009 1007 vm_arch_has_protected_memory(vm)); 1010 1008 } 1011 1009 ··· 1014 1016 * calculate the amount of memory needed for per-vCPU data, e.g. stacks. 
1015 1017 */ 1016 1018 struct kvm_vm *____vm_create(struct vm_shape shape); 1017 - struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus, 1018 - uint64_t nr_extra_pages); 1019 + struct kvm_vm *__vm_create(struct vm_shape shape, u32 nr_runnable_vcpus, 1020 + u64 nr_extra_pages); 1019 1021 1020 1022 static inline struct kvm_vm *vm_create_barebones(void) 1021 1023 { ··· 1032 1034 return ____vm_create(shape); 1033 1035 } 1034 1036 1035 - static inline struct kvm_vm *vm_create(uint32_t nr_runnable_vcpus) 1037 + static inline struct kvm_vm *vm_create(u32 nr_runnable_vcpus) 1036 1038 { 1037 1039 return __vm_create(VM_SHAPE_DEFAULT, nr_runnable_vcpus, 0); 1038 1040 } 1039 1041 1040 - struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus, 1041 - uint64_t extra_mem_pages, 1042 + struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, u32 nr_vcpus, 1043 + u64 extra_mem_pages, 1042 1044 void *guest_code, struct kvm_vcpu *vcpus[]); 1043 1045 1044 - static inline struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus, 1046 + static inline struct kvm_vm *vm_create_with_vcpus(u32 nr_vcpus, 1045 1047 void *guest_code, 1046 1048 struct kvm_vcpu *vcpus[]) 1047 1049 { ··· 1052 1054 1053 1055 struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape, 1054 1056 struct kvm_vcpu **vcpu, 1055 - uint64_t extra_mem_pages, 1057 + u64 extra_mem_pages, 1056 1058 void *guest_code); 1057 1059 1058 1060 /* ··· 1060 1062 * additional pages of guest memory. Returns the VM and vCPU (via out param). 1061 1063 */ 1062 1064 static inline struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu, 1063 - uint64_t extra_mem_pages, 1065 + u64 extra_mem_pages, 1064 1066 void *guest_code) 1065 1067 { 1066 1068 return __vm_create_shape_with_one_vcpu(VM_SHAPE_DEFAULT, vcpu, ··· 1082 1084 1083 1085 struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm); 1084 1086 1085 - void kvm_set_files_rlimit(uint32_t nr_vcpus); 1087 + void kvm_set_files_rlimit(u32 nr_vcpus); 1086 1088 1087 1089 int __pin_task_to_cpu(pthread_t task, int cpu); 1088 1090 ··· 1113 1115 } 1114 1116 1115 1117 void kvm_print_vcpu_pinning_help(void); 1116 - void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[], 1118 + void kvm_parse_vcpu_pinning(const char *pcpus_string, u32 vcpu_to_pcpu[], 1117 1119 int nr_vcpus); 1118 1120 1119 1121 unsigned long vm_compute_max_gfn(struct kvm_vm *vm); ··· 1129 1131 } 1130 1132 1131 1133 #define sync_global_to_guest(vm, g) ({ \ 1132 - typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \ 1134 + typeof(g) *_p = addr_gva2hva(vm, (gva_t)&(g)); \ 1133 1135 memcpy(_p, &(g), sizeof(g)); \ 1134 1136 }) 1135 1137 1136 1138 #define sync_global_from_guest(vm, g) ({ \ 1137 - typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \ 1139 + typeof(g) *_p = addr_gva2hva(vm, (gva_t)&(g)); \ 1138 1140 memcpy(&(g), _p, sizeof(g)); \ 1139 1141 }) 1140 1142 ··· 1145 1147 * undesirable to change the host's copy of the global. 
1146 1148 */ 1147 1149 #define write_guest_global(vm, g, val) ({ \ 1148 - typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \ 1150 + typeof(g) *_p = addr_gva2hva(vm, (gva_t)&(g)); \ 1149 1151 typeof(g) _val = val; \ 1150 1152 \ 1151 1153 memcpy(_p, &(_val), sizeof(g)); \ ··· 1154 1156 void assert_on_unhandled_exception(struct kvm_vcpu *vcpu); 1155 1157 1156 1158 void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, 1157 - uint8_t indent); 1159 + u8 indent); 1158 1160 1159 1161 static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu, 1160 - uint8_t indent) 1162 + u8 indent) 1161 1163 { 1162 1164 vcpu_arch_dump(stream, vcpu, indent); 1163 1165 } ··· 1169 1171 * vm - Virtual Machine 1170 1172 * vcpu_id - The id of the VCPU to add to the VM. 1171 1173 */ 1172 - struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id); 1174 + struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id); 1173 1175 void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code); 1174 1176 1175 - static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, 1177 + static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, u32 vcpu_id, 1176 1178 void *guest_code) 1177 1179 { 1178 1180 struct kvm_vcpu *vcpu = vm_arch_vcpu_add(vm, vcpu_id); ··· 1183 1185 } 1184 1186 1185 1187 /* Re-create a vCPU after restarting a VM, e.g. for state save/restore tests. */ 1186 - struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id); 1188 + struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, u32 vcpu_id); 1187 1189 1188 1190 static inline struct kvm_vcpu *vm_vcpu_recreate(struct kvm_vm *vm, 1189 - uint32_t vcpu_id) 1191 + u32 vcpu_id) 1190 1192 { 1191 1193 return vm_arch_vcpu_recreate(vm, vcpu_id); 1192 1194 } ··· 1201 1203 } 1202 1204 1203 1205 /* 1204 - * VM Virtual Page Map 1205 - * 1206 - * Input Args: 1207 - * vm - Virtual Machine 1208 - * vaddr - VM Virtual Address 1209 - * paddr - VM Physical Address 1210 - * memslot - Memory region slot for new virtual translation tables 1211 - * 1212 - * Output Args: None 1213 - * 1214 - * Return: None 1215 - * 1216 1206 * Within @vm, creates a virtual translation for the page starting 1217 - * at @vaddr to the page starting at @paddr. 1207 + * at @gva to the page starting at @gpa. 1218 1208 */ 1219 - void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr); 1209 + void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa); 1220 1210 1221 - static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr) 1211 + static inline void virt_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa) 1222 1212 { 1223 - virt_arch_pg_map(vm, vaddr, paddr); 1224 - sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift); 1213 + virt_arch_pg_map(vm, gva, gpa); 1214 + sparsebit_set(vm->vpages_mapped, gva >> vm->page_shift); 1225 1215 } 1226 1216 1227 1217 ··· 1228 1242 * Returns the VM physical address of the translated VM virtual 1229 1243 * address given by @gva. 1230 1244 */ 1231 - vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva); 1245 + gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva); 1232 1246 1233 - static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) 1247 + static inline gpa_t addr_gva2gpa(struct kvm_vm *vm, gva_t gva) 1234 1248 { 1235 1249 return addr_arch_gva2gpa(vm, gva); 1236 1250 } ··· 1250 1264 * Dumps to the FILE stream given by @stream, the contents of all the 1251 1265 * virtual translation tables for the VM given by @vm. 
1252 1266 */ 1253 - void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent); 1267 + void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent); 1254 1268 1255 - static inline void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) 1269 + static inline void virt_dump(FILE *stream, struct kvm_vm *vm, u8 indent) 1256 1270 { 1257 1271 virt_arch_dump(stream, vm, indent); 1258 1272 } ··· 1263 1277 return __vm_enable_cap(vm, KVM_CAP_VM_DISABLE_NX_HUGE_PAGES, 0); 1264 1278 } 1265 1279 1266 - static inline uint64_t vm_page_align(struct kvm_vm *vm, uint64_t v) 1280 + static inline u64 vm_page_align(struct kvm_vm *vm, u64 v) 1267 1281 { 1268 1282 return (v + vm->page_size - 1) & ~(vm->page_size - 1); 1269 1283 } ··· 1279 1293 void kvm_arch_vm_finalize_vcpus(struct kvm_vm *vm); 1280 1294 void kvm_arch_vm_release(struct kvm_vm *vm); 1281 1295 1282 - bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr); 1296 + bool vm_is_gpa_protected(struct kvm_vm *vm, gpa_t gpa); 1283 1297 1284 - uint32_t guest_get_vcpuid(void); 1298 + u32 guest_get_vcpuid(void); 1285 1299 1286 1300 bool kvm_arch_has_default_irqchip(void); 1287 1301
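With the renames in kvm_util.h above, the allocation and translation helpers traffic directly in gva_t/gpa_t. A hedged host-side sketch of a typical flow using the prototypes from this hunk (vcpu_run(), guest_code(), and the standard C library includes are assumed from the wider framework; the 0xaa fill is arbitrary):

/* Host-side sketch; assumes the selftest's kvm_util.h as modified above. */
static void run_one_vcpu(void *guest_code)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	gva_t buf_gva;
	u8 *buf_hva;

	vm = __vm_create_with_one_vcpu(&vcpu, 0, guest_code);

	/* Allocate one page of guest virtual memory and seed it from the host. */
	buf_gva = vm_alloc_page(vm);
	buf_hva = addr_gva2hva(vm, buf_gva);
	memset(buf_hva, 0xaa, vm->page_size);

	/* Hand the guest the gva as its first argument, then run it. */
	vcpu_args_set(vcpu, 1, buf_gva);
	vcpu_run(vcpu);

	kvm_vm_free(vm);
}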
+5 -3
tools/testing/selftests/kvm/include/kvm_util_types.h
··· 2 2 #ifndef SELFTEST_KVM_UTIL_TYPES_H 3 3 #define SELFTEST_KVM_UTIL_TYPES_H 4 4 5 + #include <linux/types.h> 6 + 5 7 /* 6 8 * Provide a version of static_assert() that is guaranteed to have an optional 7 9 * message param. _GNU_SOURCE is defined for all KVM selftests, _GNU_SOURCE ··· 16 14 #define __kvm_static_assert(expr, msg, ...) _Static_assert(expr, msg) 17 15 #define kvm_static_assert(expr, ...) __kvm_static_assert(expr, ##__VA_ARGS__, #expr) 18 16 19 - typedef uint64_t vm_paddr_t; /* Virtual Machine (Guest) physical address */ 20 - typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */ 17 + typedef u64 gpa_t; /* Virtual Machine (Guest) physical address */ 18 + typedef u64 gva_t; /* Virtual Machine (Guest) virtual address */ 21 19 22 - #define INVALID_GPA (~(uint64_t)0) 20 + #define INVALID_GPA (~(u64)0) 23 21 24 22 #endif /* SELFTEST_KVM_UTIL_TYPES_H */
+2 -2
tools/testing/selftests/kvm/include/loongarch/arch_timer.h
··· 70 70 csr_write(val, LOONGARCH_CSR_TCFG); 71 71 } 72 72 73 - static inline void __delay(uint64_t cycles) 73 + static inline void __delay(u64 cycles) 74 74 { 75 - uint64_t start = timer_get_cycles(); 75 + u64 start = timer_get_cycles(); 76 76 77 77 while ((timer_get_cycles() - start) < cycles) 78 78 cpu_relax();
+2 -2
tools/testing/selftests/kvm/include/loongarch/ucall.h
··· 10 10 * ucall_exit_mmio_addr holds per-VM values (global data is duplicated by each 11 11 * VM), it must not be accessed from host code. 12 12 */ 13 - extern vm_vaddr_t *ucall_exit_mmio_addr; 13 + extern gva_t *ucall_exit_mmio_addr; 14 14 15 - static inline void ucall_arch_do_ucall(vm_vaddr_t uc) 15 + static inline void ucall_arch_do_ucall(gva_t uc) 16 16 { 17 17 WRITE_ONCE(*ucall_exit_mmio_addr, uc); 18 18 }
+15 -15
tools/testing/selftests/kvm/include/memstress.h
··· 20 20 #define MEMSTRESS_MEM_SLOT_INDEX 1 21 21 22 22 struct memstress_vcpu_args { 23 - uint64_t gpa; 24 - uint64_t gva; 25 - uint64_t pages; 23 + gpa_t gpa; 24 + gva_t gva; 25 + u64 pages; 26 26 27 27 /* Only used by the host userspace part of the vCPU thread */ 28 28 struct kvm_vcpu *vcpu; ··· 32 32 struct memstress_args { 33 33 struct kvm_vm *vm; 34 34 /* The starting address and size of the guest test region. */ 35 - uint64_t gpa; 36 - uint64_t size; 37 - uint64_t guest_page_size; 38 - uint32_t random_seed; 39 - uint32_t write_percent; 35 + gpa_t gpa; 36 + u64 size; 37 + u64 guest_page_size; 38 + u32 random_seed; 39 + u32 write_percent; 40 40 41 41 /* Run vCPUs in L2 instead of L1, if the architecture supports it. */ 42 42 bool nested; ··· 45 45 /* True if all vCPUs are pinned to pCPUs */ 46 46 bool pin_vcpus; 47 47 /* The vCPU=>pCPU pinning map. Only valid if pin_vcpus is true. */ 48 - uint32_t vcpu_to_pcpu[KVM_MAX_VCPUS]; 48 + u32 vcpu_to_pcpu[KVM_MAX_VCPUS]; 49 49 50 50 /* Test is done, stop running vCPUs. */ 51 51 bool stop_vcpus; ··· 56 56 extern struct memstress_args memstress_args; 57 57 58 58 struct kvm_vm *memstress_create_vm(enum vm_guest_mode mode, int nr_vcpus, 59 - uint64_t vcpu_memory_bytes, int slots, 59 + u64 vcpu_memory_bytes, int slots, 60 60 enum vm_mem_backing_src_type backing_src, 61 61 bool partition_vcpu_memory_access); 62 62 void memstress_destroy_vm(struct kvm_vm *vm); 63 63 64 - void memstress_set_write_percent(struct kvm_vm *vm, uint32_t write_percent); 64 + void memstress_set_write_percent(struct kvm_vm *vm, u32 write_percent); 65 65 void memstress_set_random_access(struct kvm_vm *vm, bool random_access); 66 66 67 67 void memstress_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct memstress_vcpu_args *)); 68 68 void memstress_join_vcpu_threads(int vcpus); 69 - void memstress_guest_code(uint32_t vcpu_id); 69 + void memstress_guest_code(u32 vcpu_id); 70 70 71 - uint64_t memstress_nested_pages(int nr_vcpus); 71 + u64 memstress_nested_pages(int nr_vcpus); 72 72 void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[]); 73 73 74 74 void memstress_enable_dirty_logging(struct kvm_vm *vm, int slots); 75 75 void memstress_disable_dirty_logging(struct kvm_vm *vm, int slots); 76 76 void memstress_get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots); 77 77 void memstress_clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], 78 - int slots, uint64_t pages_per_slot); 79 - unsigned long **memstress_alloc_bitmaps(int slots, uint64_t pages_per_slot); 78 + int slots, u64 pages_per_slot); 79 + unsigned long **memstress_alloc_bitmaps(int slots, u64 pages_per_slot); 80 80 void memstress_free_bitmaps(unsigned long *bitmaps[], int slots); 81 81 82 82 #endif /* SELFTEST_KVM_MEMSTRESS_H */
+11 -11
tools/testing/selftests/kvm/include/riscv/arch_timer.h
··· 14 14 static unsigned long timer_freq; 15 15 16 16 #define msec_to_cycles(msec) \ 17 - ((timer_freq) * (uint64_t)(msec) / 1000) 17 + ((timer_freq) * (u64)(msec) / 1000) 18 18 19 19 #define usec_to_cycles(usec) \ 20 - ((timer_freq) * (uint64_t)(usec) / 1000000) 20 + ((timer_freq) * (u64)(usec) / 1000000) 21 21 22 22 #define cycles_to_usec(cycles) \ 23 - ((uint64_t)(cycles) * 1000000 / (timer_freq)) 23 + ((u64)(cycles) * 1000000 / (timer_freq)) 24 24 25 - static inline uint64_t timer_get_cycles(void) 25 + static inline u64 timer_get_cycles(void) 26 26 { 27 27 return csr_read(CSR_TIME); 28 28 } 29 29 30 - static inline void timer_set_cmp(uint64_t cval) 30 + static inline void timer_set_cmp(u64 cval) 31 31 { 32 32 csr_write(CSR_STIMECMP, cval); 33 33 } 34 34 35 - static inline uint64_t timer_get_cmp(void) 35 + static inline u64 timer_get_cmp(void) 36 36 { 37 37 return csr_read(CSR_STIMECMP); 38 38 } ··· 47 47 csr_clear(CSR_SIE, IE_TIE); 48 48 } 49 49 50 - static inline void timer_set_next_cmp_ms(uint32_t msec) 50 + static inline void timer_set_next_cmp_ms(u32 msec) 51 51 { 52 - uint64_t now_ct = timer_get_cycles(); 53 - uint64_t next_ct = now_ct + msec_to_cycles(msec); 52 + u64 now_ct = timer_get_cycles(); 53 + u64 next_ct = now_ct + msec_to_cycles(msec); 54 54 55 55 timer_set_cmp(next_ct); 56 56 } 57 57 58 - static inline void __delay(uint64_t cycles) 58 + static inline void __delay(u64 cycles) 59 59 { 60 - uint64_t start = timer_get_cycles(); 60 + u64 start = timer_get_cycles(); 61 61 62 62 while ((timer_get_cycles() - start) < cycles) 63 63 cpu_relax();
+4 -5
tools/testing/selftests/kvm/include/riscv/processor.h
··· 25 25 #define GET_RM(insn) (((insn) & INSN_MASK_FUNCT3) >> INSN_SHIFT_FUNCT3) 26 26 #define GET_CSR_NUM(insn) (((insn) & INSN_CSR_MASK) >> INSN_CSR_SHIFT) 27 27 28 - static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t subtype, 29 - uint64_t idx, uint64_t size) 28 + static inline u64 __kvm_reg_id(u64 type, u64 subtype, u64 idx, u64 size) 30 29 { 31 30 return KVM_REG_RISCV | type | subtype | idx | size; 32 31 } ··· 61 62 KVM_REG_RISCV_SBI_SINGLE, \ 62 63 idx, KVM_REG_SIZE_ULONG) 63 64 64 - bool __vcpu_has_ext(struct kvm_vcpu *vcpu, uint64_t ext); 65 + bool __vcpu_has_ext(struct kvm_vcpu *vcpu, u64 ext); 65 66 66 - static inline bool __vcpu_has_isa_ext(struct kvm_vcpu *vcpu, uint64_t isa_ext) 67 + static inline bool __vcpu_has_isa_ext(struct kvm_vcpu *vcpu, u64 isa_ext) 67 68 { 68 69 return __vcpu_has_ext(vcpu, RISCV_ISA_EXT_REG(isa_ext)); 69 70 } 70 71 71 - static inline bool __vcpu_has_sbi_ext(struct kvm_vcpu *vcpu, uint64_t sbi_ext) 72 + static inline bool __vcpu_has_sbi_ext(struct kvm_vcpu *vcpu, u64 sbi_ext) 72 73 { 73 74 return __vcpu_has_ext(vcpu, RISCV_SBI_EXT_REG(sbi_ext)); 74 75 }
+2 -2
tools/testing/selftests/kvm/include/riscv/ucall.h
··· 7 7 8 8 #define UCALL_EXIT_REASON KVM_EXIT_RISCV_SBI 9 9 10 - static inline void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) 10 + static inline void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa) 11 11 { 12 12 } 13 13 14 - static inline void ucall_arch_do_ucall(vm_vaddr_t uc) 14 + static inline void ucall_arch_do_ucall(gva_t uc) 15 15 { 16 16 sbi_ecall(KVM_RISCV_SELFTESTS_SBI_EXT, 17 17 KVM_RISCV_SELFTESTS_SBI_UCALL,
+1 -1
tools/testing/selftests/kvm/include/s390/diag318_test_handler.h
··· 8 8 #ifndef SELFTEST_KVM_DIAG318_TEST_HANDLER 9 9 #define SELFTEST_KVM_DIAG318_TEST_HANDLER 10 10 11 - uint64_t get_diag318_info(void); 11 + u64 get_diag318_info(void); 12 12 13 13 #endif
+2 -2
tools/testing/selftests/kvm/include/s390/facility.h
··· 16 16 /* alt_stfle_fac_list[16] + stfle_fac_list[16] */ 17 17 #define NB_STFL_DOUBLEWORDS 32 18 18 19 - extern uint64_t stfl_doublewords[NB_STFL_DOUBLEWORDS]; 19 + extern u64 stfl_doublewords[NB_STFL_DOUBLEWORDS]; 20 20 extern bool stfle_flag; 21 21 22 22 static inline bool test_bit_inv(unsigned long nr, const unsigned long *ptr) ··· 24 24 return test_bit(nr ^ (BITS_PER_LONG - 1), ptr); 25 25 } 26 26 27 - static inline void stfle(uint64_t *fac, unsigned int nb_doublewords) 27 + static inline void stfle(u64 *fac, unsigned int nb_doublewords) 28 28 { 29 29 register unsigned long r0 asm("0") = nb_doublewords - 1; 30 30
+2 -2
tools/testing/selftests/kvm/include/s390/ucall.h
··· 6 6 7 7 #define UCALL_EXIT_REASON KVM_EXIT_S390_SIEIC 8 8 9 - static inline void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) 9 + static inline void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa) 10 10 { 11 11 } 12 12 13 - static inline void ucall_arch_do_ucall(vm_vaddr_t uc) 13 + static inline void ucall_arch_do_ucall(gva_t uc) 14 14 { 15 15 /* Exit via DIAGNOSE 0x501 (normally used for breakpoints) */ 16 16 asm volatile ("diag 0,%0,0x501" : : "a"(uc) : "memory");
+3 -3
tools/testing/selftests/kvm/include/sparsebit.h
··· 6 6 * 7 7 * Header file that describes API to the sparsebit library. 8 8 * This library provides a memory efficient means of storing 9 - * the settings of bits indexed via a uint64_t. Memory usage 9 + * the settings of bits indexed via a u64. Memory usage 10 10 * is reasonable, significantly less than (2^64 / 8) bytes, as 11 11 * long as bits that are mostly set or mostly cleared are close 12 12 * to each other. This library is efficient in memory usage ··· 25 25 #endif 26 26 27 27 struct sparsebit; 28 - typedef uint64_t sparsebit_idx_t; 29 - typedef uint64_t sparsebit_num_t; 28 + typedef u64 sparsebit_idx_t; 29 + typedef u64 sparsebit_num_t; 30 30 31 31 struct sparsebit *sparsebit_alloc(void); 32 32 void sparsebit_free(struct sparsebit **sbitp);
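sparsebit stores a set of u64 indices compactly, which is why kvm_util.h above tracks vpages_valid/vpages_mapped by page number instead of keeping a flat bitmap. A brief hedged sketch (sparsebit_alloc(), sparsebit_free() and sparsebit_set() appear in these diffs; sparsebit_is_set() is assumed from the same library):

/* Sketch; assumes the selftest's sparsebit.h and kvm_util_types.h. */
static void track_mapped_page(struct sparsebit *mapped, gva_t gva,
			      unsigned int page_shift)
{
	sparsebit_set(mapped, gva >> page_shift);	/* remember this guest page */
}

static bool page_is_mapped(struct sparsebit *mapped, gva_t gva,
			   unsigned int page_shift)
{
	return sparsebit_is_set(mapped, gva >> page_shift);
}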
+21 -19
tools/testing/selftests/kvm/include/test_util.h
··· 22 22 #include <sys/mman.h> 23 23 #include "kselftest.h" 24 24 25 + #include <linux/types.h> 26 + 25 27 #define msecs_to_usecs(msec) ((msec) * 1000ULL) 26 28 27 29 static inline __printf(1, 2) int _no_printf(const char *format, ...) { return 0; } ··· 101 99 102 100 size_t parse_size(const char *size); 103 101 104 - int64_t timespec_to_ns(struct timespec ts); 105 - struct timespec timespec_add_ns(struct timespec ts, int64_t ns); 102 + s64 timespec_to_ns(struct timespec ts); 103 + struct timespec timespec_add_ns(struct timespec ts, s64 ns); 106 104 struct timespec timespec_add(struct timespec ts1, struct timespec ts2); 107 105 struct timespec timespec_sub(struct timespec ts1, struct timespec ts2); 108 106 struct timespec timespec_elapsed(struct timespec start); 109 107 struct timespec timespec_div(struct timespec ts, int divisor); 110 108 111 109 struct guest_random_state { 112 - uint32_t seed; 110 + u32 seed; 113 111 }; 114 112 115 - extern uint32_t guest_random_seed; 113 + extern u32 guest_random_seed; 116 114 extern struct guest_random_state guest_rng; 117 115 118 - struct guest_random_state new_guest_random_state(uint32_t seed); 119 - uint32_t guest_random_u32(struct guest_random_state *state); 116 + struct guest_random_state new_guest_random_state(u32 seed); 117 + u32 guest_random_u32(struct guest_random_state *state); 120 118 121 119 static inline bool __guest_random_bool(struct guest_random_state *state, 122 - uint8_t percent) 120 + u8 percent) 123 121 { 124 122 return (guest_random_u32(state) % 100) < percent; 125 123 } ··· 129 127 return __guest_random_bool(state, 50); 130 128 } 131 129 132 - static inline uint64_t guest_random_u64(struct guest_random_state *state) 130 + static inline u64 guest_random_u64(struct guest_random_state *state) 133 131 { 134 - return ((uint64_t)guest_random_u32(state) << 32) | guest_random_u32(state); 132 + return ((u64)guest_random_u32(state) << 32) | guest_random_u32(state); 135 133 } 136 134 137 135 enum vm_mem_backing_src_type { ··· 160 158 161 159 struct vm_mem_backing_src_alias { 162 160 const char *name; 163 - uint32_t flag; 161 + u32 flag; 164 162 }; 165 163 166 164 #define MIN_RUN_DELAY_NS 200000UL ··· 168 166 bool thp_configured(void); 169 167 size_t get_trans_hugepagesz(void); 170 168 size_t get_def_hugetlb_pagesz(void); 171 - const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i); 172 - size_t get_backing_src_pagesz(uint32_t i); 173 - bool is_backing_src_hugetlb(uint32_t i); 169 + const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(u32 i); 170 + size_t get_backing_src_pagesz(u32 i); 171 + bool is_backing_src_hugetlb(u32 i); 174 172 void backing_src_help(const char *flag); 175 173 enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name); 176 174 long get_run_delay(void); ··· 191 189 } 192 190 193 191 /* Aligns x up to the next multiple of size. Size must be a power of 2. 
*/ 194 - static inline uint64_t align_up(uint64_t x, uint64_t size) 192 + static inline u64 align_up(u64 x, u64 size) 195 193 { 196 - uint64_t mask = size - 1; 194 + u64 mask = size - 1; 197 195 198 196 TEST_ASSERT(size != 0 && !(size & (size - 1)), 199 197 "size not a power of 2: %lu", size); 200 198 return ((x + mask) & ~mask); 201 199 } 202 200 203 - static inline uint64_t align_down(uint64_t x, uint64_t size) 201 + static inline u64 align_down(u64 x, u64 size) 204 202 { 205 - uint64_t x_aligned_up = align_up(x, size); 203 + u64 x_aligned_up = align_up(x, size); 206 204 207 205 if (x == x_aligned_up) 208 206 return x; ··· 217 215 218 216 int atoi_paranoid(const char *num_str); 219 217 220 - static inline uint32_t atoi_positive(const char *name, const char *num_str) 218 + static inline u32 atoi_positive(const char *name, const char *num_str) 221 219 { 222 220 int num = atoi_paranoid(num_str); 223 221 ··· 225 223 return num; 226 224 } 227 225 228 - static inline uint32_t atoi_non_negative(const char *name, const char *num_str) 226 + static inline u32 atoi_non_negative(const char *name, const char *num_str) 229 227 { 230 228 int num = atoi_paranoid(num_str); 231 229
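align_up()/align_down() above depend on size being a power of two, so that size - 1 is a contiguous low-bit mask. A standalone worked example with the power-of-two TEST_ASSERT() omitted and the values chosen arbitrarily:

#include <assert.h>
#include <stdint.h>

static uint64_t align_up(uint64_t x, uint64_t size)
{
	uint64_t mask = size - 1;	/* e.g. 0xfff for 4 KiB alignment */

	return (x + mask) & ~mask;
}

int main(void)
{
	assert(align_up(0x1001, 0x1000) == 0x2000);	/* rounds up to the next page */
	assert(align_up(0x2000, 0x1000) == 0x2000);	/* already aligned: unchanged */
	return 0;
}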
+9 -9
tools/testing/selftests/kvm/include/timer_test.h
··· 18 18 19 19 /* Timer test cmdline parameters */ 20 20 struct test_args { 21 - uint32_t nr_vcpus; 22 - uint32_t nr_iter; 23 - uint32_t timer_period_ms; 24 - uint32_t migration_freq_ms; 25 - uint32_t timer_err_margin_us; 21 + u32 nr_vcpus; 22 + u32 nr_iter; 23 + u32 timer_period_ms; 24 + u32 migration_freq_ms; 25 + u32 timer_err_margin_us; 26 26 /* Members of struct kvm_arm_counter_offset */ 27 - uint64_t counter_offset; 28 - uint64_t reserved; 27 + u64 counter_offset; 28 + u64 reserved; 29 29 }; 30 30 31 31 /* Shared variables between host and guest */ 32 32 struct test_vcpu_shared_data { 33 - uint32_t nr_iter; 33 + u32 nr_iter; 34 34 int guest_stage; 35 - uint64_t xcnt; 35 + u64 xcnt; 36 36 }; 37 37 38 38 extern struct test_args test_args;
+11 -11
tools/testing/selftests/kvm/include/ucall_common.h
··· 21 21 #define UCALL_BUFFER_LEN 1024 22 22 23 23 struct ucall { 24 - uint64_t cmd; 25 - uint64_t args[UCALL_MAX_ARGS]; 24 + u64 cmd; 25 + u64 args[UCALL_MAX_ARGS]; 26 26 char buffer[UCALL_BUFFER_LEN]; 27 27 28 28 /* Host virtual address of this struct. */ 29 29 struct ucall *hva; 30 30 }; 31 31 32 - void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa); 33 - void ucall_arch_do_ucall(vm_vaddr_t uc); 32 + void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa); 33 + void ucall_arch_do_ucall(gva_t uc); 34 34 void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu); 35 35 36 - void ucall(uint64_t cmd, int nargs, ...); 37 - __printf(2, 3) void ucall_fmt(uint64_t cmd, const char *fmt, ...); 38 - __printf(5, 6) void ucall_assert(uint64_t cmd, const char *exp, 36 + void ucall(u64 cmd, int nargs, ...); 37 + __printf(2, 3) void ucall_fmt(u64 cmd, const char *fmt, ...); 38 + __printf(5, 6) void ucall_assert(u64 cmd, const char *exp, 39 39 const char *file, unsigned int line, 40 40 const char *fmt, ...); 41 - uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc); 42 - void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa); 43 - int ucall_nr_pages_required(uint64_t page_size); 41 + u64 get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc); 42 + void ucall_init(struct kvm_vm *vm, gpa_t mmio_gpa); 43 + int ucall_nr_pages_required(u64 page_size); 44 44 45 45 /* 46 46 * Perform userspace call without any associated data. This bare call avoids ··· 48 48 * the full ucall() are problematic and/or unwanted. Note, this will come out 49 49 * as UCALL_NONE on the backend. 50 50 */ 51 - #define GUEST_UCALL_NONE() ucall_arch_do_ucall((vm_vaddr_t)NULL) 51 + #define GUEST_UCALL_NONE() ucall_arch_do_ucall((gva_t)NULL) 52 52 53 53 #define GUEST_SYNC_ARGS(stage, arg1, arg2, arg3, arg4) \ 54 54 ucall(UCALL_SYNC, 6, "hello", stage, arg1, arg2, arg3, arg4)
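The ucall plumbing above is how guest code reports back to the host: the guest packs a command and arguments into a struct ucall and traps out (MMIO, SBI, or DIAG 0x501 depending on the architecture, per the per-arch ucall.h hunks above), and the host decodes the result with get_ucall(). A hedged sketch of both halves; GUEST_SYNC_ARGS() and get_ucall() come from this hunk, while GUEST_DONE(), the UCALL_SYNC/UCALL_DONE values, vcpu_run(), and TEST_FAIL() are assumed from the wider framework:

/* Guest side: report a stage number, then signal completion. */
static void guest_code(void)
{
	GUEST_SYNC_ARGS(1 /* stage */, 0, 0, 0, 0);
	GUEST_DONE();
}

/* Host side: run the vCPU and decode each userspace call. */
static void handle_ucalls(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	for (;;) {
		vcpu_run(vcpu);
		switch (get_ucall(vcpu, &uc)) {
		case UCALL_SYNC:
			/* uc.args[1] holds the stage passed by the guest. */
			break;
		case UCALL_DONE:
			return;
		default:
			TEST_FAIL("Unexpected ucall %lu", uc.cmd);
		}
	}
}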
+3 -3
tools/testing/selftests/kvm/include/userfaultfd_util.h
··· 25 25 26 26 struct uffd_desc { 27 27 int uffd; 28 - uint64_t num_readers; 28 + u64 num_readers; 29 29 /* Holds the write ends of the pipes for killing the readers. */ 30 30 int *pipefds; 31 31 pthread_t *readers; ··· 33 33 }; 34 34 35 35 struct uffd_desc *uffd_setup_demand_paging(int uffd_mode, useconds_t delay, 36 - void *hva, uint64_t len, 37 - uint64_t num_readers, 36 + void *hva, u64 len, 37 + u64 num_readers, 38 38 uffd_handler_t handler); 39 39 40 40 void uffd_stop_demand_paging(struct uffd_desc *uffd);
+11 -11
tools/testing/selftests/kvm/include/x86/apic.h
··· 79 79 void xapic_enable(void); 80 80 void x2apic_enable(void); 81 81 82 - static inline uint32_t get_bsp_flag(void) 82 + static inline u32 get_bsp_flag(void) 83 83 { 84 84 return rdmsr(MSR_IA32_APICBASE) & MSR_IA32_APICBASE_BSP; 85 85 } 86 86 87 - static inline uint32_t xapic_read_reg(unsigned int reg) 87 + static inline u32 xapic_read_reg(unsigned int reg) 88 88 { 89 - return ((volatile uint32_t *)APIC_DEFAULT_GPA)[reg >> 2]; 89 + return ((volatile u32 *)APIC_DEFAULT_GPA)[reg >> 2]; 90 90 } 91 91 92 - static inline void xapic_write_reg(unsigned int reg, uint32_t val) 92 + static inline void xapic_write_reg(unsigned int reg, u32 val) 93 93 { 94 - ((volatile uint32_t *)APIC_DEFAULT_GPA)[reg >> 2] = val; 94 + ((volatile u32 *)APIC_DEFAULT_GPA)[reg >> 2] = val; 95 95 } 96 96 97 - static inline uint64_t x2apic_read_reg(unsigned int reg) 97 + static inline u64 x2apic_read_reg(unsigned int reg) 98 98 { 99 99 return rdmsr(APIC_BASE_MSR + (reg >> 4)); 100 100 } 101 101 102 - static inline uint8_t x2apic_write_reg_safe(unsigned int reg, uint64_t value) 102 + static inline u8 x2apic_write_reg_safe(unsigned int reg, u64 value) 103 103 { 104 104 return wrmsr_safe(APIC_BASE_MSR + (reg >> 4), value); 105 105 } 106 106 107 - static inline void x2apic_write_reg(unsigned int reg, uint64_t value) 107 + static inline void x2apic_write_reg(unsigned int reg, u64 value) 108 108 { 109 - uint8_t fault = x2apic_write_reg_safe(reg, value); 109 + u8 fault = x2apic_write_reg_safe(reg, value); 110 110 111 111 __GUEST_ASSERT(!fault, "Unexpected fault 0x%x on WRMSR(%x) = %lx\n", 112 112 fault, APIC_BASE_MSR + (reg >> 4), value); 113 113 } 114 114 115 - static inline void x2apic_write_reg_fault(unsigned int reg, uint64_t value) 115 + static inline void x2apic_write_reg_fault(unsigned int reg, u64 value) 116 116 { 117 - uint8_t fault = x2apic_write_reg_safe(reg, value); 117 + u8 fault = x2apic_write_reg_safe(reg, value); 118 118 119 119 __GUEST_ASSERT(fault == GP_VECTOR, 120 120 "Wanted #GP on WRMSR(%x) = %lx, got 0x%x\n",
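The x2APIC helpers above derive the MSR index from the xAPIC MMIO offset with APIC_BASE_MSR + (reg >> 4): each 16-byte-aligned xAPIC register becomes one MSR. A standalone sketch of that mapping; APIC_BASE_MSR and the register offsets follow the architecture and are defined locally so the snippet compiles on its own:

	/* Sketch of the xAPIC-offset to x2APIC-MSR mapping used by the helpers above. */
	#include <assert.h>
	#include <stdint.h>

	#define APIC_BASE_MSR	0x800	/* first x2APIC MSR per the SDM */
	#define APIC_ID		0x20	/* xAPIC MMIO offset of the APIC ID register */
	#define APIC_ICR	0x300	/* xAPIC MMIO offset of the interrupt command register */

	static uint32_t x2apic_msr(unsigned int xapic_reg)
	{
		/* One MSR per 16-byte xAPIC register. */
		return APIC_BASE_MSR + (xapic_reg >> 4);
	}

	int main(void)
	{
		assert(x2apic_msr(APIC_ID) == 0x802);	/* x2APIC ID MSR */
		assert(x2apic_msr(APIC_ICR) == 0x830);	/* x2APIC ICR MSR */
		return 0;
	}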
+11 -11
tools/testing/selftests/kvm/include/x86/evmcs.h
··· 10 10 #include "hyperv.h" 11 11 #include "vmx.h" 12 12 13 - #define u16 uint16_t 14 - #define u32 uint32_t 15 - #define u64 uint64_t 13 + #define u16 u16 14 + #define u32 u32 15 + #define u64 u64 16 16 17 17 #define EVMCS_VERSION 1 18 18 ··· 245 245 enable_evmcs = true; 246 246 } 247 247 248 - static inline int evmcs_vmptrld(uint64_t vmcs_pa, void *vmcs) 248 + static inline int evmcs_vmptrld(u64 vmcs_pa, void *vmcs) 249 249 { 250 250 current_vp_assist->current_nested_vmcs = vmcs_pa; 251 251 current_vp_assist->enlighten_vmentry = 1; ··· 265 265 return true; 266 266 } 267 267 268 - static inline int evmcs_vmptrst(uint64_t *value) 268 + static inline int evmcs_vmptrst(u64 *value) 269 269 { 270 270 *value = current_vp_assist->current_nested_vmcs & 271 271 ~HV_X64_MSR_VP_ASSIST_PAGE_ENABLE; ··· 273 273 return 0; 274 274 } 275 275 276 - static inline int evmcs_vmread(uint64_t encoding, uint64_t *value) 276 + static inline int evmcs_vmread(u64 encoding, u64 *value) 277 277 { 278 278 switch (encoding) { 279 279 case GUEST_RIP: ··· 672 672 return 0; 673 673 } 674 674 675 - static inline int evmcs_vmwrite(uint64_t encoding, uint64_t value) 675 + static inline int evmcs_vmwrite(u64 encoding, u64 value) 676 676 { 677 677 switch (encoding) { 678 678 case GUEST_RIP: ··· 1226 1226 "pop %%rbp;" 1227 1227 : [ret]"=&a"(ret) 1228 1228 : [host_rsp]"r" 1229 - ((uint64_t)&current_evmcs->host_rsp), 1229 + ((u64)&current_evmcs->host_rsp), 1230 1230 [host_rip]"r" 1231 - ((uint64_t)&current_evmcs->host_rip) 1231 + ((u64)&current_evmcs->host_rip) 1232 1232 : "memory", "cc", "rbx", "r8", "r9", "r10", 1233 1233 "r11", "r12", "r13", "r14", "r15"); 1234 1234 return ret; ··· 1265 1265 "pop %%rbp;" 1266 1266 : [ret]"=&a"(ret) 1267 1267 : [host_rsp]"r" 1268 - ((uint64_t)&current_evmcs->host_rsp), 1268 + ((u64)&current_evmcs->host_rsp), 1269 1269 [host_rip]"r" 1270 - ((uint64_t)&current_evmcs->host_rip) 1270 + ((u64)&current_evmcs->host_rip) 1271 1271 : "memory", "cc", "rbx", "r8", "r9", "r10", 1272 1272 "r11", "r12", "r13", "r14", "r15"); 1273 1273 return ret;
+14 -14
tools/testing/selftests/kvm/include/x86/hyperv.h
··· 254 254 * Issue a Hyper-V hypercall. Returns exception vector raised or 0, 'hv_status' 255 255 * is set to the hypercall status (if no exception occurred). 256 256 */ 257 - static inline uint8_t __hyperv_hypercall(u64 control, vm_vaddr_t input_address, 258 - vm_vaddr_t output_address, 259 - uint64_t *hv_status) 257 + static inline u8 __hyperv_hypercall(u64 control, gva_t input_address, 258 + gva_t output_address, 259 + u64 *hv_status) 260 260 { 261 - uint64_t error_code; 262 - uint8_t vector; 261 + u64 error_code; 262 + u8 vector; 263 263 264 264 /* Note both the hypercall and the "asm safe" clobber r9-r11. */ 265 265 asm volatile("mov %[output_address], %%r8\n\t" ··· 274 274 } 275 275 276 276 /* Issue a Hyper-V hypercall and assert that it succeeded. */ 277 - static inline void hyperv_hypercall(u64 control, vm_vaddr_t input_address, 278 - vm_vaddr_t output_address) 277 + static inline void hyperv_hypercall(u64 control, gva_t input_address, 278 + gva_t output_address) 279 279 { 280 - uint64_t hv_status; 281 - uint8_t vector; 280 + u64 hv_status; 281 + u8 vector; 282 282 283 283 vector = __hyperv_hypercall(control, input_address, output_address, &hv_status); 284 284 ··· 327 327 328 328 extern struct hv_vp_assist_page *current_vp_assist; 329 329 330 - int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist); 330 + int enable_vp_assist(u64 vp_assist_pa, void *vp_assist); 331 331 332 332 struct hyperv_test_pages { 333 333 /* VP assist page */ 334 334 void *vp_assist_hva; 335 - uint64_t vp_assist_gpa; 335 + u64 vp_assist_gpa; 336 336 void *vp_assist; 337 337 338 338 /* Partition assist page */ 339 339 void *partition_assist_hva; 340 - uint64_t partition_assist_gpa; 340 + u64 partition_assist_gpa; 341 341 void *partition_assist; 342 342 343 343 /* Enlightened VMCS */ 344 344 void *enlightened_vmcs_hva; 345 - uint64_t enlightened_vmcs_gpa; 345 + u64 enlightened_vmcs_gpa; 346 346 void *enlightened_vmcs; 347 347 }; 348 348 349 349 struct hyperv_test_pages *vcpu_alloc_hyperv_test_pages(struct kvm_vm *vm, 350 - vm_vaddr_t *p_hv_pages_gva); 350 + gva_t *p_hv_pages_gva); 351 351 352 352 /* HV_X64_MSR_TSC_INVARIANT_CONTROL bits */ 353 353 #define HV_INVARIANT_TSC_EXPOSED BIT_ULL(0)
+18 -18
tools/testing/selftests/kvm/include/x86/kvm_util_arch.h
··· 11 11 extern bool is_forced_emulation_enabled; 12 12 13 13 struct pte_masks { 14 - uint64_t present; 15 - uint64_t writable; 16 - uint64_t user; 17 - uint64_t readable; 18 - uint64_t executable; 19 - uint64_t accessed; 20 - uint64_t dirty; 21 - uint64_t huge; 22 - uint64_t nx; 23 - uint64_t c; 24 - uint64_t s; 14 + u64 present; 15 + u64 writable; 16 + u64 user; 17 + u64 readable; 18 + u64 executable; 19 + u64 accessed; 20 + u64 dirty; 21 + u64 huge; 22 + u64 nx; 23 + u64 c; 24 + u64 s; 25 25 26 - uint64_t always_set; 26 + u64 always_set; 27 27 }; 28 28 29 29 struct kvm_mmu_arch { ··· 33 33 struct kvm_mmu; 34 34 35 35 struct kvm_vm_arch { 36 - vm_vaddr_t gdt; 37 - vm_vaddr_t tss; 38 - vm_vaddr_t idt; 36 + gva_t gdt; 37 + gva_t tss; 38 + gva_t idt; 39 39 40 - uint64_t c_bit; 41 - uint64_t s_bit; 40 + u64 c_bit; 41 + u64 s_bit; 42 42 int sev_fd; 43 43 bool is_pt_protected; 44 44 }; ··· 62 62 : "+m" (mem) \ 63 63 : "r" (val) : "memory"); \ 64 64 } else { \ 65 - uint64_t __old = READ_ONCE(mem); \ 65 + u64 __old = READ_ONCE(mem); \ 66 66 \ 67 67 __asm__ __volatile__(KVM_FEP LOCK_PREFIX "cmpxchg %[new], %[ptr]" \ 68 68 : [ptr] "+m" (mem), [old] "+a" (__old) \
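struct pte_masks above parameterises which PTE bits mean present, writable, and so on, so MMU-generic code can test or build PTEs without hard-coding one paging format. A sketch of how such masks might be consumed; the helper names are hypothetical, the struct is restated locally so the snippet is self-contained, and the mask values are the legacy x86 paging bits, used purely for illustration:

	#include <stdbool.h>
	#include <stdint.h>

	typedef uint64_t u64;

	/* Mirrors the struct in kvm_util_arch.h above. */
	struct pte_masks {
		u64 present, writable, user, readable, executable;
		u64 accessed, dirty, huge, nx, c, s;
		u64 always_set;
	};

	static bool pte_is_present(const struct pte_masks *m, u64 pte)
	{
		return pte & m->present;
	}

	static u64 pte_make_leaf(const struct pte_masks *m, u64 pa)
	{
		/* Minimal writable leaf PTE: address plus whatever bits must be set. */
		return pa | m->present | m->writable | m->always_set;
	}

	int main(void)
	{
		/* Legacy x86 paging bits, for illustration only. */
		const struct pte_masks legacy = {
			.present  = 1ULL << 0,
			.writable = 1ULL << 1,
			.user     = 1ULL << 2,
			.accessed = 1ULL << 5,
			.dirty    = 1ULL << 6,
			.huge     = 1ULL << 7,
			.nx       = 1ULL << 63,
		};
		u64 pte = pte_make_leaf(&legacy, 0x1000);

		return pte_is_present(&legacy, pte) ? 0 : 1;
	}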
+5 -4
tools/testing/selftests/kvm/include/x86/pmu.h
··· 6 6 #define SELFTEST_KVM_PMU_H 7 7 8 8 #include <stdbool.h> 9 - #include <stdint.h> 10 9 10 + #include <linux/types.h> 11 11 #include <linux/bits.h> 12 12 13 13 #define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300 ··· 104 104 NR_AMD_ZEN_EVENTS, 105 105 }; 106 106 107 - extern const uint64_t intel_pmu_arch_events[]; 108 - extern const uint64_t amd_pmu_zen_events[]; 107 + extern const u64 intel_pmu_arch_events[]; 108 + extern const u64 amd_pmu_zen_events[]; 109 109 110 110 enum pmu_errata { 111 111 INSTRUCTIONS_RETIRED_OVERCOUNT, 112 112 BRANCHES_RETIRED_OVERCOUNT, 113 113 }; 114 - extern uint64_t pmu_errata_mask; 114 + 115 + extern u64 pmu_errata_mask; 115 116 116 117 void kvm_init_pmu_errata(void); 117 118
+144 -148
tools/testing/selftests/kvm/include/x86/processor.h
··· 23 23 extern bool host_cpu_is_amd; 24 24 extern bool host_cpu_is_hygon; 25 25 extern bool host_cpu_is_amd_compatible; 26 - extern uint64_t guest_tsc_khz; 26 + extern u64 guest_tsc_khz; 27 27 28 28 #ifndef MAX_NR_CPUID_ENTRIES 29 29 #define MAX_NR_CPUID_ENTRIES 100 ··· 399 399 }; 400 400 401 401 struct desc64 { 402 - uint16_t limit0; 403 - uint16_t base0; 402 + u16 limit0; 403 + u16 base0; 404 404 unsigned base1:8, type:4, s:1, dpl:2, p:1; 405 405 unsigned limit1:4, avl:1, l:1, db:1, g:1, base2:8; 406 - uint32_t base3; 407 - uint32_t zero1; 406 + u32 base3; 407 + u32 zero1; 408 408 } __attribute__((packed)); 409 409 410 410 struct desc_ptr { 411 - uint16_t size; 412 - uint64_t address; 411 + u16 size; 412 + u64 address; 413 413 } __attribute__((packed)); 414 414 415 415 struct kvm_x86_state { ··· 427 427 struct kvm_msrs msrs; 428 428 }; 429 429 430 - static inline uint64_t get_desc64_base(const struct desc64 *desc) 430 + static inline u64 get_desc64_base(const struct desc64 *desc) 431 431 { 432 - return (uint64_t)desc->base3 << 32 | 433 - (uint64_t)desc->base2 << 24 | 434 - (uint64_t)desc->base1 << 16 | 435 - (uint64_t)desc->base0; 432 + return (u64)desc->base3 << 32 | 433 + (u64)desc->base2 << 24 | 434 + (u64)desc->base1 << 16 | 435 + (u64)desc->base0; 436 436 } 437 437 438 - static inline uint64_t rdtsc(void) 438 + static inline u64 rdtsc(void) 439 439 { 440 - uint32_t eax, edx; 441 - uint64_t tsc_val; 440 + u32 eax, edx; 441 + u64 tsc_val; 442 442 /* 443 443 * The lfence is to wait (on Intel CPUs) until all previous 444 444 * instructions have been executed. If software requires RDTSC to be ··· 446 446 * execute LFENCE immediately after RDTSC 447 447 */ 448 448 __asm__ __volatile__("lfence; rdtsc; lfence" : "=a"(eax), "=d"(edx)); 449 - tsc_val = ((uint64_t)edx) << 32 | eax; 449 + tsc_val = ((u64)edx) << 32 | eax; 450 450 return tsc_val; 451 451 } 452 452 453 - static inline uint64_t rdtscp(uint32_t *aux) 453 + static inline u64 rdtscp(u32 *aux) 454 454 { 455 - uint32_t eax, edx; 455 + u32 eax, edx; 456 456 457 457 __asm__ __volatile__("rdtscp" : "=a"(eax), "=d"(edx), "=c"(*aux)); 458 - return ((uint64_t)edx) << 32 | eax; 458 + return ((u64)edx) << 32 | eax; 459 459 } 460 460 461 - static inline uint64_t rdmsr(uint32_t msr) 461 + static inline u64 rdmsr(u32 msr) 462 462 { 463 - uint32_t a, d; 463 + u32 a, d; 464 464 465 465 __asm__ __volatile__("rdmsr" : "=a"(a), "=d"(d) : "c"(msr) : "memory"); 466 466 467 - return a | ((uint64_t) d << 32); 467 + return a | ((u64)d << 32); 468 468 } 469 469 470 - static inline void wrmsr(uint32_t msr, uint64_t value) 470 + static inline void wrmsr(u32 msr, u64 value) 471 471 { 472 - uint32_t a = value; 473 - uint32_t d = value >> 32; 472 + u32 a = value; 473 + u32 d = value >> 32; 474 474 475 475 __asm__ __volatile__("wrmsr" :: "a"(a), "d"(d), "c"(msr) : "memory"); 476 476 } 477 477 478 478 479 - static inline uint16_t inw(uint16_t port) 479 + static inline u16 inw(u16 port) 480 480 { 481 - uint16_t tmp; 481 + u16 tmp; 482 482 483 483 __asm__ __volatile__("in %%dx, %%ax" 484 484 : /* output */ "=a" (tmp) ··· 487 487 return tmp; 488 488 } 489 489 490 - static inline uint16_t get_es(void) 490 + static inline u16 get_es(void) 491 491 { 492 - uint16_t es; 492 + u16 es; 493 493 494 494 __asm__ __volatile__("mov %%es, %[es]" 495 495 : /* output */ [es]"=rm"(es)); 496 496 return es; 497 497 } 498 498 499 - static inline uint16_t get_cs(void) 499 + static inline u16 get_cs(void) 500 500 { 501 - uint16_t cs; 501 + u16 cs; 502 502 503 503 __asm__ 
__volatile__("mov %%cs, %[cs]" 504 504 : /* output */ [cs]"=rm"(cs)); 505 505 return cs; 506 506 } 507 507 508 - static inline uint16_t get_ss(void) 508 + static inline u16 get_ss(void) 509 509 { 510 - uint16_t ss; 510 + u16 ss; 511 511 512 512 __asm__ __volatile__("mov %%ss, %[ss]" 513 513 : /* output */ [ss]"=rm"(ss)); 514 514 return ss; 515 515 } 516 516 517 - static inline uint16_t get_ds(void) 517 + static inline u16 get_ds(void) 518 518 { 519 - uint16_t ds; 519 + u16 ds; 520 520 521 521 __asm__ __volatile__("mov %%ds, %[ds]" 522 522 : /* output */ [ds]"=rm"(ds)); 523 523 return ds; 524 524 } 525 525 526 - static inline uint16_t get_fs(void) 526 + static inline u16 get_fs(void) 527 527 { 528 - uint16_t fs; 528 + u16 fs; 529 529 530 530 __asm__ __volatile__("mov %%fs, %[fs]" 531 531 : /* output */ [fs]"=rm"(fs)); 532 532 return fs; 533 533 } 534 534 535 - static inline uint16_t get_gs(void) 535 + static inline u16 get_gs(void) 536 536 { 537 - uint16_t gs; 537 + u16 gs; 538 538 539 539 __asm__ __volatile__("mov %%gs, %[gs]" 540 540 : /* output */ [gs]"=rm"(gs)); 541 541 return gs; 542 542 } 543 543 544 - static inline uint16_t get_tr(void) 544 + static inline u16 get_tr(void) 545 545 { 546 - uint16_t tr; 546 + u16 tr; 547 547 548 548 __asm__ __volatile__("str %[tr]" 549 549 : /* output */ [tr]"=rm"(tr)); 550 550 return tr; 551 551 } 552 552 553 - static inline uint64_t get_cr0(void) 553 + static inline u64 get_cr0(void) 554 554 { 555 - uint64_t cr0; 555 + u64 cr0; 556 556 557 557 __asm__ __volatile__("mov %%cr0, %[cr0]" 558 558 : /* output */ [cr0]"=r"(cr0)); 559 559 return cr0; 560 560 } 561 561 562 - static inline void set_cr0(uint64_t val) 562 + static inline void set_cr0(u64 val) 563 563 { 564 564 __asm__ __volatile__("mov %0, %%cr0" : : "r" (val) : "memory"); 565 565 } 566 566 567 - static inline uint64_t get_cr3(void) 567 + static inline u64 get_cr3(void) 568 568 { 569 - uint64_t cr3; 569 + u64 cr3; 570 570 571 571 __asm__ __volatile__("mov %%cr3, %[cr3]" 572 572 : /* output */ [cr3]"=r"(cr3)); 573 573 return cr3; 574 574 } 575 575 576 - static inline void set_cr3(uint64_t val) 576 + static inline void set_cr3(u64 val) 577 577 { 578 578 __asm__ __volatile__("mov %0, %%cr3" : : "r" (val) : "memory"); 579 579 } 580 580 581 - static inline uint64_t get_cr4(void) 581 + static inline u64 get_cr4(void) 582 582 { 583 - uint64_t cr4; 583 + u64 cr4; 584 584 585 585 __asm__ __volatile__("mov %%cr4, %[cr4]" 586 586 : /* output */ [cr4]"=r"(cr4)); 587 587 return cr4; 588 588 } 589 589 590 - static inline void set_cr4(uint64_t val) 590 + static inline void set_cr4(u64 val) 591 591 { 592 592 __asm__ __volatile__("mov %0, %%cr4" : : "r" (val) : "memory"); 593 593 } 594 594 595 - static inline uint64_t get_cr8(void) 595 + static inline u64 get_cr8(void) 596 596 { 597 - uint64_t cr8; 597 + u64 cr8; 598 598 599 599 __asm__ __volatile__("mov %%cr8, %[cr8]" : [cr8]"=r"(cr8)); 600 600 return cr8; 601 601 } 602 602 603 - static inline void set_cr8(uint64_t val) 603 + static inline void set_cr8(u64 val) 604 604 { 605 605 __asm__ __volatile__("mov %0, %%cr8" : : "r" (val) : "memory"); 606 606 } ··· 651 651 return idt; 652 652 } 653 653 654 - static inline void outl(uint16_t port, uint32_t value) 654 + static inline void outl(u16 port, u32 value) 655 655 { 656 656 __asm__ __volatile__("outl %%eax, %%dx" : : "d"(port), "a"(value)); 657 657 } 658 658 659 - static inline void __cpuid(uint32_t function, uint32_t index, 660 - uint32_t *eax, uint32_t *ebx, 661 - uint32_t *ecx, uint32_t *edx) 659 + static inline 
void __cpuid(u32 function, u32 index, 660 + u32 *eax, u32 *ebx, 661 + u32 *ecx, u32 *edx) 662 662 { 663 663 *eax = function; 664 664 *ecx = index; ··· 672 672 : "memory"); 673 673 } 674 674 675 - static inline void cpuid(uint32_t function, 676 - uint32_t *eax, uint32_t *ebx, 677 - uint32_t *ecx, uint32_t *edx) 675 + static inline void cpuid(u32 function, 676 + u32 *eax, u32 *ebx, 677 + u32 *ecx, u32 *edx) 678 678 { 679 679 return __cpuid(function, 0, eax, ebx, ecx, edx); 680 680 } 681 681 682 - static inline uint32_t this_cpu_fms(void) 682 + static inline u32 this_cpu_fms(void) 683 683 { 684 - uint32_t eax, ebx, ecx, edx; 684 + u32 eax, ebx, ecx, edx; 685 685 686 686 cpuid(1, &eax, &ebx, &ecx, &edx); 687 687 return eax; 688 688 } 689 689 690 - static inline uint32_t this_cpu_family(void) 690 + static inline u32 this_cpu_family(void) 691 691 { 692 692 return x86_family(this_cpu_fms()); 693 693 } 694 694 695 - static inline uint32_t this_cpu_model(void) 695 + static inline u32 this_cpu_model(void) 696 696 { 697 697 return x86_model(this_cpu_fms()); 698 698 } 699 699 700 700 static inline bool this_cpu_vendor_string_is(const char *vendor) 701 701 { 702 - const uint32_t *chunk = (const uint32_t *)vendor; 703 - uint32_t eax, ebx, ecx, edx; 702 + const u32 *chunk = (const u32 *)vendor; 703 + u32 eax, ebx, ecx, edx; 704 704 705 705 cpuid(0, &eax, &ebx, &ecx, &edx); 706 706 return (ebx == chunk[0] && edx == chunk[1] && ecx == chunk[2]); ··· 724 724 return this_cpu_vendor_string_is("HygonGenuine"); 725 725 } 726 726 727 - static inline uint32_t __this_cpu_has(uint32_t function, uint32_t index, 728 - uint8_t reg, uint8_t lo, uint8_t hi) 727 + static inline u32 __this_cpu_has(u32 function, u32 index, u8 reg, u8 lo, u8 hi) 729 728 { 730 - uint32_t gprs[4]; 729 + u32 gprs[4]; 731 730 732 731 __cpuid(function, index, 733 732 &gprs[KVM_CPUID_EAX], &gprs[KVM_CPUID_EBX], ··· 741 742 feature.reg, feature.bit, feature.bit); 742 743 } 743 744 744 - static inline uint32_t this_cpu_property(struct kvm_x86_cpu_property property) 745 + static inline u32 this_cpu_property(struct kvm_x86_cpu_property property) 745 746 { 746 747 return __this_cpu_has(property.function, property.index, 747 748 property.reg, property.lo_bit, property.hi_bit); ··· 749 750 750 751 static __always_inline bool this_cpu_has_p(struct kvm_x86_cpu_property property) 751 752 { 752 - uint32_t max_leaf; 753 + u32 max_leaf; 753 754 754 755 switch (property.function & 0xc0000000) { 755 756 case 0: ··· 769 770 770 771 static inline bool this_pmu_has(struct kvm_x86_pmu_feature feature) 771 772 { 772 - uint32_t nr_bits; 773 + u32 nr_bits; 773 774 774 775 if (feature.f.reg == KVM_CPUID_EBX) { 775 776 nr_bits = this_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH); ··· 781 782 return nr_bits > feature.f.bit || this_cpu_has(feature.f); 782 783 } 783 784 784 - static __always_inline uint64_t this_cpu_supported_xcr0(void) 785 + static __always_inline u64 this_cpu_supported_xcr0(void) 785 786 { 786 787 if (!this_cpu_has_p(X86_PROPERTY_SUPPORTED_XCR0_LO)) 787 788 return 0; 788 789 789 790 return this_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_LO) | 790 - ((uint64_t)this_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32); 791 + ((u64)this_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32); 791 792 } 792 793 793 794 typedef u32 __attribute__((vector_size(16))) sse128_t; ··· 866 867 867 868 static inline void udelay(unsigned long usec) 868 869 { 869 - uint64_t start, now, cycles; 870 + u64 start, now, cycles; 870 871 871 872 GUEST_ASSERT(guest_tsc_khz); 
872 873 cycles = guest_tsc_khz / 1000 * usec; ··· 897 898 898 899 const struct kvm_msr_list *kvm_get_msr_index_list(void); 899 900 const struct kvm_msr_list *kvm_get_feature_msr_index_list(void); 900 - bool kvm_msr_is_in_save_restore_list(uint32_t msr_index); 901 - uint64_t kvm_get_feature_msr(uint64_t msr_index); 901 + bool kvm_msr_is_in_save_restore_list(u32 msr_index); 902 + u64 kvm_get_feature_msr(u64 msr_index); 902 903 903 904 static inline void vcpu_msrs_get(struct kvm_vcpu *vcpu, 904 905 struct kvm_msrs *msrs) ··· 953 954 } 954 955 955 956 const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid, 956 - uint32_t function, uint32_t index); 957 + u32 function, u32 index); 957 958 const struct kvm_cpuid2 *kvm_get_supported_cpuid(void); 958 959 959 - static inline uint32_t kvm_cpu_fms(void) 960 + static inline u32 kvm_cpu_fms(void) 960 961 { 961 962 return get_cpuid_entry(kvm_get_supported_cpuid(), 0x1, 0)->eax; 962 963 } 963 964 964 - static inline uint32_t kvm_cpu_family(void) 965 + static inline u32 kvm_cpu_family(void) 965 966 { 966 967 return x86_family(kvm_cpu_fms()); 967 968 } 968 969 969 - static inline uint32_t kvm_cpu_model(void) 970 + static inline u32 kvm_cpu_model(void) 970 971 { 971 972 return x86_model(kvm_cpu_fms()); 972 973 } ··· 979 980 return kvm_cpuid_has(kvm_get_supported_cpuid(), feature); 980 981 } 981 982 982 - uint32_t kvm_cpuid_property(const struct kvm_cpuid2 *cpuid, 983 - struct kvm_x86_cpu_property property); 983 + u32 kvm_cpuid_property(const struct kvm_cpuid2 *cpuid, 984 + struct kvm_x86_cpu_property property); 984 985 985 - static inline uint32_t kvm_cpu_property(struct kvm_x86_cpu_property property) 986 + static inline u32 kvm_cpu_property(struct kvm_x86_cpu_property property) 986 987 { 987 988 return kvm_cpuid_property(kvm_get_supported_cpuid(), property); 988 989 } 989 990 990 991 static __always_inline bool kvm_cpu_has_p(struct kvm_x86_cpu_property property) 991 992 { 992 - uint32_t max_leaf; 993 + u32 max_leaf; 993 994 994 995 switch (property.function & 0xc0000000) { 995 996 case 0: ··· 1009 1010 1010 1011 static inline bool kvm_pmu_has(struct kvm_x86_pmu_feature feature) 1011 1012 { 1012 - uint32_t nr_bits; 1013 + u32 nr_bits; 1013 1014 1014 1015 if (feature.f.reg == KVM_CPUID_EBX) { 1015 1016 nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH); ··· 1021 1022 return nr_bits > feature.f.bit || kvm_cpu_has(feature.f); 1022 1023 } 1023 1024 1024 - static __always_inline uint64_t kvm_cpu_supported_xcr0(void) 1025 + static __always_inline u64 kvm_cpu_supported_xcr0(void) 1025 1026 { 1026 1027 if (!kvm_cpu_has_p(X86_PROPERTY_SUPPORTED_XCR0_LO)) 1027 1028 return 0; 1028 1029 1029 1030 return kvm_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_LO) | 1030 - ((uint64_t)kvm_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32); 1031 + ((u64)kvm_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32); 1031 1032 } 1032 1033 1033 1034 static inline size_t kvm_cpuid2_size(int nr_entries) ··· 1061 1062 } 1062 1063 1063 1064 static inline struct kvm_cpuid_entry2 *__vcpu_get_cpuid_entry(struct kvm_vcpu *vcpu, 1064 - uint32_t function, 1065 - uint32_t index) 1065 + u32 function, 1066 + u32 index) 1066 1067 { 1067 1068 TEST_ASSERT(vcpu->cpuid, "Must do vcpu_init_cpuid() first (or equivalent)"); 1068 1069 ··· 1073 1074 } 1074 1075 1075 1076 static inline struct kvm_cpuid_entry2 *vcpu_get_cpuid_entry(struct kvm_vcpu *vcpu, 1076 - uint32_t function) 1077 + u32 function) 1077 1078 { 1078 1079 return __vcpu_get_cpuid_entry(vcpu, function, 0); 1079 
1080 } ··· 1103 1104 1104 1105 void vcpu_set_cpuid_property(struct kvm_vcpu *vcpu, 1105 1106 struct kvm_x86_cpu_property property, 1106 - uint32_t value); 1107 - void vcpu_set_cpuid_maxphyaddr(struct kvm_vcpu *vcpu, uint8_t maxphyaddr); 1107 + u32 value); 1108 + void vcpu_set_cpuid_maxphyaddr(struct kvm_vcpu *vcpu, u8 maxphyaddr); 1108 1109 1109 - void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, uint32_t function); 1110 + void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, u32 function); 1110 1111 1111 1112 static inline bool vcpu_cpuid_has(struct kvm_vcpu *vcpu, 1112 1113 struct kvm_x86_cpu_feature feature) ··· 1134 1135 vcpu_set_or_clear_cpuid_feature(vcpu, feature, false); 1135 1136 } 1136 1137 1137 - uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index); 1138 - int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, uint64_t msr_value); 1138 + u64 vcpu_get_msr(struct kvm_vcpu *vcpu, u64 msr_index); 1139 + int _vcpu_set_msr(struct kvm_vcpu *vcpu, u64 msr_index, u64 msr_value); 1139 1140 1140 1141 /* 1141 1142 * Assert on an MSR access(es) and pretty print the MSR name when possible. ··· 1160 1161 * is changing, etc. This is NOT an exhaustive list! The intent is to filter 1161 1162 * out MSRs that are not durable _and_ that a selftest wants to write. 1162 1163 */ 1163 - static inline bool is_durable_msr(uint32_t msr) 1164 + static inline bool is_durable_msr(u32 msr) 1164 1165 { 1165 1166 return msr != MSR_IA32_TSC; 1166 1167 } 1167 1168 1168 1169 #define vcpu_set_msr(vcpu, msr, val) \ 1169 1170 do { \ 1170 - uint64_t r, v = val; \ 1171 + u64 r, v = val; \ 1171 1172 \ 1172 1173 TEST_ASSERT_MSR(_vcpu_set_msr(vcpu, msr, v) == 1, \ 1173 1174 "KVM_SET_MSRS failed on %s, value = 0x%lx", msr, #msr, v); \ ··· 1181 1182 void kvm_init_vm_address_properties(struct kvm_vm *vm); 1182 1183 1183 1184 struct ex_regs { 1184 - uint64_t rax, rcx, rdx, rbx; 1185 - uint64_t rbp, rsi, rdi; 1186 - uint64_t r8, r9, r10, r11; 1187 - uint64_t r12, r13, r14, r15; 1188 - uint64_t vector; 1189 - uint64_t error_code; 1190 - uint64_t rip; 1191 - uint64_t cs; 1192 - uint64_t rflags; 1185 + u64 rax, rcx, rdx, rbx; 1186 + u64 rbp, rsi, rdi; 1187 + u64 r8, r9, r10, r11; 1188 + u64 r12, r13, r14, r15; 1189 + u64 vector; 1190 + u64 error_code; 1191 + u64 rip; 1192 + u64 cs; 1193 + u64 rflags; 1193 1194 }; 1194 1195 1195 1196 struct idt_entry { 1196 - uint16_t offset0; 1197 - uint16_t selector; 1198 - uint16_t ist : 3; 1199 - uint16_t : 5; 1200 - uint16_t type : 4; 1201 - uint16_t : 1; 1202 - uint16_t dpl : 2; 1203 - uint16_t p : 1; 1204 - uint16_t offset1; 1205 - uint32_t offset2; uint32_t reserved; 1197 + u16 offset0; 1198 + u16 selector; 1199 + u16 ist : 3; 1200 + u16 : 5; 1201 + u16 type : 4; 1202 + u16 : 1; 1203 + u16 dpl : 2; 1204 + u16 p : 1; 1205 + u16 offset1; 1206 + u32 offset2; u32 reserved; 1206 1207 }; 1207 1208 1208 1209 void vm_install_exception_handler(struct kvm_vm *vm, int vector, ··· 1261 1262 1262 1263 #define kvm_asm_safe(insn, inputs...) \ 1263 1264 ({ \ 1264 - uint64_t ign_error_code; \ 1265 - uint8_t vector; \ 1265 + u64 ign_error_code; \ 1266 + u8 vector; \ 1266 1267 \ 1267 1268 asm volatile(KVM_ASM_SAFE(insn) \ 1268 1269 : KVM_ASM_SAFE_OUTPUTS(vector, ign_error_code) \ ··· 1273 1274 1274 1275 #define kvm_asm_safe_ec(insn, error_code, inputs...) \ 1275 1276 ({ \ 1276 - uint8_t vector; \ 1277 + u8 vector; \ 1277 1278 \ 1278 1279 asm volatile(KVM_ASM_SAFE(insn) \ 1279 1280 : KVM_ASM_SAFE_OUTPUTS(vector, error_code) \ ··· 1284 1285 1285 1286 #define kvm_asm_safe_fep(insn, inputs...) 
\ 1286 1287 ({ \ 1287 - uint64_t ign_error_code; \ 1288 - uint8_t vector; \ 1288 + u64 ign_error_code; \ 1289 + u8 vector; \ 1289 1290 \ 1290 1291 asm volatile(KVM_ASM_SAFE_FEP(insn) \ 1291 1292 : KVM_ASM_SAFE_OUTPUTS(vector, ign_error_code) \ ··· 1296 1297 1297 1298 #define kvm_asm_safe_ec_fep(insn, error_code, inputs...) \ 1298 1299 ({ \ 1299 - uint8_t vector; \ 1300 + u8 vector; \ 1300 1301 \ 1301 1302 asm volatile(KVM_ASM_SAFE_FEP(insn) \ 1302 1303 : KVM_ASM_SAFE_OUTPUTS(vector, error_code) \ ··· 1306 1307 }) 1307 1308 1308 1309 #define BUILD_READ_U64_SAFE_HELPER(insn, _fep, _FEP) \ 1309 - static inline uint8_t insn##_safe ##_fep(uint32_t idx, uint64_t *val) \ 1310 + static inline u8 insn##_safe ##_fep(u32 idx, u64 *val) \ 1310 1311 { \ 1311 - uint64_t error_code; \ 1312 - uint8_t vector; \ 1313 - uint32_t a, d; \ 1312 + u64 error_code; \ 1313 + u8 vector; \ 1314 + u32 a, d; \ 1314 1315 \ 1315 1316 asm volatile(KVM_ASM_SAFE##_FEP(#insn) \ 1316 1317 : "=a"(a), "=d"(d), \ ··· 1318 1319 : "c"(idx) \ 1319 1320 : KVM_ASM_SAFE_CLOBBERS); \ 1320 1321 \ 1321 - *val = (uint64_t)a | ((uint64_t)d << 32); \ 1322 + *val = (u64)a | ((u64)d << 32); \ 1322 1323 return vector; \ 1323 1324 } 1324 1325 ··· 1334 1335 BUILD_READ_U64_SAFE_HELPERS(rdpmc) 1335 1336 BUILD_READ_U64_SAFE_HELPERS(xgetbv) 1336 1337 1337 - static inline uint8_t wrmsr_safe(uint32_t msr, uint64_t val) 1338 + static inline u8 wrmsr_safe(u32 msr, u64 val) 1338 1339 { 1339 1340 return kvm_asm_safe("wrmsr", "a"(val & -1u), "d"(val >> 32), "c"(msr)); 1340 1341 } 1341 1342 1342 - static inline uint8_t xsetbv_safe(uint32_t index, uint64_t value) 1343 + static inline u8 xsetbv_safe(u32 index, u64 value) 1343 1344 { 1344 1345 u32 eax = value; 1345 1346 u32 edx = value >> 32; ··· 1394 1395 return !!get_kvm_amd_param_integer("lbrv"); 1395 1396 } 1396 1397 1397 - uint64_t *vm_get_pte(struct kvm_vm *vm, uint64_t vaddr); 1398 + u64 *vm_get_pte(struct kvm_vm *vm, gva_t gva); 1398 1399 1399 - uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2, 1400 - uint64_t a3); 1401 - uint64_t __xen_hypercall(uint64_t nr, uint64_t a0, void *a1); 1402 - void xen_hypercall(uint64_t nr, uint64_t a0, void *a1); 1400 + u64 kvm_hypercall(u64 nr, u64 a0, u64 a1, u64 a2, u64 a3); 1401 + u64 __xen_hypercall(u64 nr, u64 a0, void *a1); 1402 + void xen_hypercall(u64 nr, u64 a0, void *a1); 1403 1403 1404 - static inline uint64_t __kvm_hypercall_map_gpa_range(uint64_t gpa, 1405 - uint64_t size, uint64_t flags) 1404 + static inline u64 __kvm_hypercall_map_gpa_range(gpa_t gpa, u64 size, u64 flags) 1406 1405 { 1407 1406 return kvm_hypercall(KVM_HC_MAP_GPA_RANGE, gpa, size >> PAGE_SHIFT, flags, 0); 1408 1407 } 1409 1408 1410 - static inline void kvm_hypercall_map_gpa_range(uint64_t gpa, uint64_t size, 1411 - uint64_t flags) 1409 + static inline void kvm_hypercall_map_gpa_range(gpa_t gpa, u64 size, u64 flags) 1412 1410 { 1413 - uint64_t ret = __kvm_hypercall_map_gpa_range(gpa, size, flags); 1411 + u64 ret = __kvm_hypercall_map_gpa_range(gpa, size, flags); 1414 1412 1415 1413 GUEST_ASSERT(!ret); 1416 1414 } ··· 1452 1456 asm volatile ("cli"); 1453 1457 } 1454 1458 1455 - void __vm_xsave_require_permission(uint64_t xfeature, const char *name); 1459 + void __vm_xsave_require_permission(u64 xfeature, const char *name); 1456 1460 1457 1461 #define vm_xsave_require_permission(xfeature) \ 1458 1462 __vm_xsave_require_permission(xfeature, #xfeature) ··· 1507 1511 void tdp_mmu_init(struct kvm_vm *vm, int pgtable_levels, 1508 1512 struct pte_masks *pte_masks); 1509 
1513 1510 - void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, uint64_t vaddr, 1511 - uint64_t paddr, int level); 1512 - void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, 1513 - uint64_t nr_bytes, int level); 1514 + void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, gva_t gva, 1515 + gpa_t gpa, int level); 1516 + void virt_map_level(struct kvm_vm *vm, gva_t gva, gpa_t gpa, 1517 + u64 nr_bytes, int level); 1514 1518 1515 1519 void vm_enable_tdp(struct kvm_vm *vm); 1516 1520 bool kvm_cpu_has_tdp(void); 1517 - void tdp_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr, uint64_t size); 1521 + void tdp_map(struct kvm_vm *vm, gpa_t l2_gpa, gpa_t gpa, u64 size); 1518 1522 void tdp_identity_map_default_memslots(struct kvm_vm *vm); 1519 - void tdp_identity_map_1g(struct kvm_vm *vm, uint64_t addr, uint64_t size); 1520 - uint64_t *tdp_get_pte(struct kvm_vm *vm, uint64_t l2_gpa); 1523 + void tdp_identity_map_1g(struct kvm_vm *vm, u64 addr, u64 size); 1524 + u64 *tdp_get_pte(struct kvm_vm *vm, u64 l2_gpa); 1521 1525 1522 1526 /* 1523 1527 * Basic CPU control in CR0
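rdmsr()/wrmsr() and the *_safe helpers built above all move 64-bit values through the EDX:EAX register pair. A standalone check of the split/join arithmetic they rely on:

	/* Standalone check of the EDX:EAX split/join used by rdmsr()/wrmsr() above. */
	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t val = 0x1122334455667788ULL;

		uint32_t a = (uint32_t)val;		/* low 32 bits  -> EAX */
		uint32_t d = (uint32_t)(val >> 32);	/* high 32 bits -> EDX */

		/* The join in rdmsr(), a | ((u64)d << 32), must round-trip. */
		assert((a | ((uint64_t)d << 32)) == val);
		assert(a == 0x55667788u && d == 0x11223344u);
		return 0;
	}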
+10 -10
tools/testing/selftests/kvm/include/x86/sev.h
··· 46 46 return is_sev_es_vm(vm) || vm->type == KVM_X86_SEV_VM; 47 47 } 48 48 49 - void sev_vm_launch(struct kvm_vm *vm, uint32_t policy); 50 - void sev_vm_launch_measure(struct kvm_vm *vm, uint8_t *measurement); 49 + void sev_vm_launch(struct kvm_vm *vm, u32 policy); 50 + void sev_vm_launch_measure(struct kvm_vm *vm, u8 *measurement); 51 51 void sev_vm_launch_finish(struct kvm_vm *vm); 52 - void snp_vm_launch_start(struct kvm_vm *vm, uint64_t policy); 52 + void snp_vm_launch_start(struct kvm_vm *vm, u64 policy); 53 53 void snp_vm_launch_update(struct kvm_vm *vm); 54 54 void snp_vm_launch_finish(struct kvm_vm *vm); 55 55 56 - struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code, 56 + struct kvm_vm *vm_sev_create_with_one_vcpu(u32 type, void *guest_code, 57 57 struct kvm_vcpu **cpu); 58 - void vm_sev_launch(struct kvm_vm *vm, uint64_t policy, uint8_t *measurement); 58 + void vm_sev_launch(struct kvm_vm *vm, u64 policy, u8 *measurement); 59 59 60 60 kvm_static_assert(SEV_RET_SUCCESS == 0); 61 61 ··· 85 85 unsigned long raw; \ 86 86 } sev_cmd = { .c = { \ 87 87 .id = (cmd), \ 88 - .data = (uint64_t)(arg), \ 88 + .data = (u64)(arg), \ 89 89 .sev_fd = (vm)->arch.sev_fd, \ 90 90 } }; \ 91 91 \ ··· 120 120 vm_ioctl(vm, KVM_MEMORY_ENCRYPT_REG_REGION, &range); 121 121 } 122 122 123 - static inline void sev_launch_update_data(struct kvm_vm *vm, vm_paddr_t gpa, 124 - uint64_t size) 123 + static inline void sev_launch_update_data(struct kvm_vm *vm, gpa_t gpa, 124 + u64 size) 125 125 { 126 126 struct kvm_sev_launch_update_data update_data = { 127 127 .uaddr = (unsigned long)addr_gpa2hva(vm, gpa), ··· 131 131 vm_sev_ioctl(vm, KVM_SEV_LAUNCH_UPDATE_DATA, &update_data); 132 132 } 133 133 134 - static inline void snp_launch_update_data(struct kvm_vm *vm, vm_paddr_t gpa, 135 - uint64_t hva, uint64_t size, uint8_t type) 134 + static inline void snp_launch_update_data(struct kvm_vm *vm, gpa_t gpa, 135 + u64 hva, u64 size, u8 type) 136 136 { 137 137 struct kvm_sev_snp_launch_update update_data = { 138 138 .uaddr = hva,
+1 -2
tools/testing/selftests/kvm/include/x86/smm.h
··· 8 8 #define SMRAM_MEMSLOT ((1 << 16) | 1) 9 9 #define SMRAM_PAGES (SMRAM_SIZE / PAGE_SIZE) 10 10 11 - void setup_smram(struct kvm_vm *vm, struct kvm_vcpu *vcpu, 12 - uint64_t smram_gpa, 11 + void setup_smram(struct kvm_vm *vm, struct kvm_vcpu *vcpu, u64 smram_gpa, 13 12 const void *smi_handler, size_t handler_size); 14 13 15 14 void inject_smi(struct kvm_vcpu *vcpu);
+6 -6
tools/testing/selftests/kvm/include/x86/svm_util.h
··· 16 16 /* VMCB */ 17 17 struct vmcb *vmcb; /* gva */ 18 18 void *vmcb_hva; 19 - uint64_t vmcb_gpa; 19 + u64 vmcb_gpa; 20 20 21 21 /* host state-save area */ 22 22 struct vmcb_save_area *save_area; /* gva */ 23 23 void *save_area_hva; 24 - uint64_t save_area_gpa; 24 + u64 save_area_gpa; 25 25 26 26 /* MSR-Bitmap */ 27 27 void *msr; /* gva */ 28 28 void *msr_hva; 29 - uint64_t msr_gpa; 29 + u64 msr_gpa; 30 30 31 31 /* NPT */ 32 - uint64_t ncr3_gpa; 32 + u64 ncr3_gpa; 33 33 }; 34 34 35 35 static inline void vmmcall(void) ··· 56 56 "clgi\n" \ 57 57 ) 58 58 59 - struct svm_test_data *vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva); 59 + struct svm_test_data *vcpu_alloc_svm(struct kvm_vm *vm, gva_t *p_svm_gva); 60 60 void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp); 61 - void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa); 61 + void run_guest(struct vmcb *vmcb, u64 vmcb_gpa); 62 62 63 63 static inline bool kvm_cpu_has_npt(void) 64 64 {
+1 -1
tools/testing/selftests/kvm/include/x86/ucall.h
··· 6 6 7 7 #define UCALL_EXIT_REASON KVM_EXIT_IO 8 8 9 - static inline void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) 9 + static inline void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa) 10 10 { 11 11 } 12 12
+35 -35
tools/testing/selftests/kvm/include/x86/vmx.h
··· 285 285 }; 286 286 287 287 struct vmx_msr_entry { 288 - uint32_t index; 289 - uint32_t reserved; 290 - uint64_t value; 288 + u32 index; 289 + u32 reserved; 290 + u64 value; 291 291 } __attribute__ ((aligned(16))); 292 292 293 293 #include "evmcs.h" 294 294 295 - static inline int vmxon(uint64_t phys) 295 + static inline int vmxon(u64 phys) 296 296 { 297 - uint8_t ret; 297 + u8 ret; 298 298 299 299 __asm__ __volatile__ ("vmxon %[pa]; setna %[ret]" 300 300 : [ret]"=rm"(ret) ··· 309 309 __asm__ __volatile__("vmxoff"); 310 310 } 311 311 312 - static inline int vmclear(uint64_t vmcs_pa) 312 + static inline int vmclear(u64 vmcs_pa) 313 313 { 314 - uint8_t ret; 314 + u8 ret; 315 315 316 316 __asm__ __volatile__ ("vmclear %[pa]; setna %[ret]" 317 317 : [ret]"=rm"(ret) ··· 321 321 return ret; 322 322 } 323 323 324 - static inline int vmptrld(uint64_t vmcs_pa) 324 + static inline int vmptrld(u64 vmcs_pa) 325 325 { 326 - uint8_t ret; 326 + u8 ret; 327 327 328 328 if (enable_evmcs) 329 329 return -1; ··· 336 336 return ret; 337 337 } 338 338 339 - static inline int vmptrst(uint64_t *value) 339 + static inline int vmptrst(u64 *value) 340 340 { 341 - uint64_t tmp; 342 - uint8_t ret; 341 + u64 tmp; 342 + u8 ret; 343 343 344 344 if (enable_evmcs) 345 345 return evmcs_vmptrst(value); ··· 356 356 * A wrapper around vmptrst that ignores errors and returns zero if the 357 357 * vmptrst instruction fails. 358 358 */ 359 - static inline uint64_t vmptrstz(void) 359 + static inline u64 vmptrstz(void) 360 360 { 361 - uint64_t value = 0; 361 + u64 value = 0; 362 362 vmptrst(&value); 363 363 return value; 364 364 } ··· 391 391 "pop %%rcx;" 392 392 "pop %%rbp;" 393 393 : [ret]"=&a"(ret) 394 - : [host_rsp]"r"((uint64_t)HOST_RSP), 395 - [host_rip]"r"((uint64_t)HOST_RIP) 394 + : [host_rsp]"r"((u64)HOST_RSP), 395 + [host_rip]"r"((u64)HOST_RIP) 396 396 : "memory", "cc", "rbx", "r8", "r9", "r10", 397 397 "r11", "r12", "r13", "r14", "r15"); 398 398 return ret; ··· 426 426 "pop %%rcx;" 427 427 "pop %%rbp;" 428 428 : [ret]"=&a"(ret) 429 - : [host_rsp]"r"((uint64_t)HOST_RSP), 430 - [host_rip]"r"((uint64_t)HOST_RIP) 429 + : [host_rsp]"r"((u64)HOST_RSP), 430 + [host_rip]"r"((u64)HOST_RIP) 431 431 : "memory", "cc", "rbx", "r8", "r9", "r10", 432 432 "r11", "r12", "r13", "r14", "r15"); 433 433 return ret; ··· 447 447 "r10", "r11", "r12", "r13", "r14", "r15"); 448 448 } 449 449 450 - static inline int vmread(uint64_t encoding, uint64_t *value) 450 + static inline int vmread(u64 encoding, u64 *value) 451 451 { 452 - uint64_t tmp; 453 - uint8_t ret; 452 + u64 tmp; 453 + u8 ret; 454 454 455 455 if (enable_evmcs) 456 456 return evmcs_vmread(encoding, value); ··· 468 468 * A wrapper around vmread that ignores errors and returns zero if the 469 469 * vmread instruction fails. 
470 470 */ 471 - static inline uint64_t vmreadz(uint64_t encoding) 471 + static inline u64 vmreadz(u64 encoding) 472 472 { 473 - uint64_t value = 0; 473 + u64 value = 0; 474 474 vmread(encoding, &value); 475 475 return value; 476 476 } 477 477 478 - static inline int vmwrite(uint64_t encoding, uint64_t value) 478 + static inline int vmwrite(u64 encoding, u64 value) 479 479 { 480 - uint8_t ret; 480 + u8 ret; 481 481 482 482 if (enable_evmcs) 483 483 return evmcs_vmwrite(encoding, value); ··· 490 490 return ret; 491 491 } 492 492 493 - static inline uint32_t vmcs_revision(void) 493 + static inline u32 vmcs_revision(void) 494 494 { 495 495 return rdmsr(MSR_IA32_VMX_BASIC); 496 496 } 497 497 498 498 struct vmx_pages { 499 499 void *vmxon_hva; 500 - uint64_t vmxon_gpa; 500 + u64 vmxon_gpa; 501 501 void *vmxon; 502 502 503 503 void *vmcs_hva; 504 - uint64_t vmcs_gpa; 504 + u64 vmcs_gpa; 505 505 void *vmcs; 506 506 507 507 void *msr_hva; 508 - uint64_t msr_gpa; 508 + u64 msr_gpa; 509 509 void *msr; 510 510 511 511 void *shadow_vmcs_hva; 512 - uint64_t shadow_vmcs_gpa; 512 + u64 shadow_vmcs_gpa; 513 513 void *shadow_vmcs; 514 514 515 515 void *vmread_hva; 516 - uint64_t vmread_gpa; 516 + u64 vmread_gpa; 517 517 void *vmread; 518 518 519 519 void *vmwrite_hva; 520 - uint64_t vmwrite_gpa; 520 + u64 vmwrite_gpa; 521 521 void *vmwrite; 522 522 523 523 void *apic_access_hva; 524 - uint64_t apic_access_gpa; 524 + u64 apic_access_gpa; 525 525 void *apic_access; 526 526 527 - uint64_t eptp_gpa; 527 + u64 eptp_gpa; 528 528 }; 529 529 530 530 union vmx_basic { ··· 550 550 }; 551 551 }; 552 552 553 - struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva); 553 + struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, gva_t *p_vmx_gva); 554 554 bool prepare_for_vmx_operation(struct vmx_pages *vmx); 555 555 void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp); 556 556 bool load_vmcs(struct vmx_pages *vmx);
+27 -27
tools/testing/selftests/kvm/kvm_page_table_test.c
··· 46 46 47 47 struct test_args { 48 48 struct kvm_vm *vm; 49 - uint64_t guest_test_virt_mem; 50 - uint64_t host_page_size; 51 - uint64_t host_num_pages; 52 - uint64_t large_page_size; 53 - uint64_t large_num_pages; 54 - uint64_t host_pages_per_lpage; 49 + u64 guest_test_virt_mem; 50 + u64 host_page_size; 51 + u64 host_num_pages; 52 + u64 large_page_size; 53 + u64 large_num_pages; 54 + u64 host_pages_per_lpage; 55 55 enum vm_mem_backing_src_type src_type; 56 56 struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; 57 57 }; ··· 63 63 static enum test_stage guest_test_stage; 64 64 65 65 /* Host variables */ 66 - static uint32_t nr_vcpus = 1; 66 + static u32 nr_vcpus = 1; 67 67 static struct test_args test_args; 68 68 static enum test_stage *current_stage; 69 69 static bool host_quit; ··· 77 77 * This will be set to the topmost valid physical address minus 78 78 * the test memory size. 79 79 */ 80 - static uint64_t guest_test_phys_mem; 80 + static u64 guest_test_phys_mem; 81 81 82 82 /* 83 83 * Guest virtual memory offset of the testing memory slot. 84 84 * Must not conflict with identity mapped test code. 85 85 */ 86 - static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM; 86 + static u64 guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM; 87 87 88 88 static void guest_code(bool do_write) 89 89 { 90 90 struct test_args *p = &test_args; 91 91 enum test_stage *current_stage = &guest_test_stage; 92 - uint64_t addr; 92 + u64 addr; 93 93 int i, j; 94 94 95 95 while (true) { ··· 113 113 case KVM_CREATE_MAPPINGS: 114 114 for (i = 0; i < p->large_num_pages; i++) { 115 115 if (do_write) 116 - *(uint64_t *)addr = 0x0123456789ABCDEF; 116 + *(u64 *)addr = 0x0123456789ABCDEF; 117 117 else 118 - READ_ONCE(*(uint64_t *)addr); 118 + READ_ONCE(*(u64 *)addr); 119 119 120 120 addr += p->large_page_size; 121 121 } ··· 131 131 case KVM_UPDATE_MAPPINGS: 132 132 if (p->src_type == VM_MEM_SRC_ANONYMOUS) { 133 133 for (i = 0; i < p->host_num_pages; i++) { 134 - *(uint64_t *)addr = 0x0123456789ABCDEF; 134 + *(u64 *)addr = 0x0123456789ABCDEF; 135 135 addr += p->host_page_size; 136 136 } 137 137 break; ··· 142 142 * Write to the first host page in each large 143 143 * page region, and triger break of large pages. 
144 144 */ 145 - *(uint64_t *)addr = 0x0123456789ABCDEF; 145 + *(u64 *)addr = 0x0123456789ABCDEF; 146 146 147 147 /* 148 148 * Access the middle host pages in each large ··· 152 152 */ 153 153 addr += p->large_page_size / 2; 154 154 for (j = 0; j < p->host_pages_per_lpage / 2; j++) { 155 - READ_ONCE(*(uint64_t *)addr); 155 + READ_ONCE(*(u64 *)addr); 156 156 addr += p->host_page_size; 157 157 } 158 158 } ··· 167 167 */ 168 168 case KVM_ADJUST_MAPPINGS: 169 169 for (i = 0; i < p->host_num_pages; i++) { 170 - READ_ONCE(*(uint64_t *)addr); 170 + READ_ONCE(*(u64 *)addr); 171 171 addr += p->host_page_size; 172 172 } 173 173 break; ··· 227 227 } 228 228 229 229 struct test_params { 230 - uint64_t phys_offset; 231 - uint64_t test_mem_size; 230 + u64 phys_offset; 231 + u64 test_mem_size; 232 232 enum vm_mem_backing_src_type src_type; 233 233 }; 234 234 ··· 237 237 int ret; 238 238 struct test_params *p = arg; 239 239 enum vm_mem_backing_src_type src_type = p->src_type; 240 - uint64_t large_page_size = get_backing_src_pagesz(src_type); 241 - uint64_t guest_page_size = vm_guest_mode_params[mode].page_size; 242 - uint64_t host_page_size = getpagesize(); 243 - uint64_t test_mem_size = p->test_mem_size; 244 - uint64_t guest_num_pages; 245 - uint64_t alignment; 240 + u64 large_page_size = get_backing_src_pagesz(src_type); 241 + u64 guest_page_size = vm_guest_mode_params[mode].page_size; 242 + u64 host_page_size = getpagesize(); 243 + u64 test_mem_size = p->test_mem_size; 244 + u64 guest_num_pages; 245 + u64 alignment; 246 246 void *host_test_mem; 247 247 struct kvm_vm *vm; 248 248 ··· 281 281 virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages); 282 282 283 283 /* Cache the HVA pointer of the region */ 284 - host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem); 284 + host_test_mem = addr_gpa2hva(vm, (gpa_t)guest_test_phys_mem); 285 285 286 286 /* Export shared structure test_args to guest */ 287 287 sync_global_to_guest(vm, test_args); ··· 292 292 ret = sem_init(&test_stage_completed, 0, 0); 293 293 TEST_ASSERT(ret == 0, "Error in sem_init"); 294 294 295 - current_stage = addr_gva2hva(vm, (vm_vaddr_t)(&guest_test_stage)); 295 + current_stage = addr_gva2hva(vm, (gva_t)(&guest_test_stage)); 296 296 *current_stage = NUM_TEST_STAGES; 297 297 298 298 pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode)); ··· 304 304 pr_info("Guest physical test memory offset: 0x%lx\n", 305 305 guest_test_phys_mem); 306 306 pr_info("Host virtual test memory offset: 0x%lx\n", 307 - (uint64_t)host_test_mem); 307 + (u64)host_test_mem); 308 308 pr_info("Number of testing vCPUs: %d\n", nr_vcpus); 309 309 310 310 return vm;
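The test_args in kvm_page_table_test.c describe the same test region in three units: host pages, backing-source large pages, and host pages per large page. A standalone sketch of that arithmetic for an illustrative 1 GiB region backed by 2 MiB pages on a 4 KiB host; the real run_test() additionally aligns the region to the backing page size, which is omitted here:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t test_mem_size = 1ULL << 30;	/* 1 GiB of test memory */
		uint64_t large_page_size = 2 << 20;	/* 2 MiB THP/hugetlb backing */
		uint64_t host_page_size = 4096;		/* 4 KiB host pages */

		uint64_t host_num_pages = test_mem_size / host_page_size;
		uint64_t large_num_pages = test_mem_size / large_page_size;
		uint64_t host_pages_per_lpage = large_page_size / host_page_size;

		assert(host_num_pages == 262144);
		assert(large_num_pages == 512);
		assert(host_pages_per_lpage == 512);
		return 0;
	}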
+3 -3
tools/testing/selftests/kvm/lib/arm64/gic.c
··· 50 50 51 51 void gic_init(enum gic_type type, unsigned int nr_cpus) 52 52 { 53 - uint32_t cpu = guest_get_vcpuid(); 53 + u32 cpu = guest_get_vcpuid(); 54 54 55 55 GUEST_ASSERT(type < GIC_TYPE_MAX); 56 56 GUEST_ASSERT(nr_cpus); ··· 73 73 74 74 unsigned int gic_get_and_ack_irq(void) 75 75 { 76 - uint64_t irqstat; 76 + u64 irqstat; 77 77 unsigned int intid; 78 78 79 79 GUEST_ASSERT(gic_common_ops); ··· 102 102 gic_common_ops->gic_set_eoi_split(split); 103 103 } 104 104 105 - void gic_set_priority_mask(uint64_t pmr) 105 + void gic_set_priority_mask(u64 pmr) 106 106 { 107 107 GUEST_ASSERT(gic_common_ops); 108 108 gic_common_ops->gic_set_priority_mask(pmr);
+13 -13
tools/testing/selftests/kvm/lib/arm64/gic_private.h
··· 12 12 void (*gic_cpu_init)(unsigned int cpu); 13 13 void (*gic_irq_enable)(unsigned int intid); 14 14 void (*gic_irq_disable)(unsigned int intid); 15 - uint64_t (*gic_read_iar)(void); 16 - void (*gic_write_eoir)(uint32_t irq); 17 - void (*gic_write_dir)(uint32_t irq); 15 + u64 (*gic_read_iar)(void); 16 + void (*gic_write_eoir)(u32 irq); 17 + void (*gic_write_dir)(u32 irq); 18 18 void (*gic_set_eoi_split)(bool split); 19 - void (*gic_set_priority_mask)(uint64_t mask); 20 - void (*gic_set_priority)(uint32_t intid, uint32_t prio); 21 - void (*gic_irq_set_active)(uint32_t intid); 22 - void (*gic_irq_clear_active)(uint32_t intid); 23 - bool (*gic_irq_get_active)(uint32_t intid); 24 - void (*gic_irq_set_pending)(uint32_t intid); 25 - void (*gic_irq_clear_pending)(uint32_t intid); 26 - bool (*gic_irq_get_pending)(uint32_t intid); 27 - void (*gic_irq_set_config)(uint32_t intid, bool is_edge); 28 - void (*gic_irq_set_group)(uint32_t intid, bool group); 19 + void (*gic_set_priority_mask)(u64 mask); 20 + void (*gic_set_priority)(u32 intid, u32 prio); 21 + void (*gic_irq_set_active)(u32 intid); 22 + void (*gic_irq_clear_active)(u32 intid); 23 + bool (*gic_irq_get_active)(u32 intid); 24 + void (*gic_irq_set_pending)(u32 intid); 25 + void (*gic_irq_clear_pending)(u32 intid); 26 + bool (*gic_irq_get_pending)(u32 intid); 27 + void (*gic_irq_set_config)(u32 intid, bool is_edge); 28 + void (*gic_irq_set_group)(u32 intid, bool group); 29 29 }; 30 30 31 31 extern const struct gic_common_ops gicv3_ops;
+45 -45
tools/testing/selftests/kvm/lib/arm64/gic_v3.c
··· 50 50 } 51 51 } 52 52 53 - static inline volatile void *gicr_base_cpu(uint32_t cpu) 53 + static inline volatile void *gicr_base_cpu(u32 cpu) 54 54 { 55 55 /* Align all the redistributors sequentially */ 56 56 return GICR_BASE_GVA + cpu * SZ_64K * 2; 57 57 } 58 58 59 - static void gicv3_gicr_wait_for_rwp(uint32_t cpu) 59 + static void gicv3_gicr_wait_for_rwp(u32 cpu) 60 60 { 61 61 unsigned int count = 100000; /* 1s */ 62 62 ··· 66 66 } 67 67 } 68 68 69 - static void gicv3_wait_for_rwp(uint32_t cpu_or_dist) 69 + static void gicv3_wait_for_rwp(u32 cpu_or_dist) 70 70 { 71 71 if (cpu_or_dist & DIST_BIT) 72 72 gicv3_gicd_wait_for_rwp(); ··· 91 91 return INVALID_RANGE; 92 92 } 93 93 94 - static uint64_t gicv3_read_iar(void) 94 + static u64 gicv3_read_iar(void) 95 95 { 96 - uint64_t irqstat = read_sysreg_s(SYS_ICC_IAR1_EL1); 96 + u64 irqstat = read_sysreg_s(SYS_ICC_IAR1_EL1); 97 97 98 98 dsb(sy); 99 99 return irqstat; 100 100 } 101 101 102 - static void gicv3_write_eoir(uint32_t irq) 102 + static void gicv3_write_eoir(u32 irq) 103 103 { 104 104 write_sysreg_s(irq, SYS_ICC_EOIR1_EL1); 105 105 isb(); 106 106 } 107 107 108 - static void gicv3_write_dir(uint32_t irq) 108 + static void gicv3_write_dir(u32 irq) 109 109 { 110 110 write_sysreg_s(irq, SYS_ICC_DIR_EL1); 111 111 isb(); 112 112 } 113 113 114 - static void gicv3_set_priority_mask(uint64_t mask) 114 + static void gicv3_set_priority_mask(u64 mask) 115 115 { 116 116 write_sysreg_s(mask, SYS_ICC_PMR_EL1); 117 117 } 118 118 119 119 static void gicv3_set_eoi_split(bool split) 120 120 { 121 - uint32_t val; 121 + u32 val; 122 122 123 123 /* 124 124 * All other fields are read-only, so no need to read CTLR first. In ··· 129 129 isb(); 130 130 } 131 131 132 - uint32_t gicv3_reg_readl(uint32_t cpu_or_dist, uint64_t offset) 132 + u32 gicv3_reg_readl(u32 cpu_or_dist, u64 offset) 133 133 { 134 134 volatile void *base = cpu_or_dist & DIST_BIT ? GICD_BASE_GVA 135 135 : sgi_base_from_redist(gicr_base_cpu(cpu_or_dist)); 136 136 return readl(base + offset); 137 137 } 138 138 139 - void gicv3_reg_writel(uint32_t cpu_or_dist, uint64_t offset, uint32_t reg_val) 139 + void gicv3_reg_writel(u32 cpu_or_dist, u64 offset, u32 reg_val) 140 140 { 141 141 volatile void *base = cpu_or_dist & DIST_BIT ? GICD_BASE_GVA 142 142 : sgi_base_from_redist(gicr_base_cpu(cpu_or_dist)); 143 143 writel(reg_val, base + offset); 144 144 } 145 145 146 - uint32_t gicv3_getl_fields(uint32_t cpu_or_dist, uint64_t offset, uint32_t mask) 146 + u32 gicv3_getl_fields(u32 cpu_or_dist, u64 offset, u32 mask) 147 147 { 148 148 return gicv3_reg_readl(cpu_or_dist, offset) & mask; 149 149 } 150 150 151 - void gicv3_setl_fields(uint32_t cpu_or_dist, uint64_t offset, 152 - uint32_t mask, uint32_t reg_val) 151 + void gicv3_setl_fields(u32 cpu_or_dist, u64 offset, 152 + u32 mask, u32 reg_val) 153 153 { 154 - uint32_t tmp = gicv3_reg_readl(cpu_or_dist, offset) & ~mask; 154 + u32 tmp = gicv3_reg_readl(cpu_or_dist, offset) & ~mask; 155 155 156 156 tmp |= (reg_val & mask); 157 157 gicv3_reg_writel(cpu_or_dist, offset, tmp); ··· 165 165 * map that doesn't implement it; like GICR_WAKER's offset of 0x0014 being 166 166 * marked as "Reserved" in the Distributor map. 
167 167 */ 168 - static void gicv3_access_reg(uint32_t intid, uint64_t offset, 169 - uint32_t reg_bits, uint32_t bits_per_field, 170 - bool write, uint32_t *val) 168 + static void gicv3_access_reg(u32 intid, u64 offset, 169 + u32 reg_bits, u32 bits_per_field, 170 + bool write, u32 *val) 171 171 { 172 - uint32_t cpu = guest_get_vcpuid(); 172 + u32 cpu = guest_get_vcpuid(); 173 173 enum gicv3_intid_range intid_range = get_intid_range(intid); 174 - uint32_t fields_per_reg, index, mask, shift; 175 - uint32_t cpu_or_dist; 174 + u32 fields_per_reg, index, mask, shift; 175 + u32 cpu_or_dist; 176 176 177 177 GUEST_ASSERT(bits_per_field <= reg_bits); 178 178 GUEST_ASSERT(!write || *val < (1U << bits_per_field)); ··· 197 197 *val = gicv3_getl_fields(cpu_or_dist, offset, mask) >> shift; 198 198 } 199 199 200 - static void gicv3_write_reg(uint32_t intid, uint64_t offset, 201 - uint32_t reg_bits, uint32_t bits_per_field, uint32_t val) 200 + static void gicv3_write_reg(u32 intid, u64 offset, 201 + u32 reg_bits, u32 bits_per_field, u32 val) 202 202 { 203 203 gicv3_access_reg(intid, offset, reg_bits, 204 204 bits_per_field, true, &val); 205 205 } 206 206 207 - static uint32_t gicv3_read_reg(uint32_t intid, uint64_t offset, 208 - uint32_t reg_bits, uint32_t bits_per_field) 207 + static u32 gicv3_read_reg(u32 intid, u64 offset, 208 + u32 reg_bits, u32 bits_per_field) 209 209 { 210 - uint32_t val; 210 + u32 val; 211 211 212 212 gicv3_access_reg(intid, offset, reg_bits, 213 213 bits_per_field, false, &val); 214 214 return val; 215 215 } 216 216 217 - static void gicv3_set_priority(uint32_t intid, uint32_t prio) 217 + static void gicv3_set_priority(u32 intid, u32 prio) 218 218 { 219 219 gicv3_write_reg(intid, GICD_IPRIORITYR, 32, 8, prio); 220 220 } 221 221 222 222 /* Sets the intid to be level-sensitive or edge-triggered. */ 223 - static void gicv3_irq_set_config(uint32_t intid, bool is_edge) 223 + static void gicv3_irq_set_config(u32 intid, bool is_edge) 224 224 { 225 - uint32_t val; 225 + u32 val; 226 226 227 227 /* N/A for private interrupts. */ 228 228 GUEST_ASSERT(get_intid_range(intid) == SPI_RANGE); ··· 230 230 gicv3_write_reg(intid, GICD_ICFGR, 32, 2, val); 231 231 } 232 232 233 - static void gicv3_irq_enable(uint32_t intid) 233 + static void gicv3_irq_enable(u32 intid) 234 234 { 235 235 bool is_spi = get_intid_range(intid) == SPI_RANGE; 236 - uint32_t cpu = guest_get_vcpuid(); 236 + u32 cpu = guest_get_vcpuid(); 237 237 238 238 gicv3_write_reg(intid, GICD_ISENABLER, 32, 1, 1); 239 239 gicv3_wait_for_rwp(is_spi ? DIST_BIT : cpu); 240 240 } 241 241 242 - static void gicv3_irq_disable(uint32_t intid) 242 + static void gicv3_irq_disable(u32 intid) 243 243 { 244 244 bool is_spi = get_intid_range(intid) == SPI_RANGE; 245 - uint32_t cpu = guest_get_vcpuid(); 245 + u32 cpu = guest_get_vcpuid(); 246 246 247 247 gicv3_write_reg(intid, GICD_ICENABLER, 32, 1, 1); 248 248 gicv3_wait_for_rwp(is_spi ? 
DIST_BIT : cpu); 249 249 } 250 250 251 - static void gicv3_irq_set_active(uint32_t intid) 251 + static void gicv3_irq_set_active(u32 intid) 252 252 { 253 253 gicv3_write_reg(intid, GICD_ISACTIVER, 32, 1, 1); 254 254 } 255 255 256 - static void gicv3_irq_clear_active(uint32_t intid) 256 + static void gicv3_irq_clear_active(u32 intid) 257 257 { 258 258 gicv3_write_reg(intid, GICD_ICACTIVER, 32, 1, 1); 259 259 } 260 260 261 - static bool gicv3_irq_get_active(uint32_t intid) 261 + static bool gicv3_irq_get_active(u32 intid) 262 262 { 263 263 return gicv3_read_reg(intid, GICD_ISACTIVER, 32, 1); 264 264 } 265 265 266 - static void gicv3_irq_set_pending(uint32_t intid) 266 + static void gicv3_irq_set_pending(u32 intid) 267 267 { 268 268 gicv3_write_reg(intid, GICD_ISPENDR, 32, 1, 1); 269 269 } 270 270 271 - static void gicv3_irq_clear_pending(uint32_t intid) 271 + static void gicv3_irq_clear_pending(u32 intid) 272 272 { 273 273 gicv3_write_reg(intid, GICD_ICPENDR, 32, 1, 1); 274 274 } 275 275 276 - static bool gicv3_irq_get_pending(uint32_t intid) 276 + static bool gicv3_irq_get_pending(u32 intid) 277 277 { 278 278 return gicv3_read_reg(intid, GICD_ISPENDR, 32, 1); 279 279 } 280 280 281 281 static void gicv3_enable_redist(volatile void *redist_base) 282 282 { 283 - uint32_t val = readl(redist_base + GICR_WAKER); 283 + u32 val = readl(redist_base + GICR_WAKER); 284 284 unsigned int count = 100000; /* 1s */ 285 285 286 286 val &= ~GICR_WAKER_ProcessorSleep; ··· 293 293 } 294 294 } 295 295 296 - static void gicv3_set_group(uint32_t intid, bool grp) 296 + static void gicv3_set_group(u32 intid, bool grp) 297 297 { 298 - uint32_t cpu_or_dist; 299 - uint32_t val; 298 + u32 cpu_or_dist; 299 + u32 val; 300 300 301 301 cpu_or_dist = (get_intid_range(intid) == SPI_RANGE) ? DIST_BIT : guest_get_vcpuid(); 302 302 val = gicv3_reg_readl(cpu_or_dist, GICD_IGROUPR + (intid / 32) * 4); ··· 424 424 .gic_irq_set_group = gicv3_set_group, 425 425 }; 426 426 427 - void gic_rdist_enable_lpis(vm_paddr_t cfg_table, size_t cfg_table_size, 428 - vm_paddr_t pend_table) 427 + void gic_rdist_enable_lpis(gpa_t cfg_table, size_t cfg_table_size, 428 + gpa_t pend_table) 429 429 { 430 430 volatile void *rdist_base = gicr_base_cpu(guest_get_vcpuid()); 431 431
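gicv3_access_reg() above locates a per-INTID field inside a banked 32-bit GIC register given bits_per_field; the computation itself is elided in this hunk, so the sketch below only follows the usual GIC register layout (e.g. 8-bit priority fields in GICD_IPRIORITYR, 1-bit enable fields in GICD_ISENABLER) and is illustrative rather than a copy of the helper:

	/* Standalone sketch of per-INTID field indexing in 32-bit GIC registers.
	 * Field widths are assumed to be well under 32 bits. */
	#include <assert.h>
	#include <stdint.h>

	struct gic_field {
		uint64_t reg_offset;	/* offset of the 32-bit register holding the field */
		uint32_t shift;		/* bit position of the field inside that register */
		uint32_t mask;		/* field mask, already shifted into place */
	};

	static struct gic_field locate(uint64_t base, uint32_t intid, uint32_t bits_per_field)
	{
		uint32_t fields_per_reg = 32 / bits_per_field;
		struct gic_field f = {
			.reg_offset = base + (intid / fields_per_reg) * 4,
			.shift = (intid % fields_per_reg) * bits_per_field,
		};

		f.mask = ((1u << bits_per_field) - 1) << f.shift;
		return f;
	}

	int main(void)
	{
		/* 8-bit priority fields: INTID 5 lives in the second priority register. */
		struct gic_field prio = locate(0x400 /* GICD_IPRIORITYR */, 5, 8);
		/* 1-bit enable fields: INTID 40 lives in the second ISENABLER register. */
		struct gic_field en = locate(0x100 /* GICD_ISENABLER */, 40, 1);

		assert(prio.reg_offset == 0x404 && prio.shift == 8);
		assert(en.reg_offset == 0x104 && en.shift == 8 && en.mask == 0x100);
		return 0;
	}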
+5 -6
tools/testing/selftests/kvm/lib/arm64/gic_v3_its.c
··· 54 54 return -1; 55 55 } 56 56 57 - static void its_install_table(unsigned int type, vm_paddr_t base, size_t size) 57 + static void its_install_table(unsigned int type, gpa_t base, size_t size) 58 58 { 59 59 unsigned long offset = its_find_baser(type); 60 60 u64 baser; ··· 69 69 its_write_u64(offset, baser); 70 70 } 71 71 72 - static void its_install_cmdq(vm_paddr_t base, size_t size) 72 + static void its_install_cmdq(gpa_t base, size_t size) 73 73 { 74 74 u64 cbaser; 75 75 ··· 82 82 its_write_u64(GITS_CBASER, cbaser); 83 83 } 84 84 85 - void its_init(vm_paddr_t coll_tbl, size_t coll_tbl_sz, 86 - vm_paddr_t device_tbl, size_t device_tbl_sz, 87 - vm_paddr_t cmdq, size_t cmdq_size) 85 + void its_init(gpa_t coll_tbl, size_t coll_tbl_sz, gpa_t device_tbl, 86 + size_t device_tbl_sz, gpa_t cmdq, size_t cmdq_size) 88 87 { 89 88 u32 ctlr; 90 89 ··· 203 204 } 204 205 } 205 206 206 - void its_send_mapd_cmd(void *cmdq_base, u32 device_id, vm_paddr_t itt_base, 207 + void its_send_mapd_cmd(void *cmdq_base, u32 device_id, gpa_t itt_base, 207 208 size_t itt_size, bool valid) 208 209 { 209 210 struct its_cmd_block cmd = {};
+81 -82
tools/testing/selftests/kvm/lib/arm64/processor.c
··· 19 19 20 20 #define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN 0xac0000 21 21 22 - static vm_vaddr_t exception_handlers; 22 + static gva_t exception_handlers; 23 23 24 - static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva) 24 + static u64 pgd_index(struct kvm_vm *vm, gva_t gva) 25 25 { 26 26 unsigned int shift = (vm->mmu.pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift; 27 - uint64_t mask = (1UL << (vm->va_bits - shift)) - 1; 27 + u64 mask = (1UL << (vm->va_bits - shift)) - 1; 28 28 29 29 return (gva >> shift) & mask; 30 30 } 31 31 32 - static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva) 32 + static u64 pud_index(struct kvm_vm *vm, gva_t gva) 33 33 { 34 34 unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift; 35 - uint64_t mask = (1UL << (vm->page_shift - 3)) - 1; 35 + u64 mask = (1UL << (vm->page_shift - 3)) - 1; 36 36 37 37 TEST_ASSERT(vm->mmu.pgtable_levels == 4, 38 38 "Mode %d does not have 4 page table levels", vm->mode); ··· 40 40 return (gva >> shift) & mask; 41 41 } 42 42 43 - static uint64_t pmd_index(struct kvm_vm *vm, vm_vaddr_t gva) 43 + static u64 pmd_index(struct kvm_vm *vm, gva_t gva) 44 44 { 45 45 unsigned int shift = (vm->page_shift - 3) + vm->page_shift; 46 - uint64_t mask = (1UL << (vm->page_shift - 3)) - 1; 46 + u64 mask = (1UL << (vm->page_shift - 3)) - 1; 47 47 48 48 TEST_ASSERT(vm->mmu.pgtable_levels >= 3, 49 49 "Mode %d does not have >= 3 page table levels", vm->mode); ··· 51 51 return (gva >> shift) & mask; 52 52 } 53 53 54 - static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva) 54 + static u64 pte_index(struct kvm_vm *vm, gva_t gva) 55 55 { 56 - uint64_t mask = (1UL << (vm->page_shift - 3)) - 1; 56 + u64 mask = (1UL << (vm->page_shift - 3)) - 1; 57 57 return (gva >> vm->page_shift) & mask; 58 58 } 59 59 ··· 63 63 (vm->pa_bits > 48 || vm->va_bits > 48); 64 64 } 65 65 66 - static uint64_t addr_pte(struct kvm_vm *vm, uint64_t pa, uint64_t attrs) 66 + static u64 addr_pte(struct kvm_vm *vm, u64 pa, u64 attrs) 67 67 { 68 - uint64_t pte; 68 + u64 pte; 69 69 70 70 if (use_lpa2_pte_format(vm)) { 71 71 pte = pa & PTE_ADDR_MASK_LPA2(vm->page_shift); ··· 81 81 return pte; 82 82 } 83 83 84 - static uint64_t pte_addr(struct kvm_vm *vm, uint64_t pte) 84 + static u64 pte_addr(struct kvm_vm *vm, u64 pte) 85 85 { 86 - uint64_t pa; 86 + u64 pa; 87 87 88 88 if (use_lpa2_pte_format(vm)) { 89 89 pa = pte & PTE_ADDR_MASK_LPA2(vm->page_shift); ··· 97 97 return pa; 98 98 } 99 99 100 - static uint64_t ptrs_per_pgd(struct kvm_vm *vm) 100 + static u64 ptrs_per_pgd(struct kvm_vm *vm) 101 101 { 102 102 unsigned int shift = (vm->mmu.pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift; 103 103 return 1 << (vm->va_bits - shift); 104 104 } 105 105 106 - static uint64_t __maybe_unused ptrs_per_pte(struct kvm_vm *vm) 106 + static u64 __maybe_unused ptrs_per_pte(struct kvm_vm *vm) 107 107 { 108 108 return 1 << (vm->page_shift - 3); 109 109 } ··· 121 121 vm->mmu.pgd_created = true; 122 122 } 123 123 124 - static void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, 125 - uint64_t flags) 124 + static void _virt_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa, 125 + u64 flags) 126 126 { 127 - uint8_t attr_idx = flags & (PTE_ATTRINDX_MASK >> PTE_ATTRINDX_SHIFT); 128 - uint64_t pg_attr; 129 - uint64_t *ptep; 127 + u8 attr_idx = flags & (PTE_ATTRINDX_MASK >> PTE_ATTRINDX_SHIFT); 128 + u64 pg_attr; 129 + u64 *ptep; 130 130 131 - TEST_ASSERT((vaddr % vm->page_size) == 0, 131 + TEST_ASSERT((gva % vm->page_size) == 0, 132 132 "Virtual address not 
on page boundary,\n" 133 - " vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size); 134 - TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, 135 - (vaddr >> vm->page_shift)), 136 - "Invalid virtual address, vaddr: 0x%lx", vaddr); 137 - TEST_ASSERT((paddr % vm->page_size) == 0, 138 - "Physical address not on page boundary,\n" 139 - " paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size); 140 - TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn, 141 - "Physical address beyond beyond maximum supported,\n" 142 - " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", 143 - paddr, vm->max_gfn, vm->page_size); 133 + " gva: 0x%lx vm->page_size: 0x%x", gva, vm->page_size); 134 + TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)), 135 + "Invalid virtual address, gva: 0x%lx", gva); 136 + TEST_ASSERT((gpa % vm->page_size) == 0, 137 + "Physical address not on page boundary,\n" 138 + " gpa: 0x%lx vm->page_size: 0x%x", gpa, vm->page_size); 139 + TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn, 140 + "Physical address beyond beyond maximum supported,\n" 141 + " gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", 142 + gpa, vm->max_gfn, vm->page_size); 144 143 145 - ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pgd_index(vm, vaddr) * 8; 144 + ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pgd_index(vm, gva) * 8; 146 145 if (!*ptep) 147 146 *ptep = addr_pte(vm, vm_alloc_page_table(vm), 148 147 PGD_TYPE_TABLE | PTE_VALID); 149 148 150 149 switch (vm->mmu.pgtable_levels) { 151 150 case 4: 152 - ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8; 151 + ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, gva) * 8; 153 152 if (!*ptep) 154 153 *ptep = addr_pte(vm, vm_alloc_page_table(vm), 155 154 PUD_TYPE_TABLE | PTE_VALID); 156 155 /* fall through */ 157 156 case 3: 158 - ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8; 157 + ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, gva) * 8; 159 158 if (!*ptep) 160 159 *ptep = addr_pte(vm, vm_alloc_page_table(vm), 161 160 PMD_TYPE_TABLE | PTE_VALID); 162 161 /* fall through */ 163 162 case 2: 164 - ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, vaddr) * 8; 163 + ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, gva) * 8; 165 164 break; 166 165 default: 167 166 TEST_FAIL("Page table levels must be 2, 3, or 4"); ··· 170 171 if (!use_lpa2_pte_format(vm)) 171 172 pg_attr |= PTE_SHARED; 172 173 173 - *ptep = addr_pte(vm, paddr, pg_attr); 174 + *ptep = addr_pte(vm, gpa, pg_attr); 174 175 } 175 176 176 - void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr) 177 + void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa) 177 178 { 178 - uint64_t attr_idx = MT_NORMAL; 179 + u64 attr_idx = MT_NORMAL; 179 180 180 - _virt_pg_map(vm, vaddr, paddr, attr_idx); 181 + _virt_pg_map(vm, gva, gpa, attr_idx); 181 182 } 182 183 183 - uint64_t *virt_get_pte_hva_at_level(struct kvm_vm *vm, vm_vaddr_t gva, int level) 184 + u64 *virt_get_pte_hva_at_level(struct kvm_vm *vm, gva_t gva, int level) 184 185 { 185 - uint64_t *ptep; 186 + u64 *ptep; 186 187 187 188 if (!vm->mmu.pgd_created) 188 189 goto unmapped_gva; ··· 224 225 exit(EXIT_FAILURE); 225 226 } 226 227 227 - uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva) 228 + u64 *virt_get_pte_hva(struct kvm_vm *vm, gva_t gva) 228 229 { 229 230 return virt_get_pte_hva_at_level(vm, gva, 3); 230 231 } 231 232 232 - vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) 233 + gpa_t addr_arch_gva2gpa(struct 
kvm_vm *vm, gva_t gva) 233 234 { 234 - uint64_t *ptep = virt_get_pte_hva(vm, gva); 235 + u64 *ptep = virt_get_pte_hva(vm, gva); 235 236 236 237 return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1)); 237 238 } 238 239 239 - static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level) 240 + static void pte_dump(FILE *stream, struct kvm_vm *vm, u8 indent, u64 page, int level) 240 241 { 241 242 #ifdef DEBUG 242 243 static const char * const type[] = { "", "pud", "pmd", "pte" }; 243 - uint64_t pte, *ptep; 244 + u64 pte, *ptep; 244 245 245 246 if (level == 4) 246 247 return; ··· 255 256 #endif 256 257 } 257 258 258 - void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) 259 + void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent) 259 260 { 260 261 int level = 4 - (vm->mmu.pgtable_levels - 1); 261 - uint64_t pgd, *ptep; 262 + u64 pgd, *ptep; 262 263 263 264 if (!vm->mmu.pgd_created) 264 265 return; ··· 297 298 { 298 299 struct kvm_vcpu_init default_init = { .target = -1, }; 299 300 struct kvm_vm *vm = vcpu->vm; 300 - uint64_t sctlr_el1, tcr_el1, ttbr0_el1; 301 + u64 sctlr_el1, tcr_el1, ttbr0_el1; 301 302 302 303 if (!init) { 303 304 kvm_get_default_vcpu_target(vm, &default_init); ··· 396 397 HCR_EL2_RW | HCR_EL2_TGE | HCR_EL2_E2H); 397 398 } 398 399 399 - void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent) 400 + void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, u8 indent) 400 401 { 401 - uint64_t pstate, pc; 402 + u64 pstate, pc; 402 403 403 404 pstate = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pstate)); 404 405 pc = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc)); ··· 409 410 410 411 void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code) 411 412 { 412 - vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code); 413 + vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (u64)guest_code); 413 414 } 414 415 415 - static struct kvm_vcpu *__aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, 416 + static struct kvm_vcpu *__aarch64_vcpu_add(struct kvm_vm *vm, u32 vcpu_id, 416 417 struct kvm_vcpu_init *init) 417 418 { 418 419 size_t stack_size; 419 - uint64_t stack_vaddr; 420 + gva_t stack_gva; 420 421 struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id); 421 422 422 423 stack_size = vm->page_size == 4096 ? 
DEFAULT_STACK_PGS * vm->page_size : 423 424 vm->page_size; 424 - stack_vaddr = __vm_vaddr_alloc(vm, stack_size, 425 - DEFAULT_ARM64_GUEST_STACK_VADDR_MIN, 426 - MEM_REGION_DATA); 425 + stack_gva = __vm_alloc(vm, stack_size, 426 + DEFAULT_ARM64_GUEST_STACK_VADDR_MIN, 427 + MEM_REGION_DATA); 427 428 428 429 aarch64_vcpu_setup(vcpu, init); 429 430 430 - vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_SP_EL1), stack_vaddr + stack_size); 431 + vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_SP_EL1), stack_gva + stack_size); 431 432 return vcpu; 432 433 } 433 434 434 - struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, 435 + struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, u32 vcpu_id, 435 436 struct kvm_vcpu_init *init, void *guest_code) 436 437 { 437 438 struct kvm_vcpu *vcpu = __aarch64_vcpu_add(vm, vcpu_id, init); ··· 441 442 return vcpu; 442 443 } 443 444 444 - struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) 445 + struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id) 445 446 { 446 447 return __aarch64_vcpu_add(vm, vcpu_id, NULL); 447 448 } ··· 458 459 459 460 for (i = 0; i < num; i++) { 460 461 vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.regs[i]), 461 - va_arg(ap, uint64_t)); 462 + va_arg(ap, u64)); 462 463 } 463 464 464 465 va_end(ap); 465 466 } 466 467 467 - void kvm_exit_unexpected_exception(int vector, uint64_t ec, bool valid_ec) 468 + void kvm_exit_unexpected_exception(int vector, u64 ec, bool valid_ec) 468 469 { 469 470 ucall(UCALL_UNHANDLED, 3, vector, ec, valid_ec); 470 471 while (1) ··· 497 498 { 498 499 extern char vectors; 499 500 500 - vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_VBAR_EL1), (uint64_t)&vectors); 501 + vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_VBAR_EL1), (u64)&vectors); 501 502 } 502 503 503 504 void route_exception(struct ex_regs *regs, int vector) ··· 535 536 536 537 void vm_init_descriptor_tables(struct kvm_vm *vm) 537 538 { 538 - vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers), 539 - vm->page_size, MEM_REGION_DATA); 539 + vm->handlers = __vm_alloc(vm, sizeof(struct handlers), vm->page_size, 540 + MEM_REGION_DATA); 540 541 541 - *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers; 542 + *(gva_t *)addr_gva2hva(vm, (gva_t)(&exception_handlers)) = vm->handlers; 542 543 } 543 544 544 545 void vm_install_sync_handler(struct kvm_vm *vm, int vector, int ec, ··· 562 563 handlers->exception_handlers[vector][0] = handler; 563 564 } 564 565 565 - uint32_t guest_get_vcpuid(void) 566 + u32 guest_get_vcpuid(void) 566 567 { 567 568 return read_sysreg(tpidr_el1); 568 569 } 569 570 570 - static uint32_t max_ipa_for_page_size(uint32_t vm_ipa, uint32_t gran, 571 - uint32_t not_sup_val, uint32_t ipa52_min_val) 571 + static u32 max_ipa_for_page_size(u32 vm_ipa, u32 gran, 572 + u32 not_sup_val, u32 ipa52_min_val) 572 573 { 573 574 if (gran == not_sup_val) 574 575 return 0; ··· 578 579 return min(vm_ipa, 48U); 579 580 } 580 581 581 - void aarch64_get_supported_page_sizes(uint32_t ipa, uint32_t *ipa4k, 582 - uint32_t *ipa16k, uint32_t *ipa64k) 582 + void aarch64_get_supported_page_sizes(u32 ipa, u32 *ipa4k, 583 + u32 *ipa16k, u32 *ipa64k) 583 584 { 584 585 struct kvm_vcpu_init preferred_init; 585 586 int kvm_fd, vm_fd, vcpu_fd, err; 586 - uint64_t val; 587 - uint32_t gran; 587 + u64 val; 588 + u32 gran; 588 589 struct kvm_one_reg reg = { 589 590 .id = KVM_ARM64_SYS_REG(SYS_ID_AA64MMFR0_EL1), 590 - .addr = (uint64_t)&val, 591 + .addr = (u64)&val, 591 592 }; 592 593 593 594 kvm_fd = 
open_kvm_dev_path_or_exit(); ··· 645 646 : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7") 646 647 647 648 648 - void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1, 649 - uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5, 650 - uint64_t arg6, struct arm_smccc_res *res) 649 + void smccc_hvc(u32 function_id, u64 arg0, u64 arg1, 650 + u64 arg2, u64 arg3, u64 arg4, u64 arg5, 651 + u64 arg6, struct arm_smccc_res *res) 651 652 { 652 653 __smccc_call(hvc, function_id, arg0, arg1, arg2, arg3, arg4, arg5, 653 654 arg6, res); 654 655 } 655 656 656 - void smccc_smc(uint32_t function_id, uint64_t arg0, uint64_t arg1, 657 - uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5, 658 - uint64_t arg6, struct arm_smccc_res *res) 657 + void smccc_smc(u32 function_id, u64 arg0, u64 arg1, 658 + u64 arg2, u64 arg3, u64 arg4, u64 arg5, 659 + u64 arg6, struct arm_smccc_res *res) 659 660 { 660 661 __smccc_call(smc, function_id, arg0, arg1, arg2, arg3, arg4, arg5, 661 662 arg6, res); ··· 670 671 guest_modes_append_default(); 671 672 } 672 673 673 - void vm_vaddr_populate_bitmap(struct kvm_vm *vm) 674 + void vm_populate_gva_bitmap(struct kvm_vm *vm) 674 675 { 675 676 /* 676 677 * arm64 selftests use only TTBR0_EL1, meaning that the valid VA space
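Each level of the page-table walk earlier in this file resolves page_shift - 3 bits of the GVA (512 eight-byte entries per 4K table). A worked example of the pgd/pud/pmd/pte_index() formulas for the common 4-level, 4K-page, 48-bit-VA configuration:

/* Worked example only; the concrete numbers assume 4K pages and va_bits = 48. */
unsigned int page_shift = 12, levels = 4, va_bits = 48;
unsigned int bits_per_level = page_shift - 3;				/*  9 */
unsigned int pgd_shift = (levels - 1) * bits_per_level + page_shift;	/* 39 */
unsigned int pud_shift = 2 * bits_per_level + page_shift;		/* 30 */
unsigned int pmd_shift = bits_per_level + page_shift;			/* 21 */
unsigned int pte_shift = page_shift;					/* 12 */
unsigned int pgd_bits  = va_bits - pgd_shift;	/* 9-bit top-level index */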
+6 -6
tools/testing/selftests/kvm/lib/arm64/ucall.c
··· 6 6 */ 7 7 #include "kvm_util.h" 8 8 9 - vm_vaddr_t *ucall_exit_mmio_addr; 9 + gva_t *ucall_exit_mmio_addr; 10 10 11 - void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) 11 + void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa) 12 12 { 13 - vm_vaddr_t mmio_gva = vm_vaddr_unused_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR); 13 + gva_t mmio_gva = vm_unused_gva_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR); 14 14 15 15 virt_map(vm, mmio_gva, mmio_gpa, 1); 16 16 17 17 vm->ucall_mmio_addr = mmio_gpa; 18 18 19 - write_guest_global(vm, ucall_exit_mmio_addr, (vm_vaddr_t *)mmio_gva); 19 + write_guest_global(vm, ucall_exit_mmio_addr, (gva_t *)mmio_gva); 20 20 } 21 21 22 22 void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu) ··· 25 25 26 26 if (run->exit_reason == KVM_EXIT_MMIO && 27 27 run->mmio.phys_addr == vcpu->vm->ucall_mmio_addr) { 28 - TEST_ASSERT(run->mmio.is_write && run->mmio.len == sizeof(uint64_t), 28 + TEST_ASSERT(run->mmio.is_write && run->mmio.len == sizeof(u64), 29 29 "Unexpected ucall exit mmio address access"); 30 - return (void *)(*((uint64_t *)run->mmio.data)); 30 + return (void *)(*((u64 *)run->mmio.data)); 31 31 } 32 32 33 33 return NULL;
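The host side above recognises a ucall by matching the MMIO exit against vm->ucall_mmio_addr and reinterpreting the written u64 as a guest pointer. The guest-side trigger is a single store to the shared GVA; the sketch below shows that write (the real helper, presumably ucall_arch_do_ucall(), is outside this diff).

/* Sketch of the guest-side store that produces the KVM_EXIT_MMIO handled above. */
static void guest_ucall_sketch(struct ucall *uc)
{
	WRITE_ONCE(*ucall_exit_mmio_addr, (gva_t)uc);
}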
+20 -20
tools/testing/selftests/kvm/lib/arm64/vgic.c
··· 41 41 * redistributor regions of the guest. Since it depends on the number of 42 42 * vCPUs for the VM, it must be called after all the vCPUs have been created. 43 43 */ 44 - int __vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs) 44 + int __vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, u32 nr_irqs) 45 45 { 46 46 int gic_fd; 47 - uint64_t attr; 47 + u64 attr; 48 48 unsigned int nr_gic_pages; 49 49 50 50 /* Distributor setup */ ··· 77 77 KVM_DEV_ARM_VGIC_CTRL_INIT, NULL); 78 78 } 79 79 80 - int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs) 80 + int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, u32 nr_irqs) 81 81 { 82 82 unsigned int nr_vcpus_created = 0; 83 83 struct list_head *iter; ··· 104 104 } 105 105 106 106 /* should only work for level sensitive interrupts */ 107 - int _kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level) 107 + int _kvm_irq_set_level_info(int gic_fd, u32 intid, int level) 108 108 { 109 - uint64_t attr = 32 * (intid / 32); 110 - uint64_t index = intid % 32; 111 - uint64_t val; 109 + u64 attr = 32 * (intid / 32); 110 + u64 index = intid % 32; 111 + u64 val; 112 112 int ret; 113 113 114 114 ret = __kvm_device_attr_get(gic_fd, KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO, ··· 122 122 return ret; 123 123 } 124 124 125 - void kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level) 125 + void kvm_irq_set_level_info(int gic_fd, u32 intid, int level) 126 126 { 127 127 int ret = _kvm_irq_set_level_info(gic_fd, intid, level); 128 128 129 129 TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO, ret)); 130 130 } 131 131 132 - int _kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level) 132 + int _kvm_arm_irq_line(struct kvm_vm *vm, u32 intid, int level) 133 133 { 134 - uint32_t irq = intid & KVM_ARM_IRQ_NUM_MASK; 134 + u32 irq = intid & KVM_ARM_IRQ_NUM_MASK; 135 135 136 136 TEST_ASSERT(!INTID_IS_SGI(intid), "KVM_IRQ_LINE's interface itself " 137 137 "doesn't allow injecting SGIs. There's no mask for it."); ··· 144 144 return _kvm_irq_line(vm, irq, level); 145 145 } 146 146 147 - void kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level) 147 + void kvm_arm_irq_line(struct kvm_vm *vm, u32 intid, int level) 148 148 { 149 149 int ret = _kvm_arm_irq_line(vm, intid, level); 150 150 151 151 TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_IRQ_LINE, ret)); 152 152 } 153 153 154 - static void vgic_poke_irq(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu, 155 - uint64_t reg_off) 154 + static void vgic_poke_irq(int gic_fd, u32 intid, struct kvm_vcpu *vcpu, 155 + u64 reg_off) 156 156 { 157 - uint64_t reg = intid / 32; 158 - uint64_t index = intid % 32; 159 - uint64_t attr = reg_off + reg * 4; 160 - uint64_t val; 157 + u64 reg = intid / 32; 158 + u64 index = intid % 32; 159 + u64 attr = reg_off + reg * 4; 160 + u64 val; 161 161 bool intid_is_private = INTID_IS_SGI(intid) || INTID_IS_PPI(intid); 162 162 163 - uint32_t group = intid_is_private ? KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 163 + u32 group = intid_is_private ? 
KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 164 164 : KVM_DEV_ARM_VGIC_GRP_DIST_REGS; 165 165 166 166 if (intid_is_private) { ··· 183 183 kvm_device_attr_set(gic_fd, group, attr, &val); 184 184 } 185 185 186 - void kvm_irq_write_ispendr(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu) 186 + void kvm_irq_write_ispendr(int gic_fd, u32 intid, struct kvm_vcpu *vcpu) 187 187 { 188 188 vgic_poke_irq(gic_fd, intid, vcpu, GICD_ISPENDR); 189 189 } 190 190 191 - void kvm_irq_write_isactiver(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu) 191 + void kvm_irq_write_isactiver(int gic_fd, u32 intid, struct kvm_vcpu *vcpu) 192 192 { 193 193 vgic_poke_irq(gic_fd, intid, vcpu, GICD_ISACTIVER); 194 194 }
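vgic_poke_irq() drives the pending/active registers through the KVM device-attribute interface: each ISPENDR/ISACTIVER covers 32 interrupts, so the attribute offset advances by four bytes per 32 INTIDs and the write targets a single bit within that register. Illustrative numbers for an SPI:

/* Illustrative: poking SPI 70 via GICD_ISPENDR, as vgic_poke_irq() does. */
u32 intid = 70;
u64 reg   = intid / 32;			/* 2: third 32-interrupt register  */
u64 index = intid % 32;			/* 6: bit position inside it       */
u64 attr  = GICD_ISPENDR + reg * 4;	/* device-attr offset (dist regs)  */
u64 bit   = 1ULL << index;		/* bit that ends up set in the reg */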
+8 -9
tools/testing/selftests/kvm/lib/elf.c
··· 156 156 TEST_ASSERT(phdr.p_memsz > 0, "Unexpected loadable segment " 157 157 "memsize of 0,\n" 158 158 " phdr index: %u p_memsz: 0x%" PRIx64, 159 - n1, (uint64_t) phdr.p_memsz); 160 - vm_vaddr_t seg_vstart = align_down(phdr.p_vaddr, vm->page_size); 161 - vm_vaddr_t seg_vend = phdr.p_vaddr + phdr.p_memsz - 1; 159 + n1, (u64)phdr.p_memsz); 160 + gva_t seg_vstart = align_down(phdr.p_vaddr, vm->page_size); 161 + gva_t seg_vend = phdr.p_vaddr + phdr.p_memsz - 1; 162 162 seg_vend |= vm->page_size - 1; 163 163 size_t seg_size = seg_vend - seg_vstart + 1; 164 164 165 - vm_vaddr_t vaddr = __vm_vaddr_alloc(vm, seg_size, seg_vstart, 166 - MEM_REGION_CODE); 167 - TEST_ASSERT(vaddr == seg_vstart, "Unable to allocate " 165 + gva_t gva = __vm_alloc(vm, seg_size, seg_vstart, MEM_REGION_CODE); 166 + TEST_ASSERT(gva == seg_vstart, "Unable to allocate " 168 167 "virtual memory for segment at requested min addr,\n" 169 168 " segment idx: %u\n" 170 169 " seg_vstart: 0x%lx\n" 171 - " vaddr: 0x%lx", 172 - n1, seg_vstart, vaddr); 173 - memset(addr_gva2hva(vm, vaddr), 0, seg_size); 170 + " gva: 0x%lx", 171 + n1, seg_vstart, gva); 172 + memset(addr_gva2hva(vm, gva), 0, seg_size); 174 173 /* TODO(lhuemill): Set permissions of each memory segment 175 174 * based on the least-significant 3 bits of phdr.p_flags. 176 175 */
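The rounding above expands each loadable ELF segment to whole guest pages before allocating and zeroing it. Illustrative numbers for a 4K-page VM:

/* Illustrative: a segment at p_vaddr 0x401234 with p_memsz 0x20, 4K pages. */
gva_t seg_vstart = 0x401000;	/* align_down(0x401234, 0x1000)         */
gva_t seg_vend   = 0x401fff;	/* (0x401234 + 0x20 - 1) | (0x1000 - 1) */
size_t seg_size  = 0x1000;	/* seg_vend - seg_vstart + 1            */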
+1 -1
tools/testing/selftests/kvm/lib/guest_modes.c
··· 20 20 #ifdef __aarch64__ 21 21 { 22 22 unsigned int limit = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE); 23 - uint32_t ipa4k, ipa16k, ipa64k; 23 + u32 ipa4k, ipa16k, ipa64k; 24 24 int i; 25 25 26 26 aarch64_get_supported_page_sizes(limit, &ipa4k, &ipa16k, &ipa64k);
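aarch64_get_supported_page_sizes() reports, per granule, the maximum IPA width the host will accept, with 0 meaning that granule is unsupported; guest_modes.c uses those values to decide which guest modes to register. A minimal consumer, with the print purely illustrative:

/* Illustrative consumer of the per-granule IPA limits queried above. */
u32 ipa4k, ipa16k, ipa64k;

aarch64_get_supported_page_sizes(limit, &ipa4k, &ipa16k, &ipa64k);
if (ipa4k)	/* zero means the host does not support the 4K granule */
	pr_info("4K-page guests can use up to %u IPA bits\n", ipa4k);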
+9 -9
tools/testing/selftests/kvm/lib/guest_sprintf.c
··· 35 35 ({ \ 36 36 int __res; \ 37 37 \ 38 - __res = ((uint64_t) n) % (uint32_t) base; \ 39 - n = ((uint64_t) n) / (uint32_t) base; \ 38 + __res = ((u64)n) % (u32)base; \ 39 + n = ((u64)n) / (u32)base; \ 40 40 __res; \ 41 41 }) 42 42 ··· 119 119 { 120 120 char *str, *end; 121 121 const char *s; 122 - uint64_t num; 122 + u64 num; 123 123 int i, base; 124 124 int len; 125 125 ··· 216 216 while (--field_width > 0) 217 217 APPEND_BUFFER_SAFE(str, end, ' '); 218 218 APPEND_BUFFER_SAFE(str, end, 219 - (uint8_t)va_arg(args, int)); 219 + (u8)va_arg(args, int)); 220 220 while (--field_width > 0) 221 221 APPEND_BUFFER_SAFE(str, end, ' '); 222 222 continue; ··· 240 240 flags |= SPECIAL | SMALL | ZEROPAD; 241 241 } 242 242 str = number(str, end, 243 - (uint64_t)va_arg(args, void *), 16, 243 + (u64)va_arg(args, void *), 16, 244 244 field_width, precision, flags); 245 245 continue; 246 246 ··· 284 284 continue; 285 285 } 286 286 if (qualifier == 'l') 287 - num = va_arg(args, uint64_t); 287 + num = va_arg(args, u64); 288 288 else if (qualifier == 'h') { 289 - num = (uint16_t)va_arg(args, int); 289 + num = (u16)va_arg(args, int); 290 290 if (flags & SIGN) 291 - num = (int16_t)num; 291 + num = (s16)num; 292 292 } else if (flags & SIGN) 293 293 num = va_arg(args, int); 294 294 else 295 - num = va_arg(args, uint32_t); 295 + num = va_arg(args, u32); 296 296 str = number(str, end, num, base, field_width, precision, flags); 297 297 } 298 298
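The remainder/quotient macro at the top of this hunk returns n % base and updates n to n / base in place (its name sits just above the quoted context). An open-coded trace of the same arithmetic for the hexadecimal case:

/* Equivalent open-coded trace for n = 255, base = 16. */
u64 n = 255;
u32 base = 16;
int d0 = n % base; n /= base;	/* d0 = 15 ('f'), n is now 15 */
int d1 = n % base; n /= base;	/* d1 = 15 ('f'), n is now 0  */
/* number() buffers digits least-significant first, so "%x" of 255 prints "ff". */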
+153 -224
tools/testing/selftests/kvm/lib/kvm_util.c
··· 20 20 21 21 #define KVM_UTIL_MIN_PFN 2 22 22 23 - uint32_t guest_random_seed; 23 + u32 guest_random_seed; 24 24 struct guest_random_state guest_rng; 25 - static uint32_t last_guest_seed; 25 + static u32 last_guest_seed; 26 26 27 27 static size_t vcpu_mmap_sz(void); 28 28 ··· 165 165 return (unsigned int)ret; 166 166 } 167 167 168 - void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size) 168 + void vm_enable_dirty_ring(struct kvm_vm *vm, u32 ring_size) 169 169 { 170 170 if (vm_check_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL)) 171 171 vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL, ring_size); ··· 189 189 vm->stats.fd = -1; 190 190 } 191 191 192 - const char *vm_guest_mode_string(uint32_t i) 192 + const char *vm_guest_mode_string(u32 i) 193 193 { 194 194 static const char * const strings[] = { 195 195 [VM_MODE_P52V48_4K] = "PA-bits:52, VA-bits:48, 4K pages", ··· 267 267 * based on the MSB of the VA. On architectures with this behavior 268 268 * the VA region spans [0, 2^(va_bits - 1)), [-(2^(va_bits - 1), -1]. 269 269 */ 270 - __weak void vm_vaddr_populate_bitmap(struct kvm_vm *vm) 270 + __weak void vm_populate_gva_bitmap(struct kvm_vm *vm) 271 271 { 272 272 sparsebit_set_num(vm->vpages_valid, 273 273 0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift); ··· 385 385 386 386 /* Limit to VA-bit canonical virtual addresses. */ 387 387 vm->vpages_valid = sparsebit_alloc(); 388 - vm_vaddr_populate_bitmap(vm); 388 + vm_populate_gva_bitmap(vm); 389 389 390 390 /* Limit physical addresses to PA-bits. */ 391 391 vm->max_gfn = vm_compute_max_gfn(vm); ··· 396 396 return vm; 397 397 } 398 398 399 - static uint64_t vm_nr_pages_required(enum vm_guest_mode mode, 400 - uint32_t nr_runnable_vcpus, 401 - uint64_t extra_mem_pages) 399 + static u64 vm_nr_pages_required(enum vm_guest_mode mode, 400 + u32 nr_runnable_vcpus, 401 + u64 extra_mem_pages) 402 402 { 403 - uint64_t page_size = vm_guest_mode_params[mode].page_size; 404 - uint64_t nr_pages; 403 + u64 page_size = vm_guest_mode_params[mode].page_size; 404 + u64 nr_pages; 405 405 406 406 TEST_ASSERT(nr_runnable_vcpus, 407 407 "Use vm_create_barebones() for VMs that _never_ have vCPUs"); ··· 435 435 return vm_adjust_num_guest_pages(mode, nr_pages); 436 436 } 437 437 438 - void kvm_set_files_rlimit(uint32_t nr_vcpus) 438 + void kvm_set_files_rlimit(u32 nr_vcpus) 439 439 { 440 440 /* 441 441 * Each vCPU will open two file descriptors: the vCPU itself and the ··· 476 476 #endif 477 477 } 478 478 479 - struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus, 480 - uint64_t nr_extra_pages) 479 + struct kvm_vm *__vm_create(struct vm_shape shape, u32 nr_runnable_vcpus, 480 + u64 nr_extra_pages) 481 481 { 482 - uint64_t nr_pages = vm_nr_pages_required(shape.mode, nr_runnable_vcpus, 482 + u64 nr_pages = vm_nr_pages_required(shape.mode, nr_runnable_vcpus, 483 483 nr_extra_pages); 484 484 struct userspace_mem_region *slot0; 485 485 struct kvm_vm *vm; ··· 546 546 * extra_mem_pages is only used to calculate the maximum page table size, 547 547 * no real memory allocation for non-slot0 memory in this function. 
548 548 */ 549 - struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus, 550 - uint64_t extra_mem_pages, 549 + struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, u32 nr_vcpus, 550 + u64 extra_mem_pages, 551 551 void *guest_code, struct kvm_vcpu *vcpus[]) 552 552 { 553 553 struct kvm_vm *vm; ··· 566 566 567 567 struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape, 568 568 struct kvm_vcpu **vcpu, 569 - uint64_t extra_mem_pages, 569 + u64 extra_mem_pages, 570 570 void *guest_code) 571 571 { 572 572 struct kvm_vcpu *vcpus[1]; ··· 614 614 } 615 615 616 616 __weak struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, 617 - uint32_t vcpu_id) 617 + u32 vcpu_id) 618 618 { 619 619 return __vm_vcpu_add(vm, vcpu_id); 620 620 } ··· 636 636 return pthread_setaffinity_np(task, sizeof(cpuset), &cpuset); 637 637 } 638 638 639 - static uint32_t parse_pcpu(const char *cpu_str, const cpu_set_t *allowed_mask) 639 + static u32 parse_pcpu(const char *cpu_str, const cpu_set_t *allowed_mask) 640 640 { 641 - uint32_t pcpu = atoi_non_negative("CPU number", cpu_str); 641 + u32 pcpu = atoi_non_negative("CPU number", cpu_str); 642 642 643 643 TEST_ASSERT(CPU_ISSET(pcpu, allowed_mask), 644 644 "Not allowed to run on pCPU '%d', check cgroups?", pcpu); ··· 662 662 " (default: no pinning)\n", name, name); 663 663 } 664 664 665 - void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[], 665 + void kvm_parse_vcpu_pinning(const char *pcpus_string, u32 vcpu_to_pcpu[], 666 666 int nr_vcpus) 667 667 { 668 668 cpu_set_t allowed_mask; ··· 715 715 * region exists. 716 716 */ 717 717 static struct userspace_mem_region * 718 - userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end) 718 + userspace_mem_region_find(struct kvm_vm *vm, u64 start, u64 end) 719 719 { 720 720 struct rb_node *node; 721 721 722 722 for (node = vm->regions.gpa_tree.rb_node; node; ) { 723 723 struct userspace_mem_region *region = 724 724 container_of(node, struct userspace_mem_region, gpa_node); 725 - uint64_t existing_start = region->region.guest_phys_addr; 726 - uint64_t existing_end = region->region.guest_phys_addr 725 + u64 existing_start = region->region.guest_phys_addr; 726 + u64 existing_end = region->region.guest_phys_addr 727 727 + region->region.memory_size - 1; 728 728 if (start <= existing_end && end >= existing_start) 729 729 return region; ··· 918 918 } 919 919 920 920 921 - int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags, 922 - uint64_t gpa, uint64_t size, void *hva) 921 + int __vm_set_user_memory_region(struct kvm_vm *vm, u32 slot, u32 flags, 922 + gpa_t gpa, u64 size, void *hva) 923 923 { 924 924 struct kvm_userspace_memory_region region = { 925 925 .slot = slot, ··· 932 932 return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region); 933 933 } 934 934 935 - void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags, 936 - uint64_t gpa, uint64_t size, void *hva) 935 + void vm_set_user_memory_region(struct kvm_vm *vm, u32 slot, u32 flags, 936 + gpa_t gpa, u64 size, void *hva) 937 937 { 938 938 int ret = __vm_set_user_memory_region(vm, slot, flags, gpa, size, hva); 939 939 ··· 945 945 __TEST_REQUIRE(kvm_has_cap(KVM_CAP_USER_MEMORY2), \ 946 946 "KVM selftests now require KVM_SET_USER_MEMORY_REGION2 (introduced in v6.8)") 947 947 948 - int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags, 949 - uint64_t gpa, uint64_t size, void *hva, 950 - uint32_t guest_memfd, uint64_t 
guest_memfd_offset) 948 + int __vm_set_user_memory_region2(struct kvm_vm *vm, u32 slot, u32 flags, 949 + gpa_t gpa, u64 size, void *hva, 950 + u32 guest_memfd, u64 guest_memfd_offset) 951 951 { 952 952 struct kvm_userspace_memory_region2 region = { 953 953 .slot = slot, ··· 964 964 return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION2, &region); 965 965 } 966 966 967 - void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags, 968 - uint64_t gpa, uint64_t size, void *hva, 969 - uint32_t guest_memfd, uint64_t guest_memfd_offset) 967 + void vm_set_user_memory_region2(struct kvm_vm *vm, u32 slot, u32 flags, 968 + gpa_t gpa, u64 size, void *hva, 969 + u32 guest_memfd, u64 guest_memfd_offset) 970 970 { 971 971 int ret = __vm_set_user_memory_region2(vm, slot, flags, gpa, size, hva, 972 972 guest_memfd, guest_memfd_offset); ··· 978 978 979 979 /* FIXME: This thing needs to be ripped apart and rewritten. */ 980 980 void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type, 981 - uint64_t gpa, uint32_t slot, uint64_t npages, uint32_t flags, 982 - int guest_memfd, uint64_t guest_memfd_offset) 981 + gpa_t gpa, u32 slot, u64 npages, u32 flags, 982 + int guest_memfd, u64 guest_memfd_offset) 983 983 { 984 984 int ret; 985 985 struct userspace_mem_region *region; ··· 1016 1016 " requested gpa: 0x%lx npages: 0x%lx page_size: 0x%x\n" 1017 1017 " existing gpa: 0x%lx size: 0x%lx", 1018 1018 gpa, npages, vm->page_size, 1019 - (uint64_t) region->region.guest_phys_addr, 1020 - (uint64_t) region->region.memory_size); 1019 + (u64)region->region.guest_phys_addr, 1020 + (u64)region->region.memory_size); 1021 1021 1022 1022 /* Confirm no region with the requested slot already exists. */ 1023 1023 hash_for_each_possible(vm->regions.slot_hash, region, slot_node, ··· 1027 1027 1028 1028 TEST_FAIL("A mem region with the requested slot " 1029 1029 "already exists.\n" 1030 - " requested slot: %u paddr: 0x%lx npages: 0x%lx\n" 1031 - " existing slot: %u paddr: 0x%lx size: 0x%lx", 1030 + " requested slot: %u gpa: 0x%lx npages: 0x%lx\n" 1031 + " existing slot: %u gpa: 0x%lx size: 0x%lx", 1032 1032 slot, gpa, npages, region->region.slot, 1033 - (uint64_t) region->region.guest_phys_addr, 1034 - (uint64_t) region->region.memory_size); 1033 + (u64)region->region.guest_phys_addr, 1034 + (u64)region->region.memory_size); 1035 1035 } 1036 1036 1037 1037 /* Allocate and initialize new mem region structure. */ ··· 1085 1085 1086 1086 if (flags & KVM_MEM_GUEST_MEMFD) { 1087 1087 if (guest_memfd < 0) { 1088 - uint32_t guest_memfd_flags = 0; 1088 + u32 guest_memfd_flags = 0; 1089 1089 TEST_ASSERT(!guest_memfd_offset, 1090 1090 "Offset must be zero when creating new guest_memfd"); 1091 1091 guest_memfd = vm_create_guest_memfd(vm, mem_size, guest_memfd_flags); ··· 1141 1141 1142 1142 void vm_userspace_mem_region_add(struct kvm_vm *vm, 1143 1143 enum vm_mem_backing_src_type src_type, 1144 - uint64_t gpa, uint32_t slot, uint64_t npages, 1145 - uint32_t flags) 1144 + gpa_t gpa, u32 slot, u64 npages, u32 flags) 1146 1145 { 1147 1146 vm_mem_add(vm, src_type, gpa, slot, npages, flags, -1, 0); 1148 1147 } ··· 1162 1163 * memory slot ID). 1163 1164 */ 1164 1165 struct userspace_mem_region * 1165 - memslot2region(struct kvm_vm *vm, uint32_t memslot) 1166 + memslot2region(struct kvm_vm *vm, u32 memslot) 1166 1167 { 1167 1168 struct userspace_mem_region *region; 1168 1169 ··· 1193 1194 * Sets the flags of the memory region specified by the value of slot, 1194 1195 * to the values given by flags. 
1195 1196 */ 1196 - void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags) 1197 + void vm_mem_region_set_flags(struct kvm_vm *vm, u32 slot, u32 flags) 1197 1198 { 1198 1199 int ret; 1199 1200 struct userspace_mem_region *region; ··· 1209 1210 ret, errno, slot, flags); 1210 1211 } 1211 1212 1212 - void vm_mem_region_reload(struct kvm_vm *vm, uint32_t slot) 1213 + void vm_mem_region_reload(struct kvm_vm *vm, u32 slot) 1213 1214 { 1214 1215 struct userspace_mem_region *region = memslot2region(vm, slot); 1215 1216 struct kvm_userspace_memory_region2 tmp = region->region; ··· 1233 1234 * 1234 1235 * Change the gpa of a memory region. 1235 1236 */ 1236 - void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa) 1237 + void vm_mem_region_move(struct kvm_vm *vm, u32 slot, u64 new_gpa) 1237 1238 { 1238 1239 struct userspace_mem_region *region; 1239 1240 int ret; ··· 1262 1263 * 1263 1264 * Delete a memory region. 1264 1265 */ 1265 - void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot) 1266 + void vm_mem_region_delete(struct kvm_vm *vm, u32 slot) 1266 1267 { 1267 1268 struct userspace_mem_region *region = memslot2region(vm, slot); 1268 1269 ··· 1272 1273 __vm_mem_region_delete(vm, region); 1273 1274 } 1274 1275 1275 - void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t base, uint64_t size, 1276 + void vm_guest_mem_fallocate(struct kvm_vm *vm, u64 base, u64 size, 1276 1277 bool punch_hole) 1277 1278 { 1278 1279 const int mode = FALLOC_FL_KEEP_SIZE | (punch_hole ? FALLOC_FL_PUNCH_HOLE : 0); 1279 1280 struct userspace_mem_region *region; 1280 - uint64_t end = base + size; 1281 - uint64_t gpa, len; 1281 + u64 end = base + size; 1282 + gpa_t gpa, len; 1282 1283 off_t fd_offset; 1283 1284 int ret; 1284 1285 1285 1286 for (gpa = base; gpa < end; gpa += len) { 1286 - uint64_t offset; 1287 + u64 offset; 1287 1288 1288 1289 region = userspace_mem_region_find(vm, gpa, gpa); 1289 1290 TEST_ASSERT(region && region->region.flags & KVM_MEM_GUEST_MEMFD, ··· 1291 1292 1292 1293 offset = gpa - region->region.guest_phys_addr; 1293 1294 fd_offset = region->region.guest_memfd_offset + offset; 1294 - len = min_t(uint64_t, end - gpa, region->region.memory_size - offset); 1295 + len = min_t(u64, end - gpa, region->region.memory_size - offset); 1295 1296 1296 1297 ret = fallocate(region->region.guest_memfd, mode, fd_offset, len); 1297 1298 TEST_ASSERT(!ret, "fallocate() failed to %s at %lx (len = %lu), fd = %d, mode = %x, offset = %lx", ··· 1316 1317 return ret; 1317 1318 } 1318 1319 1319 - static bool vcpu_exists(struct kvm_vm *vm, uint32_t vcpu_id) 1320 + static bool vcpu_exists(struct kvm_vm *vm, u32 vcpu_id) 1320 1321 { 1321 1322 struct kvm_vcpu *vcpu; 1322 1323 ··· 1332 1333 * Adds a virtual CPU to the VM specified by vm with the ID given by vcpu_id. 1333 1334 * No additional vCPU setup is done. Returns the vCPU. 1334 1335 */ 1335 - struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) 1336 + struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, u32 vcpu_id) 1336 1337 { 1337 1338 struct kvm_vcpu *vcpu; 1338 1339 ··· 1366 1367 } 1367 1368 1368 1369 /* 1369 - * VM Virtual Address Unused Gap 1370 - * 1371 - * Input Args: 1372 - * vm - Virtual Machine 1373 - * sz - Size (bytes) 1374 - * vaddr_min - Minimum Virtual Address 1375 - * 1376 - * Output Args: None 1377 - * 1378 - * Return: 1379 - * Lowest virtual address at or above vaddr_min, with at least 1380 - * sz unused bytes. TEST_ASSERT failure if no area of at least 1381 - * size sz is available. 
1382 - * 1383 - * Within the VM specified by vm, locates the lowest starting virtual 1384 - * address >= vaddr_min, that has at least sz unallocated bytes. A 1370 + * Within the VM specified by @vm, locates the lowest starting guest virtual 1371 + * address >= @min_gva, that has at least @sz unallocated bytes. A 1385 1372 * TEST_ASSERT failure occurs for invalid input or no area of at least 1386 - * sz unallocated bytes >= vaddr_min is available. 1373 + * @sz unallocated bytes >= @min_gva is available. 1387 1374 */ 1388 - vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, 1389 - vm_vaddr_t vaddr_min) 1375 + gva_t vm_unused_gva_gap(struct kvm_vm *vm, size_t sz, gva_t min_gva) 1390 1376 { 1391 - uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift; 1377 + u64 pages = (sz + vm->page_size - 1) >> vm->page_shift; 1392 1378 1393 1379 /* Determine lowest permitted virtual page index. */ 1394 - uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift; 1395 - if ((pgidx_start * vm->page_size) < vaddr_min) 1380 + u64 pgidx_start = (min_gva + vm->page_size - 1) >> vm->page_shift; 1381 + if ((pgidx_start * vm->page_size) < min_gva) 1396 1382 goto no_va_found; 1397 1383 1398 1384 /* Loop over section with enough valid virtual page indexes. */ ··· 1414 1430 } while (pgidx_start != 0); 1415 1431 1416 1432 no_va_found: 1417 - TEST_FAIL("No vaddr of specified pages available, pages: 0x%lx", pages); 1433 + TEST_FAIL("No gva of specified pages available, pages: 0x%lx", pages); 1418 1434 1419 1435 /* NOT REACHED */ 1420 1436 return -1; ··· 1436 1452 return pgidx_start * vm->page_size; 1437 1453 } 1438 1454 1439 - static vm_vaddr_t ____vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, 1440 - vm_vaddr_t vaddr_min, 1441 - enum kvm_mem_region_type type, 1442 - bool protected) 1455 + static gva_t ____vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva, 1456 + enum kvm_mem_region_type type, bool protected) 1443 1457 { 1444 - uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0); 1458 + u64 pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0); 1445 1459 1446 1460 virt_pgd_alloc(vm); 1447 - vm_paddr_t paddr = __vm_phy_pages_alloc(vm, pages, 1448 - KVM_UTIL_MIN_PFN * vm->page_size, 1449 - vm->memslots[type], protected); 1461 + gpa_t gpa = __vm_phy_pages_alloc(vm, pages, 1462 + KVM_UTIL_MIN_PFN * vm->page_size, 1463 + vm->memslots[type], protected); 1450 1464 1451 1465 /* 1452 1466 * Find an unused range of virtual page addresses of at least 1453 1467 * pages in length. 1454 1468 */ 1455 - vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min); 1469 + gva_t gva_start = vm_unused_gva_gap(vm, sz, min_gva); 1456 1470 1457 1471 /* Map the virtual pages. 
*/ 1458 - for (vm_vaddr_t vaddr = vaddr_start; pages > 0; 1459 - pages--, vaddr += vm->page_size, paddr += vm->page_size) { 1472 + for (gva_t gva = gva_start; pages > 0; 1473 + pages--, gva += vm->page_size, gpa += vm->page_size) { 1460 1474 1461 - virt_pg_map(vm, vaddr, paddr); 1475 + virt_pg_map(vm, gva, gpa); 1462 1476 } 1463 1477 1464 - return vaddr_start; 1478 + return gva_start; 1465 1479 } 1466 1480 1467 - vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, 1468 - enum kvm_mem_region_type type) 1481 + gva_t __vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva, 1482 + enum kvm_mem_region_type type) 1469 1483 { 1470 - return ____vm_vaddr_alloc(vm, sz, vaddr_min, type, 1471 - vm_arch_has_protected_memory(vm)); 1484 + return ____vm_alloc(vm, sz, min_gva, type, 1485 + vm_arch_has_protected_memory(vm)); 1472 1486 } 1473 1487 1474 - vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz, 1475 - vm_vaddr_t vaddr_min, 1476 - enum kvm_mem_region_type type) 1488 + gva_t vm_alloc_shared(struct kvm_vm *vm, size_t sz, gva_t min_gva, 1489 + enum kvm_mem_region_type type) 1477 1490 { 1478 - return ____vm_vaddr_alloc(vm, sz, vaddr_min, type, false); 1491 + return ____vm_alloc(vm, sz, min_gva, type, false); 1479 1492 } 1480 1493 1481 1494 /* 1482 - * VM Virtual Address Allocate 1483 - * 1484 - * Input Args: 1485 - * vm - Virtual Machine 1486 - * sz - Size in bytes 1487 - * vaddr_min - Minimum starting virtual address 1488 - * 1489 - * Output Args: None 1490 - * 1491 - * Return: 1492 - * Starting guest virtual address 1493 - * 1494 - * Allocates at least sz bytes within the virtual address space of the vm 1495 - * given by vm. The allocated bytes are mapped to a virtual address >= 1496 - * the address given by vaddr_min. Note that each allocation uses a 1497 - * a unique set of pages, with the minimum real allocation being at least 1498 - * a page. The allocated physical space comes from the TEST_DATA memory region. 1495 + * Allocates at least sz bytes within the virtual address space of the VM 1496 + * given by @vm. The allocated bytes are mapped to a virtual address >= the 1497 + * address given by @min_gva. Note that each allocation uses a a unique set 1498 + * of pages, with the minimum real allocation being at least a page. The 1499 + * allocated physical space comes from the TEST_DATA memory region. 1499 1500 */ 1500 - vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min) 1501 + gva_t vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva) 1501 1502 { 1502 - return __vm_vaddr_alloc(vm, sz, vaddr_min, MEM_REGION_TEST_DATA); 1503 + return __vm_alloc(vm, sz, min_gva, MEM_REGION_TEST_DATA); 1504 + } 1505 + 1506 + gva_t vm_alloc_pages(struct kvm_vm *vm, int nr_pages) 1507 + { 1508 + return vm_alloc(vm, nr_pages * getpagesize(), KVM_UTIL_MIN_VADDR); 1509 + } 1510 + 1511 + gva_t __vm_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type) 1512 + { 1513 + return __vm_alloc(vm, getpagesize(), KVM_UTIL_MIN_VADDR, type); 1514 + } 1515 + 1516 + gva_t vm_alloc_page(struct kvm_vm *vm) 1517 + { 1518 + return vm_alloc_pages(vm, 1); 1503 1519 } 1504 1520 1505 1521 /* 1506 - * VM Virtual Address Allocate Pages 1522 + * Map a range of VM virtual address to the VM's physical address. 
1507 1523 * 1508 - * Input Args: 1509 - * vm - Virtual Machine 1510 - * 1511 - * Output Args: None 1512 - * 1513 - * Return: 1514 - * Starting guest virtual address 1515 - * 1516 - * Allocates at least N system pages worth of bytes within the virtual address 1517 - * space of the vm. 1524 + * Within the VM given by @vm, creates a virtual translation for @npages 1525 + * starting at @gva to the page range starting at @gpa. 1518 1526 */ 1519 - vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages) 1520 - { 1521 - return vm_vaddr_alloc(vm, nr_pages * getpagesize(), KVM_UTIL_MIN_VADDR); 1522 - } 1523 - 1524 - vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type) 1525 - { 1526 - return __vm_vaddr_alloc(vm, getpagesize(), KVM_UTIL_MIN_VADDR, type); 1527 - } 1528 - 1529 - /* 1530 - * VM Virtual Address Allocate Page 1531 - * 1532 - * Input Args: 1533 - * vm - Virtual Machine 1534 - * 1535 - * Output Args: None 1536 - * 1537 - * Return: 1538 - * Starting guest virtual address 1539 - * 1540 - * Allocates at least one system page worth of bytes within the virtual address 1541 - * space of the vm. 1542 - */ 1543 - vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm) 1544 - { 1545 - return vm_vaddr_alloc_pages(vm, 1); 1546 - } 1547 - 1548 - /* 1549 - * Map a range of VM virtual address to the VM's physical address 1550 - * 1551 - * Input Args: 1552 - * vm - Virtual Machine 1553 - * vaddr - Virtuall address to map 1554 - * paddr - VM Physical Address 1555 - * npages - The number of pages to map 1556 - * 1557 - * Output Args: None 1558 - * 1559 - * Return: None 1560 - * 1561 - * Within the VM given by @vm, creates a virtual translation for 1562 - * @npages starting at @vaddr to the page range starting at @paddr. 1563 - */ 1564 - void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, 1565 - unsigned int npages) 1527 + void virt_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa, unsigned int npages) 1566 1528 { 1567 1529 size_t page_size = vm->page_size; 1568 1530 size_t size = npages * page_size; 1569 1531 1570 - TEST_ASSERT(vaddr + size > vaddr, "Vaddr overflow"); 1571 - TEST_ASSERT(paddr + size > paddr, "Paddr overflow"); 1532 + TEST_ASSERT(gva + size > gva, "Vaddr overflow"); 1533 + TEST_ASSERT(gpa + size > gpa, "Paddr overflow"); 1572 1534 1573 1535 while (npages--) { 1574 - virt_pg_map(vm, vaddr, paddr); 1536 + virt_pg_map(vm, gva, gpa); 1575 1537 1576 - vaddr += page_size; 1577 - paddr += page_size; 1538 + gva += page_size; 1539 + gpa += page_size; 1578 1540 } 1579 1541 } 1580 1542 ··· 1541 1611 * address providing the memory to the vm physical address is returned. 1542 1612 * A TEST_ASSERT failure occurs if no region containing gpa exists. 1543 1613 */ 1544 - void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa) 1614 + void *addr_gpa2hva(struct kvm_vm *vm, gpa_t gpa) 1545 1615 { 1546 1616 struct userspace_mem_region *region; 1547 1617 ··· 1574 1644 * VM physical address is returned. A TEST_ASSERT failure occurs if no 1575 1645 * region containing hva exists. 
1576 1646 */ 1577 - vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva) 1647 + gpa_t addr_hva2gpa(struct kvm_vm *vm, void *hva) 1578 1648 { 1579 1649 struct rb_node *node; 1580 1650 ··· 1585 1655 if (hva >= region->host_mem) { 1586 1656 if (hva <= (region->host_mem 1587 1657 + region->region.memory_size - 1)) 1588 - return (vm_paddr_t)((uintptr_t) 1658 + return (gpa_t)((uintptr_t) 1589 1659 region->region.guest_phys_addr 1590 1660 + (hva - (uintptr_t)region->host_mem)); 1591 1661 ··· 1617 1687 * memory without mapping said memory in the guest's address space. And, for 1618 1688 * userfaultfd-based demand paging, to do so without triggering userfaults. 1619 1689 */ 1620 - void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa) 1690 + void *addr_gpa2alias(struct kvm_vm *vm, gpa_t gpa) 1621 1691 { 1622 1692 struct userspace_mem_region *region; 1623 1693 uintptr_t offset; ··· 1711 1781 1712 1782 void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu) 1713 1783 { 1714 - uint32_t page_size = getpagesize(); 1715 - uint32_t size = vcpu->vm->dirty_ring_size; 1784 + u32 page_size = getpagesize(); 1785 + u32 size = vcpu->vm->dirty_ring_size; 1716 1786 1717 1787 TEST_ASSERT(size > 0, "Should enable dirty ring first"); 1718 1788 ··· 1741 1811 * Device Ioctl 1742 1812 */ 1743 1813 1744 - int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr) 1814 + int __kvm_has_device_attr(int dev_fd, u32 group, u64 attr) 1745 1815 { 1746 1816 struct kvm_device_attr attribute = { 1747 1817 .group = group, ··· 1752 1822 return ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute); 1753 1823 } 1754 1824 1755 - int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type) 1825 + int __kvm_test_create_device(struct kvm_vm *vm, u64 type) 1756 1826 { 1757 1827 struct kvm_create_device create_dev = { 1758 1828 .type = type, ··· 1762 1832 return __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev); 1763 1833 } 1764 1834 1765 - int __kvm_create_device(struct kvm_vm *vm, uint64_t type) 1835 + int __kvm_create_device(struct kvm_vm *vm, u64 type) 1766 1836 { 1767 1837 struct kvm_create_device create_dev = { 1768 1838 .type = type, ··· 1776 1846 return err ? : create_dev.fd; 1777 1847 } 1778 1848 1779 - int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val) 1849 + int __kvm_device_attr_get(int dev_fd, u32 group, u64 attr, void *val) 1780 1850 { 1781 1851 struct kvm_device_attr kvmattr = { 1782 1852 .group = group, ··· 1788 1858 return __kvm_ioctl(dev_fd, KVM_GET_DEVICE_ATTR, &kvmattr); 1789 1859 } 1790 1860 1791 - int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val) 1861 + int __kvm_device_attr_set(int dev_fd, u32 group, u64 attr, void *val) 1792 1862 { 1793 1863 struct kvm_device_attr kvmattr = { 1794 1864 .group = group, ··· 1804 1874 * IRQ related functions. 
1805 1875 */ 1806 1876 1807 - int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level) 1877 + int _kvm_irq_line(struct kvm_vm *vm, u32 irq, int level) 1808 1878 { 1809 1879 struct kvm_irq_level irq_level = { 1810 1880 .irq = irq, ··· 1814 1884 return __vm_ioctl(vm, KVM_IRQ_LINE, &irq_level); 1815 1885 } 1816 1886 1817 - void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level) 1887 + void kvm_irq_line(struct kvm_vm *vm, u32 irq, int level) 1818 1888 { 1819 1889 int ret = _kvm_irq_line(vm, irq, level); 1820 1890 ··· 1836 1906 } 1837 1907 1838 1908 void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing, 1839 - uint32_t gsi, uint32_t pin) 1909 + u32 gsi, u32 pin) 1840 1910 { 1841 1911 int i; 1842 1912 ··· 1886 1956 * Dumps the current state of the VM given by vm, to the FILE stream 1887 1957 * given by stream. 1888 1958 */ 1889 - void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) 1959 + void vm_dump(FILE *stream, struct kvm_vm *vm, u8 indent) 1890 1960 { 1891 1961 int ctr; 1892 1962 struct userspace_mem_region *region; ··· 1899 1969 hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) { 1900 1970 fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx " 1901 1971 "host_virt: %p\n", indent + 2, "", 1902 - (uint64_t) region->region.guest_phys_addr, 1903 - (uint64_t) region->region.memory_size, 1972 + (u64)region->region.guest_phys_addr, 1973 + (u64)region->region.memory_size, 1904 1974 region->host_mem); 1905 1975 fprintf(stream, "%*sunused_phy_pages: ", indent + 2, ""); 1906 1976 sparsebit_dump(stream, region->unused_phy_pages, 0); ··· 2007 2077 * Input Args: 2008 2078 * vm - Virtual Machine 2009 2079 * num - number of pages 2010 - * paddr_min - Physical address minimum 2080 + * min_gpa - Physical address minimum 2011 2081 * memslot - Memory region to allocate page from 2012 2082 * protected - True if the pages will be used as protected/private memory 2013 2083 * ··· 2017 2087 * Starting physical address 2018 2088 * 2019 2089 * Within the VM specified by vm, locates a range of available physical 2020 - * pages at or above paddr_min. If found, the pages are marked as in use 2090 + * pages at or above min_gpa. If found, the pages are marked as in use 2021 2091 * and their base address is returned. A TEST_ASSERT failure occurs if 2022 - * not enough pages are available at or above paddr_min. 2092 + * not enough pages are available at or above min_gpa. 
2023 2093 */ 2024 - vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, 2025 - vm_paddr_t paddr_min, uint32_t memslot, 2026 - bool protected) 2094 + gpa_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, 2095 + gpa_t min_gpa, u32 memslot, 2096 + bool protected) 2027 2097 { 2028 2098 struct userspace_mem_region *region; 2029 2099 sparsebit_idx_t pg, base; 2030 2100 2031 2101 TEST_ASSERT(num > 0, "Must allocate at least one page"); 2032 2102 2033 - TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address " 2103 + TEST_ASSERT((min_gpa % vm->page_size) == 0, "Min physical address " 2034 2104 "not divisible by page size.\n" 2035 - " paddr_min: 0x%lx page_size: 0x%x", 2036 - paddr_min, vm->page_size); 2105 + " min_gpa: 0x%lx page_size: 0x%x", 2106 + min_gpa, vm->page_size); 2037 2107 2038 2108 region = memslot2region(vm, memslot); 2039 2109 TEST_ASSERT(!protected || region->protected_phy_pages, 2040 2110 "Region doesn't support protected memory"); 2041 2111 2042 - base = pg = paddr_min >> vm->page_shift; 2112 + base = pg = min_gpa >> vm->page_shift; 2043 2113 do { 2044 2114 for (; pg < base + num; ++pg) { 2045 2115 if (!sparsebit_is_set(region->unused_phy_pages, pg)) { ··· 2051 2121 2052 2122 if (pg == 0) { 2053 2123 fprintf(stderr, "No guest physical page available, " 2054 - "paddr_min: 0x%lx page_size: 0x%x memslot: %u\n", 2055 - paddr_min, vm->page_size, memslot); 2124 + "min_gpa: 0x%lx page_size: 0x%x memslot: %u\n", 2125 + min_gpa, vm->page_size, memslot); 2056 2126 fputs("---- vm dump ----\n", stderr); 2057 2127 vm_dump(stderr, vm, 2); 2058 2128 abort(); ··· 2067 2137 return base * vm->page_size; 2068 2138 } 2069 2139 2070 - vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, 2071 - uint32_t memslot) 2140 + gpa_t vm_phy_page_alloc(struct kvm_vm *vm, gpa_t min_gpa, u32 memslot) 2072 2141 { 2073 - return vm_phy_pages_alloc(vm, 1, paddr_min, memslot); 2142 + return vm_phy_pages_alloc(vm, 1, min_gpa, memslot); 2074 2143 } 2075 2144 2076 - vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm) 2145 + gpa_t vm_alloc_page_table(struct kvm_vm *vm) 2077 2146 { 2078 2147 return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 2079 2148 vm->memslots[MEM_REGION_PT]); ··· 2090 2161 * Return: 2091 2162 * Equivalent host virtual address 2092 2163 */ 2093 - void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva) 2164 + void *addr_gva2hva(struct kvm_vm *vm, gva_t gva) 2094 2165 { 2095 2166 return addr_gpa2hva(vm, addr_gva2gpa(vm, gva)); 2096 2167 } ··· 2188 2259 * Read the data values of a specified stat from the binary stats interface. 
2189 2260 */ 2190 2261 void read_stat_data(int stats_fd, struct kvm_stats_header *header, 2191 - struct kvm_stats_desc *desc, uint64_t *data, 2262 + struct kvm_stats_desc *desc, u64 *data, 2192 2263 size_t max_elements) 2193 2264 { 2194 2265 size_t nr_elements = min_t(ssize_t, desc->size, max_elements); ··· 2209 2280 } 2210 2281 2211 2282 void kvm_get_stat(struct kvm_binary_stats *stats, const char *name, 2212 - uint64_t *data, size_t max_elements) 2283 + u64 *data, size_t max_elements) 2213 2284 { 2214 2285 struct kvm_stats_desc *desc; 2215 2286 size_t size_desc; ··· 2286 2357 kvm_selftest_arch_init(); 2287 2358 } 2288 2359 2289 - bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr) 2360 + bool vm_is_gpa_protected(struct kvm_vm *vm, gpa_t gpa) 2290 2361 { 2291 2362 sparsebit_idx_t pg = 0; 2292 2363 struct userspace_mem_region *region; ··· 2294 2365 if (!vm_arch_has_protected_memory(vm)) 2295 2366 return false; 2296 2367 2297 - region = userspace_mem_region_find(vm, paddr, paddr); 2298 - TEST_ASSERT(region, "No vm physical memory at 0x%lx", paddr); 2368 + region = userspace_mem_region_find(vm, gpa, gpa); 2369 + TEST_ASSERT(region, "No vm physical memory at 0x%lx", gpa); 2299 2370 2300 - pg = paddr >> vm->page_shift; 2371 + pg = gpa >> vm->page_shift; 2301 2372 return sparsebit_is_set(region->protected_phy_pages, pg); 2302 2373 } 2303 2374
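After the rename the allocator family reads as vm_alloc()/vm_alloc_pages()/vm_alloc_page() plus the region-typed __vm_alloc(); internally they still grab physical pages first, then find a free GVA gap with vm_unused_gva_gap() and map it page by page. A minimal usage sketch (the size and the write are arbitrary):

/* Minimal sketch: allocate guest memory and touch it from the host side. */
gva_t gva = vm_alloc(vm, 2 * getpagesize(), KVM_UTIL_MIN_VADDR);
u64 *hva  = addr_gva2hva(vm, gva);	/* host mapping of the same pages */

*hva = 0xcafe;				/* the guest sees this value at gva */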
+55 -55
tools/testing/selftests/kvm/lib/loongarch/processor.c
··· 12 12 #define LOONGARCH_PAGE_TABLE_PHYS_MIN 0x200000 13 13 #define LOONGARCH_GUEST_STACK_VADDR_MIN 0x200000 14 14 15 - static vm_paddr_t invalid_pgtable[4]; 16 - static vm_vaddr_t exception_handlers; 15 + static gpa_t invalid_pgtable[4]; 16 + static gva_t exception_handlers; 17 17 18 - static uint64_t virt_pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level) 18 + static u64 virt_pte_index(struct kvm_vm *vm, gva_t gva, int level) 19 19 { 20 20 unsigned int shift; 21 - uint64_t mask; 21 + u64 mask; 22 22 23 23 shift = level * (vm->page_shift - 3) + vm->page_shift; 24 24 mask = (1UL << (vm->page_shift - 3)) - 1; 25 25 return (gva >> shift) & mask; 26 26 } 27 27 28 - static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry) 28 + static u64 pte_addr(struct kvm_vm *vm, u64 entry) 29 29 { 30 30 return entry & ~((0x1UL << vm->page_shift) - 1); 31 31 } 32 32 33 - static uint64_t ptrs_per_pte(struct kvm_vm *vm) 33 + static u64 ptrs_per_pte(struct kvm_vm *vm) 34 34 { 35 35 return 1 << (vm->page_shift - 3); 36 36 } 37 37 38 - static void virt_set_pgtable(struct kvm_vm *vm, vm_paddr_t table, vm_paddr_t child) 38 + static void virt_set_pgtable(struct kvm_vm *vm, gpa_t table, gpa_t child) 39 39 { 40 - uint64_t *ptep; 40 + u64 *ptep; 41 41 int i, ptrs_per_pte; 42 42 43 43 ptep = addr_gpa2hva(vm, table); ··· 49 49 void virt_arch_pgd_alloc(struct kvm_vm *vm) 50 50 { 51 51 int i; 52 - vm_paddr_t child, table; 52 + gpa_t child, table; 53 53 54 54 if (vm->mmu.pgd_created) 55 55 return; ··· 67 67 vm->mmu.pgd_created = true; 68 68 } 69 69 70 - static int virt_pte_none(uint64_t *ptep, int level) 70 + static int virt_pte_none(u64 *ptep, int level) 71 71 { 72 72 return *ptep == invalid_pgtable[level]; 73 73 } 74 74 75 - static uint64_t *virt_populate_pte(struct kvm_vm *vm, vm_vaddr_t gva, int alloc) 75 + static u64 *virt_populate_pte(struct kvm_vm *vm, gva_t gva, int alloc) 76 76 { 77 77 int level; 78 - uint64_t *ptep; 79 - vm_paddr_t child; 78 + u64 *ptep; 79 + gpa_t child; 80 80 81 81 if (!vm->mmu.pgd_created) 82 82 goto unmapped_gva; ··· 106 106 exit(EXIT_FAILURE); 107 107 } 108 108 109 - vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) 109 + gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva) 110 110 { 111 - uint64_t *ptep; 111 + u64 *ptep; 112 112 113 113 ptep = virt_populate_pte(vm, gva, 0); 114 - TEST_ASSERT(*ptep != 0, "Virtual address vaddr: 0x%lx not mapped\n", gva); 114 + TEST_ASSERT(*ptep != 0, "Virtual address gva: 0x%lx not mapped\n", gva); 115 115 116 116 return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1)); 117 117 } 118 118 119 - void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr) 119 + void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa) 120 120 { 121 - uint32_t prot_bits; 122 - uint64_t *ptep; 121 + u32 prot_bits; 122 + u64 *ptep; 123 123 124 - TEST_ASSERT((vaddr % vm->page_size) == 0, 124 + TEST_ASSERT((gva % vm->page_size) == 0, 125 125 "Virtual address not on page boundary,\n" 126 - "vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size); 127 - TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, 128 - (vaddr >> vm->page_shift)), 129 - "Invalid virtual address, vaddr: 0x%lx", vaddr); 130 - TEST_ASSERT((paddr % vm->page_size) == 0, 126 + "gva: 0x%lx vm->page_size: 0x%x", gva, vm->page_size); 127 + TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)), 128 + "Invalid virtual address, gva: 0x%lx", gva); 129 + TEST_ASSERT((gpa % vm->page_size) == 0, 131 130 "Physical address not on page boundary,\n" 132 - "paddr: 0x%lx 
vm->page_size: 0x%x", paddr, vm->page_size); 133 - TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn, 131 + "gpa: 0x%lx vm->page_size: 0x%x", gpa, vm->page_size); 132 + TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn, 134 133 "Physical address beyond maximum supported,\n" 135 - "paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", 136 - paddr, vm->max_gfn, vm->page_size); 134 + "gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", 135 + gpa, vm->max_gfn, vm->page_size); 137 136 138 - ptep = virt_populate_pte(vm, vaddr, 1); 137 + ptep = virt_populate_pte(vm, gva, 1); 139 138 prot_bits = _PAGE_PRESENT | __READABLE | __WRITEABLE | _CACHE_CC | _PAGE_USER; 140 - WRITE_ONCE(*ptep, paddr | prot_bits); 139 + WRITE_ONCE(*ptep, gpa | prot_bits); 141 140 } 142 141 143 - static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level) 142 + static void pte_dump(FILE *stream, struct kvm_vm *vm, u8 indent, u64 page, int level) 144 143 { 145 - uint64_t pte, *ptep; 144 + u64 pte, *ptep; 146 145 static const char * const type[] = { "pte", "pmd", "pud", "pgd"}; 147 146 148 147 if (level < 0) ··· 157 158 } 158 159 } 159 160 160 - void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) 161 + void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent) 161 162 { 162 163 int level; 163 164 ··· 168 169 pte_dump(stream, vm, indent, vm->mmu.pgd, level); 169 170 } 170 171 171 - void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent) 172 + void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, u8 indent) 172 173 { 173 174 } 174 175 ··· 205 206 { 206 207 void *addr; 207 208 208 - vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers), 209 - LOONGARCH_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA); 209 + vm->handlers = __vm_alloc(vm, sizeof(struct handlers), 210 + LOONGARCH_GUEST_STACK_VADDR_MIN, 211 + MEM_REGION_DATA); 210 212 211 213 addr = addr_gva2hva(vm, vm->handlers); 212 214 memset(addr, 0, vm->page_size); ··· 223 223 handlers->exception_handlers[vector] = handler; 224 224 } 225 225 226 - uint32_t guest_get_vcpuid(void) 226 + u32 guest_get_vcpuid(void) 227 227 { 228 228 return csr_read(LOONGARCH_CSR_CPUID); 229 229 } ··· 241 241 242 242 va_start(ap, num); 243 243 for (i = 0; i < num; i++) 244 - regs.gpr[i + 4] = va_arg(ap, uint64_t); 244 + regs.gpr[i + 4] = va_arg(ap, u64); 245 245 va_end(ap); 246 246 247 247 vcpu_regs_set(vcpu, &regs); 248 248 } 249 249 250 - static void loongarch_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val) 250 + static void loongarch_set_reg(struct kvm_vcpu *vcpu, u64 id, u64 val) 251 251 { 252 252 __vcpu_set_reg(vcpu, id, val); 253 253 } 254 254 255 - static void loongarch_set_cpucfg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val) 255 + static void loongarch_set_cpucfg(struct kvm_vcpu *vcpu, u64 id, u64 val) 256 256 { 257 - uint64_t cfgid; 257 + u64 cfgid; 258 258 259 259 cfgid = KVM_REG_LOONGARCH_CPUCFG | KVM_REG_SIZE_U64 | 8 * id; 260 260 __vcpu_set_reg(vcpu, cfgid, val); 261 261 } 262 262 263 - static void loongarch_get_csr(struct kvm_vcpu *vcpu, uint64_t id, void *addr) 263 + static void loongarch_get_csr(struct kvm_vcpu *vcpu, u64 id, void *addr) 264 264 { 265 - uint64_t csrid; 265 + u64 csrid; 266 266 267 267 csrid = KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | 8 * id; 268 268 __vcpu_get_reg(vcpu, csrid, addr); 269 269 } 270 270 271 - static void loongarch_set_csr(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val) 271 + static void loongarch_set_csr(struct kvm_vcpu *vcpu, u64 id, u64 val) 272 272 { 273 - 
uint64_t csrid; 273 + u64 csrid; 274 274 275 275 csrid = KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | 8 * id; 276 276 __vcpu_set_reg(vcpu, csrid, val); ··· 354 354 loongarch_set_csr(vcpu, LOONGARCH_CSR_STLBPGSIZE, PS_DEFAULT_SIZE); 355 355 356 356 /* LOONGARCH_CSR_KS1 is used for exception stack */ 357 - val = __vm_vaddr_alloc(vm, vm->page_size, 358 - LOONGARCH_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA); 357 + val = __vm_alloc(vm, vm->page_size, LOONGARCH_GUEST_STACK_VADDR_MIN, 358 + MEM_REGION_DATA); 359 359 TEST_ASSERT(val != 0, "No memory for exception stack"); 360 360 val = val + vm->page_size; 361 361 loongarch_set_csr(vcpu, LOONGARCH_CSR_KS1, val); ··· 369 369 loongarch_set_csr(vcpu, LOONGARCH_CSR_TMID, vcpu->id); 370 370 } 371 371 372 - struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) 372 + struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id) 373 373 { 374 374 size_t stack_size; 375 - uint64_t stack_vaddr; 375 + u64 stack_gva; 376 376 struct kvm_regs regs; 377 377 struct kvm_vcpu *vcpu; 378 378 379 379 vcpu = __vm_vcpu_add(vm, vcpu_id); 380 380 stack_size = vm->page_size; 381 - stack_vaddr = __vm_vaddr_alloc(vm, stack_size, 382 - LOONGARCH_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA); 383 - TEST_ASSERT(stack_vaddr != 0, "No memory for vm stack"); 381 + stack_gva = __vm_alloc(vm, stack_size, 382 + LOONGARCH_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA); 383 + TEST_ASSERT(stack_gva != 0, "No memory for vm stack"); 384 384 385 385 loongarch_vcpu_setup(vcpu); 386 386 /* Setup guest general purpose registers */ 387 387 vcpu_regs_get(vcpu, &regs); 388 - regs.gpr[3] = stack_vaddr + stack_size; 388 + regs.gpr[3] = stack_gva + stack_size; 389 389 vcpu_regs_set(vcpu, &regs); 390 390 391 391 return vcpu; ··· 397 397 398 398 /* Setup guest PC register */ 399 399 vcpu_regs_get(vcpu, &regs); 400 - regs.pc = (uint64_t)guest_code; 400 + regs.pc = (u64)guest_code; 401 401 vcpu_regs_set(vcpu, &regs); 402 402 }
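For readers tracking the rename, the new selftest address types are plain fixed-width aliases, and the LoongArch walker above indexes each page-table level with a shift derived from the page size. The sketch below is not taken from this commit: the typedef placement is an assumption (their real home is a shared header not shown here), and pte_index_sketch() merely restates the virt_pte_index() arithmetic in standalone form.

#include <stdint.h>

/* Assumed shape of the renamed selftest types (header location not shown
 * in this hunk). */
typedef uint64_t u64;
typedef uint64_t gva_t;		/* guest virtual address */
typedef uint64_t gpa_t;		/* guest physical address */

/* Standalone restatement of virt_pte_index(): with 8-byte entries, each
 * level resolves (page_shift - 3) bits of the guest virtual address. */
static u64 pte_index_sketch(gva_t gva, unsigned int page_shift, int level)
{
	unsigned int shift = level * (page_shift - 3) + page_shift;
	u64 mask = (1ULL << (page_shift - 3)) - 1;

	return (gva >> shift) & mask;
}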
+6 -6
tools/testing/selftests/kvm/lib/loongarch/ucall.c
··· 9 9 * ucall_exit_mmio_addr holds per-VM values (global data is duplicated by each 10 10 * VM), it must not be accessed from host code. 11 11 */ 12 - vm_vaddr_t *ucall_exit_mmio_addr; 12 + gva_t *ucall_exit_mmio_addr; 13 13 14 - void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) 14 + void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa) 15 15 { 16 - vm_vaddr_t mmio_gva = vm_vaddr_unused_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR); 16 + gva_t mmio_gva = vm_unused_gva_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR); 17 17 18 18 virt_map(vm, mmio_gva, mmio_gpa, 1); 19 19 20 20 vm->ucall_mmio_addr = mmio_gpa; 21 21 22 - write_guest_global(vm, ucall_exit_mmio_addr, (vm_vaddr_t *)mmio_gva); 22 + write_guest_global(vm, ucall_exit_mmio_addr, (gva_t *)mmio_gva); 23 23 } 24 24 25 25 void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu) ··· 28 28 29 29 if (run->exit_reason == KVM_EXIT_MMIO && 30 30 run->mmio.phys_addr == vcpu->vm->ucall_mmio_addr) { 31 - TEST_ASSERT(run->mmio.is_write && run->mmio.len == sizeof(uint64_t), 31 + TEST_ASSERT(run->mmio.is_write && run->mmio.len == sizeof(u64), 32 32 "Unexpected ucall exit mmio address access"); 33 33 34 - return (void *)(*((uint64_t *)run->mmio.data)); 34 + return (void *)(*((u64 *)run->mmio.data)); 35 35 } 36 36 37 37 return NULL;
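The hunk above shows only the host side of the MMIO-based ucall. The guest side (not part of this diff; sketched from what the handler expects) amounts to a single 8-byte store to the page mapped at mmio_gva: that GPA is backed by no memslot, so the store exits to userspace as KVM_EXIT_MMIO and the host recovers the ucall pointer from run->mmio.data.

#include <stdint.h>

typedef uint64_t gva_t;

/* Guest-visible global written by ucall_arch_init() above. */
extern gva_t *ucall_exit_mmio_addr;

/* Sketch of the guest-side trigger: the stored value is the host-usable
 * pointer to the ucall slot, which the handler above reads back out of
 * the MMIO exit payload. */
static inline void ucall_mmio_store_sketch(gva_t uc)
{
	*(volatile gva_t *)ucall_exit_mmio_addr = uc;
}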
+19 -19
tools/testing/selftests/kvm/lib/memstress.c
··· 16 16 * Guest virtual memory offset of the testing memory slot. 17 17 * Must not conflict with identity mapped test code. 18 18 */ 19 - static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM; 19 + static u64 guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM; 20 20 21 21 struct vcpu_thread { 22 22 /* The index of the vCPU. */ ··· 44 44 * Continuously write to the first 8 bytes of each page in the 45 45 * specified region. 46 46 */ 47 - void memstress_guest_code(uint32_t vcpu_idx) 47 + void memstress_guest_code(u32 vcpu_idx) 48 48 { 49 49 struct memstress_args *args = &memstress_args; 50 50 struct memstress_vcpu_args *vcpu_args = &args->vcpu_args[vcpu_idx]; 51 51 struct guest_random_state rand_state; 52 - uint64_t gva; 53 - uint64_t pages; 54 - uint64_t addr; 55 - uint64_t page; 52 + gva_t gva; 53 + u64 pages; 54 + u64 addr; 55 + u64 page; 56 56 int i; 57 57 58 58 rand_state = new_guest_random_state(guest_random_seed + vcpu_idx); ··· 76 76 addr = gva + (page * args->guest_page_size); 77 77 78 78 if (__guest_random_bool(&rand_state, args->write_percent)) 79 - *(uint64_t *)addr = 0x0123456789ABCDEF; 79 + *(u64 *)addr = 0x0123456789ABCDEF; 80 80 else 81 - READ_ONCE(*(uint64_t *)addr); 81 + READ_ONCE(*(u64 *)addr); 82 82 } 83 83 84 84 GUEST_SYNC(1); ··· 87 87 88 88 void memstress_setup_vcpus(struct kvm_vm *vm, int nr_vcpus, 89 89 struct kvm_vcpu *vcpus[], 90 - uint64_t vcpu_memory_bytes, 90 + u64 vcpu_memory_bytes, 91 91 bool partition_vcpu_memory_access) 92 92 { 93 93 struct memstress_args *args = &memstress_args; ··· 122 122 } 123 123 124 124 struct kvm_vm *memstress_create_vm(enum vm_guest_mode mode, int nr_vcpus, 125 - uint64_t vcpu_memory_bytes, int slots, 125 + u64 vcpu_memory_bytes, int slots, 126 126 enum vm_mem_backing_src_type backing_src, 127 127 bool partition_vcpu_memory_access) 128 128 { 129 129 struct memstress_args *args = &memstress_args; 130 130 struct kvm_vm *vm; 131 - uint64_t guest_num_pages, slot0_pages = 0; 132 - uint64_t backing_src_pagesz = get_backing_src_pagesz(backing_src); 133 - uint64_t region_end_gfn; 131 + u64 guest_num_pages, slot0_pages = 0; 132 + u64 backing_src_pagesz = get_backing_src_pagesz(backing_src); 133 + u64 region_end_gfn; 134 134 int i; 135 135 136 136 pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode)); ··· 202 202 203 203 /* Add extra memory slots for testing */ 204 204 for (i = 0; i < slots; i++) { 205 - uint64_t region_pages = guest_num_pages / slots; 206 - vm_paddr_t region_start = args->gpa + region_pages * args->guest_page_size * i; 205 + u64 region_pages = guest_num_pages / slots; 206 + gpa_t region_start = args->gpa + region_pages * args->guest_page_size * i; 207 207 208 208 vm_userspace_mem_region_add(vm, backing_src, region_start, 209 209 MEMSTRESS_MEM_SLOT_INDEX + i, ··· 232 232 kvm_vm_free(vm); 233 233 } 234 234 235 - void memstress_set_write_percent(struct kvm_vm *vm, uint32_t write_percent) 235 + void memstress_set_write_percent(struct kvm_vm *vm, u32 write_percent) 236 236 { 237 237 memstress_args.write_percent = write_percent; 238 238 sync_global_to_guest(vm, memstress_args.write_percent); ··· 244 244 sync_global_to_guest(vm, memstress_args.random_access); 245 245 } 246 246 247 - uint64_t __weak memstress_nested_pages(int nr_vcpus) 247 + u64 __weak memstress_nested_pages(int nr_vcpus) 248 248 { 249 249 return 0; 250 250 } ··· 349 349 } 350 350 351 351 void memstress_clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], 352 - int slots, uint64_t pages_per_slot) 352 + int slots, u64 pages_per_slot) 353 353 { 354 
354 int i; 355 355 ··· 360 360 } 361 361 } 362 362 363 - unsigned long **memstress_alloc_bitmaps(int slots, uint64_t pages_per_slot) 363 + unsigned long **memstress_alloc_bitmaps(int slots, u64 pages_per_slot) 364 364 { 365 365 unsigned long **bitmaps; 366 366 int i;
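As a self-contained illustration of the access pattern memstress_guest_code() drives, the sketch below restates the read/write loop on its own; it is not selftest code, rand() stands in for the guest PRNG, and the sequential-vs-random page selection elided above is reduced to a simple sweep.

#include <stdint.h>
#include <stdlib.h>

typedef uint64_t u64;
typedef uint64_t gva_t;

/* Touch the first 8 bytes of every page in [base, base + pages * page_size),
 * writing with probability write_percent and reading otherwise. */
static void touch_region_sketch(gva_t base, u64 pages, u64 page_size,
				int write_percent)
{
	u64 page;

	for (page = 0; page < pages; page++) {
		volatile u64 *addr = (volatile u64 *)(base + page * page_size);

		if ((rand() % 100) < write_percent)
			*addr = 0x0123456789ABCDEFULL;
		else
			(void)*addr;	/* plain read of the first 8 bytes */
	}
}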
+45 -46
tools/testing/selftests/kvm/lib/riscv/processor.c
··· 15 15 16 16 #define DEFAULT_RISCV_GUEST_STACK_VADDR_MIN 0xac0000 17 17 18 - static vm_vaddr_t exception_handlers; 18 + static gva_t exception_handlers; 19 19 20 - bool __vcpu_has_ext(struct kvm_vcpu *vcpu, uint64_t ext) 20 + bool __vcpu_has_ext(struct kvm_vcpu *vcpu, u64 ext) 21 21 { 22 22 unsigned long value = 0; 23 23 int ret; ··· 27 27 return !ret && !!value; 28 28 } 29 29 30 - static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry) 30 + static u64 pte_addr(struct kvm_vm *vm, u64 entry) 31 31 { 32 32 return ((entry & PGTBL_PTE_ADDR_MASK) >> PGTBL_PTE_ADDR_SHIFT) << 33 33 PGTBL_PAGE_SIZE_SHIFT; 34 34 } 35 35 36 - static uint64_t ptrs_per_pte(struct kvm_vm *vm) 36 + static u64 ptrs_per_pte(struct kvm_vm *vm) 37 37 { 38 - return PGTBL_PAGE_SIZE / sizeof(uint64_t); 38 + return PGTBL_PAGE_SIZE / sizeof(u64); 39 39 } 40 40 41 - static uint64_t pte_index_mask[] = { 41 + static u64 pte_index_mask[] = { 42 42 PGTBL_L0_INDEX_MASK, 43 43 PGTBL_L1_INDEX_MASK, 44 44 PGTBL_L2_INDEX_MASK, 45 45 PGTBL_L3_INDEX_MASK, 46 46 }; 47 47 48 - static uint32_t pte_index_shift[] = { 48 + static u32 pte_index_shift[] = { 49 49 PGTBL_L0_INDEX_SHIFT, 50 50 PGTBL_L1_INDEX_SHIFT, 51 51 PGTBL_L2_INDEX_SHIFT, 52 52 PGTBL_L3_INDEX_SHIFT, 53 53 }; 54 54 55 - static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level) 55 + static u64 pte_index(struct kvm_vm *vm, gva_t gva, int level) 56 56 { 57 57 TEST_ASSERT(level > -1, 58 58 "Negative page table level (%d) not possible", level); ··· 75 75 vm->mmu.pgd_created = true; 76 76 } 77 77 78 - void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr) 78 + void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa) 79 79 { 80 - uint64_t *ptep, next_ppn; 80 + u64 *ptep, next_ppn; 81 81 int level = vm->mmu.pgtable_levels - 1; 82 82 83 - TEST_ASSERT((vaddr % vm->page_size) == 0, 83 + TEST_ASSERT((gva % vm->page_size) == 0, 84 84 "Virtual address not on page boundary,\n" 85 - " vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size); 86 - TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, 87 - (vaddr >> vm->page_shift)), 88 - "Invalid virtual address, vaddr: 0x%lx", vaddr); 89 - TEST_ASSERT((paddr % vm->page_size) == 0, 85 + " gva: 0x%lx vm->page_size: 0x%x", gva, vm->page_size); 86 + TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)), 87 + "Invalid virtual address, gva: 0x%lx", gva); 88 + TEST_ASSERT((gpa % vm->page_size) == 0, 90 89 "Physical address not on page boundary,\n" 91 - " paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size); 92 - TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn, 90 + " gpa: 0x%lx vm->page_size: 0x%x", gpa, vm->page_size); 91 + TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn, 93 92 "Physical address beyond maximum supported,\n" 94 - " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", 95 - paddr, vm->max_gfn, vm->page_size); 93 + " gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", 94 + gpa, vm->max_gfn, vm->page_size); 96 95 97 - ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pte_index(vm, vaddr, level) * 8; 96 + ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pte_index(vm, gva, level) * 8; 98 97 if (!*ptep) { 99 98 next_ppn = vm_alloc_page_table(vm) >> PGTBL_PAGE_SIZE_SHIFT; 100 99 *ptep = (next_ppn << PGTBL_PTE_ADDR_SHIFT) | ··· 103 104 104 105 while (level > -1) { 105 106 ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + 106 - pte_index(vm, vaddr, level) * 8; 107 + pte_index(vm, gva, level) * 8; 107 108 if (!*ptep && level > 0) { 108 109 next_ppn = vm_alloc_page_table(vm) >> 109 110 
PGTBL_PAGE_SIZE_SHIFT; ··· 113 114 level--; 114 115 } 115 116 116 - paddr = paddr >> PGTBL_PAGE_SIZE_SHIFT; 117 - *ptep = (paddr << PGTBL_PTE_ADDR_SHIFT) | 117 + gpa = gpa >> PGTBL_PAGE_SIZE_SHIFT; 118 + *ptep = (gpa << PGTBL_PTE_ADDR_SHIFT) | 118 119 PGTBL_PTE_PERM_MASK | PGTBL_PTE_VALID_MASK; 119 120 } 120 121 121 - vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) 122 + gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva) 122 123 { 123 - uint64_t *ptep; 124 + u64 *ptep; 124 125 int level = vm->mmu.pgtable_levels - 1; 125 126 126 127 if (!vm->mmu.pgd_created) ··· 147 148 exit(1); 148 149 } 149 150 150 - static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, 151 - uint64_t page, int level) 151 + static void pte_dump(FILE *stream, struct kvm_vm *vm, u8 indent, 152 + u64 page, int level) 152 153 { 153 154 #ifdef DEBUG 154 155 static const char *const type[] = { "pte", "pmd", "pud", "p4d"}; 155 - uint64_t pte, *ptep; 156 + u64 pte, *ptep; 156 157 157 158 if (level < 0) 158 159 return; ··· 169 170 #endif 170 171 } 171 172 172 - void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) 173 + void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent) 173 174 { 174 175 struct kvm_mmu *mmu = &vm->mmu; 175 176 int level = mmu->pgtable_levels - 1; 176 - uint64_t pgd, *ptep; 177 + u64 pgd, *ptep; 177 178 178 179 if (!mmu->pgd_created) 179 180 return; ··· 232 233 vcpu_set_reg(vcpu, RISCV_GENERAL_CSR_REG(satp), satp); 233 234 } 234 235 235 - void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent) 236 + void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, u8 indent) 236 237 { 237 238 struct kvm_riscv_core core; 238 239 ··· 310 311 vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.pc), (unsigned long)guest_code); 311 312 } 312 313 313 - struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) 314 + struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id) 314 315 { 315 316 int r; 316 317 size_t stack_size; 317 - unsigned long stack_vaddr; 318 + unsigned long stack_gva; 318 319 unsigned long current_gp = 0; 319 320 struct kvm_mp_state mps; 320 321 struct kvm_vcpu *vcpu; 321 322 322 323 stack_size = vm->page_size == 4096 ? DEFAULT_STACK_PGS * vm->page_size : 323 324 vm->page_size; 324 - stack_vaddr = __vm_vaddr_alloc(vm, stack_size, 325 - DEFAULT_RISCV_GUEST_STACK_VADDR_MIN, 326 - MEM_REGION_DATA); 325 + stack_gva = __vm_alloc(vm, stack_size, 326 + DEFAULT_RISCV_GUEST_STACK_VADDR_MIN, 327 + MEM_REGION_DATA); 327 328 328 329 vcpu = __vm_vcpu_add(vm, vcpu_id); 329 330 riscv_vcpu_mmu_setup(vcpu); ··· 343 344 vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.gp), current_gp); 344 345 345 346 /* Setup stack pointer and program counter of guest */ 346 - vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.sp), stack_vaddr + stack_size); 347 + vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.sp), stack_gva + stack_size); 347 348 348 349 /* Setup sscratch for guest_get_vcpuid() */ 349 350 vcpu_set_reg(vcpu, RISCV_GENERAL_CSR_REG(sscratch), vcpu_id); ··· 357 358 void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...) 
358 359 { 359 360 va_list ap; 360 - uint64_t id = RISCV_CORE_REG(regs.a0); 361 + u64 id = RISCV_CORE_REG(regs.a0); 361 362 int i; 362 363 363 364 TEST_ASSERT(num >= 1 && num <= 8, "Unsupported number of args,\n" ··· 392 393 id = RISCV_CORE_REG(regs.a7); 393 394 break; 394 395 } 395 - vcpu_set_reg(vcpu, id, va_arg(ap, uint64_t)); 396 + vcpu_set_reg(vcpu, id, va_arg(ap, u64)); 396 397 } 397 398 398 399 va_end(ap); ··· 448 449 449 450 void vm_init_vector_tables(struct kvm_vm *vm) 450 451 { 451 - vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers), 452 - vm->page_size, MEM_REGION_DATA); 452 + vm->handlers = __vm_alloc(vm, sizeof(struct handlers), vm->page_size, 453 + MEM_REGION_DATA); 453 454 454 - *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers; 455 + *(gva_t *)addr_gva2hva(vm, (gva_t)(&exception_handlers)) = vm->handlers; 455 456 } 456 457 457 458 void vm_install_exception_handler(struct kvm_vm *vm, int vector, exception_handler_fn handler) ··· 469 470 handlers->exception_handlers[1][0] = handler; 470 471 } 471 472 472 - uint32_t guest_get_vcpuid(void) 473 + u32 guest_get_vcpuid(void) 473 474 { 474 475 return csr_read(CSR_SSCRATCH); 475 476 } ··· 543 544 unsigned long riscv64_get_satp_mode(void) 544 545 { 545 546 int kvm_fd, vm_fd, vcpu_fd, err; 546 - uint64_t val; 547 + u64 val; 547 548 struct kvm_one_reg reg = { 548 549 .id = RISCV_CONFIG_REG(satp_mode), 549 - .addr = (uint64_t)&val, 550 + .addr = (u64)&val, 550 551 }; 551 552 552 553 kvm_fd = open_kvm_dev_path_or_exit();
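The RISC-V walker above packs and unpacks the physical page number (PPN) through PGTBL_PTE_ADDR_MASK/PGTBL_PTE_ADDR_SHIFT. A standalone sketch of that conversion, assuming the conventional Sv39/Sv48 layout (44-bit PPN field starting at PTE bit 10, 4 KiB pages) rather than the selftest's actual macro values:

#include <stdint.h>

typedef uint64_t u64;
typedef uint64_t gpa_t;

#define PTE_PPN_SHIFT	10	/* assumed: PPN field starts at bit 10 */
#define PTE_PPN_BITS	44	/* assumed width of the PPN field */
#define PTE_PPN_MASK	(((1ULL << PTE_PPN_BITS) - 1) << PTE_PPN_SHIFT)
#define PAGE_SHIFT_4K	12

/* Leaf PTE -> physical address of the page it maps (mirrors pte_addr()). */
static gpa_t pte_to_gpa_sketch(u64 pte)
{
	return ((pte & PTE_PPN_MASK) >> PTE_PPN_SHIFT) << PAGE_SHIFT_4K;
}

/* Physical address -> PPN payload of a PTE; valid/permission bits omitted,
 * see virt_arch_pg_map() above for the full composition. */
static u64 gpa_to_pte_sketch(gpa_t gpa)
{
	return (gpa >> PAGE_SHIFT_4K) << PTE_PPN_SHIFT;
}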
+6 -6
tools/testing/selftests/kvm/lib/s390/diag318_test_handler.c
··· 13 13 14 14 static void guest_code(void) 15 15 { 16 - uint64_t diag318_info = 0x12345678; 16 + u64 diag318_info = 0x12345678; 17 17 18 18 asm volatile ("diag %0,0,0x318\n" : : "d" (diag318_info)); 19 19 } ··· 23 23 * we create an ad-hoc VM here to handle the instruction then extract the 24 24 * necessary data. It is up to the caller to decide what to do with that data. 25 25 */ 26 - static uint64_t diag318_handler(void) 26 + static u64 diag318_handler(void) 27 27 { 28 28 struct kvm_vcpu *vcpu; 29 29 struct kvm_vm *vm; 30 30 struct kvm_run *run; 31 - uint64_t reg; 32 - uint64_t diag318_info; 31 + u64 reg; 32 + u64 diag318_info; 33 33 34 34 vm = vm_create_with_one_vcpu(&vcpu, guest_code); 35 35 vcpu_run(vcpu); ··· 51 51 return diag318_info; 52 52 } 53 53 54 - uint64_t get_diag318_info(void) 54 + u64 get_diag318_info(void) 55 55 { 56 - static uint64_t diag318_info; 56 + static u64 diag318_info; 57 57 static bool printed_skip; 58 58 59 59 /*
+1 -1
tools/testing/selftests/kvm/lib/s390/facility.c
··· 10 10 11 11 #include "facility.h" 12 12 13 - uint64_t stfl_doublewords[NB_STFL_DOUBLEWORDS]; 13 + u64 stfl_doublewords[NB_STFL_DOUBLEWORDS]; 14 14 bool stfle_flag;
+31 -34
tools/testing/selftests/kvm/lib/s390/processor.c
··· 12 12 13 13 void virt_arch_pgd_alloc(struct kvm_vm *vm) 14 14 { 15 - vm_paddr_t paddr; 15 + gpa_t gpa; 16 16 17 17 TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x", 18 18 vm->page_size); ··· 20 20 if (vm->mmu.pgd_created) 21 21 return; 22 22 23 - paddr = vm_phy_pages_alloc(vm, PAGES_PER_REGION, 23 + gpa = vm_phy_pages_alloc(vm, PAGES_PER_REGION, 24 24 KVM_GUEST_PAGE_TABLE_MIN_PADDR, 25 25 vm->memslots[MEM_REGION_PT]); 26 - memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size); 26 + memset(addr_gpa2hva(vm, gpa), 0xff, PAGES_PER_REGION * vm->page_size); 27 27 28 - vm->mmu.pgd = paddr; 28 + vm->mmu.pgd = gpa; 29 29 vm->mmu.pgd_created = true; 30 30 } 31 31 ··· 34 34 * a page table (ri == 4). Returns a suitable region/segment table entry 35 35 * which points to the freshly allocated pages. 36 36 */ 37 - static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri) 37 + static u64 virt_alloc_region(struct kvm_vm *vm, int ri) 38 38 { 39 - uint64_t taddr; 39 + u64 taddr; 40 40 41 41 taddr = vm_phy_pages_alloc(vm, ri < 4 ? PAGES_PER_REGION : 1, 42 42 KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0); ··· 47 47 | ((ri < 4 ? (PAGES_PER_REGION - 1) : 0) & REGION_ENTRY_LENGTH); 48 48 } 49 49 50 - void virt_arch_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa) 50 + void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa) 51 51 { 52 52 int ri, idx; 53 - uint64_t *entry; 53 + u64 *entry; 54 54 55 55 TEST_ASSERT((gva % vm->page_size) == 0, 56 - "Virtual address not on page boundary,\n" 57 - " vaddr: 0x%lx vm->page_size: 0x%x", 58 - gva, vm->page_size); 59 - TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, 60 - (gva >> vm->page_shift)), 61 - "Invalid virtual address, vaddr: 0x%lx", 62 - gva); 56 + "Virtual address not on page boundary,\n" 57 + " gva: 0x%lx vm->page_size: 0x%x", 58 + gva, vm->page_size); 59 + TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)), 60 + "Invalid virtual address, gva: 0x%lx", gva); 63 61 TEST_ASSERT((gpa % vm->page_size) == 0, 64 62 "Physical address not on page boundary,\n" 65 - " paddr: 0x%lx vm->page_size: 0x%x", 63 + " gpa: 0x%lx vm->page_size: 0x%x", 66 64 gva, vm->page_size); 67 65 TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn, 68 66 "Physical address beyond beyond maximum supported,\n" 69 - " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", 67 + " gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", 70 68 gva, vm->max_gfn, vm->page_size); 71 69 72 70 /* Walk through region and segment tables */ ··· 84 86 entry[idx] = gpa; 85 87 } 86 88 87 - vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) 89 + gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva) 88 90 { 89 91 int ri, idx; 90 - uint64_t *entry; 92 + u64 *entry; 91 93 92 94 TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x", 93 95 vm->page_size); ··· 109 111 return (entry[idx] & ~0xffful) + (gva & 0xffful); 110 112 } 111 113 112 - static void virt_dump_ptes(FILE *stream, struct kvm_vm *vm, uint8_t indent, 113 - uint64_t ptea_start) 114 + static void virt_dump_ptes(FILE *stream, struct kvm_vm *vm, u8 indent, 115 + u64 ptea_start) 114 116 { 115 - uint64_t *pte, ptea; 117 + u64 *pte, ptea; 116 118 117 119 for (ptea = ptea_start; ptea < ptea_start + 0x100 * 8; ptea += 8) { 118 120 pte = addr_gpa2hva(vm, ptea); ··· 123 125 } 124 126 } 125 127 126 - static void virt_dump_region(FILE *stream, struct kvm_vm *vm, uint8_t indent, 127 - uint64_t reg_tab_addr) 128 + static void virt_dump_region(FILE *stream, struct kvm_vm *vm, u8 indent, 
129 + u64 reg_tab_addr) 128 130 { 129 - uint64_t addr, *entry; 131 + u64 addr, *entry; 130 132 131 133 for (addr = reg_tab_addr; addr < reg_tab_addr + 0x400 * 8; addr += 8) { 132 134 entry = addr_gpa2hva(vm, addr); ··· 145 147 } 146 148 } 147 149 148 - void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) 150 + void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent) 149 151 { 150 152 if (!vm->mmu.pgd_created) 151 153 return; ··· 158 160 vcpu->run->psw_addr = (uintptr_t)guest_code; 159 161 } 160 162 161 - struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) 163 + struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id) 162 164 { 163 165 size_t stack_size = DEFAULT_STACK_PGS * getpagesize(); 164 - uint64_t stack_vaddr; 166 + u64 stack_gva; 165 167 struct kvm_regs regs; 166 168 struct kvm_sregs sregs; 167 169 struct kvm_vcpu *vcpu; ··· 169 171 TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x", 170 172 vm->page_size); 171 173 172 - stack_vaddr = __vm_vaddr_alloc(vm, stack_size, 173 - DEFAULT_GUEST_STACK_VADDR_MIN, 174 - MEM_REGION_DATA); 174 + stack_gva = __vm_alloc(vm, stack_size, DEFAULT_GUEST_STACK_VADDR_MIN, 175 + MEM_REGION_DATA); 175 176 176 177 vcpu = __vm_vcpu_add(vm, vcpu_id); 177 178 178 179 /* Setup guest registers */ 179 180 vcpu_regs_get(vcpu, &regs); 180 - regs.gprs[15] = stack_vaddr + (DEFAULT_STACK_PGS * getpagesize()) - 160; 181 + regs.gprs[15] = stack_gva + (DEFAULT_STACK_PGS * getpagesize()) - 160; 181 182 vcpu_regs_set(vcpu, &regs); 182 183 183 184 vcpu_sregs_get(vcpu, &sregs); ··· 203 206 vcpu_regs_get(vcpu, &regs); 204 207 205 208 for (i = 0; i < num; i++) 206 - regs.gprs[i + 2] = va_arg(ap, uint64_t); 209 + regs.gprs[i + 2] = va_arg(ap, u64); 207 210 208 211 vcpu_regs_set(vcpu, &regs); 209 212 va_end(ap); 210 213 } 211 214 212 - void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent) 215 + void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, u8 indent) 213 216 { 214 217 fprintf(stream, "%*spstate: psw: 0x%.16llx:0x%.16llx\n", 215 218 indent, "", vcpu->run->psw_mask, vcpu->run->psw_addr);
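The final composition step in addr_arch_gva2gpa() above is the same across these walkers: mask the status bits out of the leaf entry and splice the in-page offset back in. A standalone sketch, assuming 4 KiB pages to match the PAGE_SIZE assertion above:

#include <stdint.h>

typedef uint64_t u64;
typedef uint64_t gva_t;
typedef uint64_t gpa_t;

#define PAGE_OFFSET_MASK	0xfffULL	/* 4 KiB pages */

/* Leaf page-table entry plus the original gva -> guest physical address. */
static gpa_t leaf_to_gpa_sketch(u64 entry, gva_t gva)
{
	return (entry & ~PAGE_OFFSET_MASK) + (gva & PAGE_OFFSET_MASK);
}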
+9 -9
tools/testing/selftests/kvm/lib/sparsebit.c
··· 76 76 * the use of a binary-search tree, where each node contains at least 77 77 * the following members: 78 78 * 79 - * typedef uint64_t sparsebit_idx_t; 80 - * typedef uint64_t sparsebit_num_t; 79 + * typedef u64 sparsebit_idx_t; 80 + * typedef u64 sparsebit_num_t; 81 81 * 82 82 * sparsebit_idx_t idx; 83 - * uint32_t mask; 83 + * u32 mask; 84 84 * sparsebit_num_t num_after; 85 85 * 86 86 * The idx member contains the bit index of the first bit described by this ··· 162 162 163 163 #define DUMP_LINE_MAX 100 /* Does not include indent amount */ 164 164 165 - typedef uint32_t mask_t; 165 + typedef u32 mask_t; 166 166 #define MASK_BITS (sizeof(mask_t) * CHAR_BIT) 167 167 168 168 struct node { ··· 2056 2056 return ch; 2057 2057 } 2058 2058 2059 - uint64_t get64(void) 2059 + u64 get64(void) 2060 2060 { 2061 - uint64_t x; 2061 + u64 x; 2062 2062 2063 2063 x = get8(); 2064 2064 x = (x << 8) | get8(); ··· 2074 2074 { 2075 2075 s = sparsebit_alloc(); 2076 2076 for (;;) { 2077 - uint8_t op = get8() & 0xf; 2078 - uint64_t first = get64(); 2079 - uint64_t last = get64(); 2077 + u8 op = get8() & 0xf; 2078 + u64 first = get64(); 2079 + u64 last = get64(); 2080 2080 2081 2081 operate(op, first, last); 2082 2082 }
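To make the node encoding described in the comment above concrete, here is a minimal stand-in plus a membership test. This is a sketch based on an assumed reading of that comment, not code from the file: mask is taken to cover the MASK_BITS bits starting at idx, and num_after to count consecutively set bits immediately after that window.

#include <limits.h>
#include <stdbool.h>
#include <stdint.h>

typedef uint32_t u32;
typedef uint64_t u64;
typedef u64 sparsebit_idx_t;
typedef u64 sparsebit_num_t;

#define MASK_BITS	(sizeof(u32) * CHAR_BIT)

/* Minimal stand-in for the node members listed in the comment above. */
struct node_sketch {
	sparsebit_idx_t idx;
	u32 mask;
	sparsebit_num_t num_after;
};

/* Is bit set according to this node? First check the explicit mask window,
 * then the implicit run of num_after set bits that follows it. */
static bool node_bit_is_set_sketch(const struct node_sketch *n, sparsebit_idx_t bit)
{
	if (bit >= n->idx && bit - n->idx < MASK_BITS)
		return n->mask & (1u << (bit - n->idx));

	return bit >= n->idx + MASK_BITS &&
	       bit < n->idx + MASK_BITS + n->num_after;
}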
+15 -15
tools/testing/selftests/kvm/lib/test_util.c
··· 30 30 * Park-Miller LCG using standard constants. 31 31 */ 32 32 33 - struct guest_random_state new_guest_random_state(uint32_t seed) 33 + struct guest_random_state new_guest_random_state(u32 seed) 34 34 { 35 35 struct guest_random_state s = {.seed = seed}; 36 36 return s; 37 37 } 38 38 39 - uint32_t guest_random_u32(struct guest_random_state *state) 39 + u32 guest_random_u32(struct guest_random_state *state) 40 40 { 41 - state->seed = (uint64_t)state->seed * 48271 % ((uint32_t)(1 << 31) - 1); 41 + state->seed = (u64)state->seed * 48271 % ((u32)(1 << 31) - 1); 42 42 return state->seed; 43 43 } 44 44 ··· 83 83 return base << shift; 84 84 } 85 85 86 - int64_t timespec_to_ns(struct timespec ts) 86 + s64 timespec_to_ns(struct timespec ts) 87 87 { 88 - return (int64_t)ts.tv_nsec + 1000000000LL * (int64_t)ts.tv_sec; 88 + return (s64)ts.tv_nsec + 1000000000LL * (s64)ts.tv_sec; 89 89 } 90 90 91 - struct timespec timespec_add_ns(struct timespec ts, int64_t ns) 91 + struct timespec timespec_add_ns(struct timespec ts, s64 ns) 92 92 { 93 93 struct timespec res; 94 94 ··· 101 101 102 102 struct timespec timespec_add(struct timespec ts1, struct timespec ts2) 103 103 { 104 - int64_t ns1 = timespec_to_ns(ts1); 105 - int64_t ns2 = timespec_to_ns(ts2); 104 + s64 ns1 = timespec_to_ns(ts1); 105 + s64 ns2 = timespec_to_ns(ts2); 106 106 return timespec_add_ns((struct timespec){0}, ns1 + ns2); 107 107 } 108 108 109 109 struct timespec timespec_sub(struct timespec ts1, struct timespec ts2) 110 110 { 111 - int64_t ns1 = timespec_to_ns(ts1); 112 - int64_t ns2 = timespec_to_ns(ts2); 111 + s64 ns1 = timespec_to_ns(ts1); 112 + s64 ns2 = timespec_to_ns(ts2); 113 113 return timespec_add_ns((struct timespec){0}, ns1 - ns2); 114 114 } 115 115 ··· 123 123 124 124 struct timespec timespec_div(struct timespec ts, int divisor) 125 125 { 126 - int64_t ns = timespec_to_ns(ts) / divisor; 126 + s64 ns = timespec_to_ns(ts) / divisor; 127 127 128 128 return timespec_add_ns((struct timespec){0}, ns); 129 129 } ··· 225 225 #define ANON_FLAGS (MAP_PRIVATE | MAP_ANONYMOUS) 226 226 #define ANON_HUGE_FLAGS (ANON_FLAGS | MAP_HUGETLB) 227 227 228 - const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i) 228 + const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(u32 i) 229 229 { 230 230 static const struct vm_mem_backing_src_alias aliases[] = { 231 231 [VM_MEM_SRC_ANONYMOUS] = { ··· 317 317 318 318 #define MAP_HUGE_PAGE_SIZE(x) (1ULL << ((x >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK)) 319 319 320 - size_t get_backing_src_pagesz(uint32_t i) 320 + size_t get_backing_src_pagesz(u32 i) 321 321 { 322 - uint32_t flag = vm_mem_backing_src_alias(i)->flag; 322 + u32 flag = vm_mem_backing_src_alias(i)->flag; 323 323 324 324 switch (i) { 325 325 case VM_MEM_SRC_ANONYMOUS: ··· 335 335 } 336 336 } 337 337 338 - bool is_backing_src_hugetlb(uint32_t i) 338 + bool is_backing_src_hugetlb(u32 i) 339 339 { 340 340 return !!(vm_mem_backing_src_alias(i)->flag & MAP_HUGETLB); 341 341 }
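The guest PRNG above is the "minimal standard" Park-Miller generator. Restated on its own (same arithmetic as guest_random_u32(); note the seed must be non-zero, since zero is a fixed point of this recurrence):

#include <stdint.h>

typedef uint32_t u32;
typedef uint64_t u64;

/* x' = 48271 * x mod (2^31 - 1), with the multiply done in 64 bits to
 * avoid overflow. */
static u32 park_miller_next(u32 *seed)
{
	*seed = (u64)*seed * 48271 % ((u32)(1U << 31) - 1);
	return *seed;
}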
+17 -17
tools/testing/selftests/kvm/lib/ucall_common.c
··· 14 14 struct ucall ucalls[KVM_MAX_VCPUS]; 15 15 }; 16 16 17 - int ucall_nr_pages_required(uint64_t page_size) 17 + int ucall_nr_pages_required(u64 page_size) 18 18 { 19 19 return align_up(sizeof(struct ucall_header), page_size) / page_size; 20 20 } ··· 25 25 */ 26 26 static struct ucall_header *ucall_pool; 27 27 28 - void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) 28 + void ucall_init(struct kvm_vm *vm, gpa_t mmio_gpa) 29 29 { 30 30 struct ucall_header *hdr; 31 31 struct ucall *uc; 32 - vm_vaddr_t vaddr; 32 + gva_t gva; 33 33 int i; 34 34 35 - vaddr = vm_vaddr_alloc_shared(vm, sizeof(*hdr), KVM_UTIL_MIN_VADDR, 36 - MEM_REGION_DATA); 37 - hdr = (struct ucall_header *)addr_gva2hva(vm, vaddr); 35 + gva = vm_alloc_shared(vm, sizeof(*hdr), KVM_UTIL_MIN_VADDR, 36 + MEM_REGION_DATA); 37 + hdr = (struct ucall_header *)addr_gva2hva(vm, gva); 38 38 memset(hdr, 0, sizeof(*hdr)); 39 39 40 40 for (i = 0; i < KVM_MAX_VCPUS; ++i) { ··· 42 42 uc->hva = uc; 43 43 } 44 44 45 - write_guest_global(vm, ucall_pool, (struct ucall_header *)vaddr); 45 + write_guest_global(vm, ucall_pool, (struct ucall_header *)gva); 46 46 47 47 ucall_arch_init(vm, mmio_gpa); 48 48 } ··· 79 79 clear_bit(uc - ucall_pool->ucalls, ucall_pool->in_use); 80 80 } 81 81 82 - void ucall_assert(uint64_t cmd, const char *exp, const char *file, 82 + void ucall_assert(u64 cmd, const char *exp, const char *file, 83 83 unsigned int line, const char *fmt, ...) 84 84 { 85 85 struct ucall *uc; ··· 88 88 uc = ucall_alloc(); 89 89 uc->cmd = cmd; 90 90 91 - WRITE_ONCE(uc->args[GUEST_ERROR_STRING], (uint64_t)(exp)); 92 - WRITE_ONCE(uc->args[GUEST_FILE], (uint64_t)(file)); 91 + WRITE_ONCE(uc->args[GUEST_ERROR_STRING], (u64)(exp)); 92 + WRITE_ONCE(uc->args[GUEST_FILE], (u64)(file)); 93 93 WRITE_ONCE(uc->args[GUEST_LINE], line); 94 94 95 95 va_start(va, fmt); 96 96 guest_vsnprintf(uc->buffer, UCALL_BUFFER_LEN, fmt, va); 97 97 va_end(va); 98 98 99 - ucall_arch_do_ucall((vm_vaddr_t)uc->hva); 99 + ucall_arch_do_ucall((gva_t)uc->hva); 100 100 101 101 ucall_free(uc); 102 102 } 103 103 104 - void ucall_fmt(uint64_t cmd, const char *fmt, ...) 104 + void ucall_fmt(u64 cmd, const char *fmt, ...) 105 105 { 106 106 struct ucall *uc; 107 107 va_list va; ··· 113 113 guest_vsnprintf(uc->buffer, UCALL_BUFFER_LEN, fmt, va); 114 114 va_end(va); 115 115 116 - ucall_arch_do_ucall((vm_vaddr_t)uc->hva); 116 + ucall_arch_do_ucall((gva_t)uc->hva); 117 117 118 118 ucall_free(uc); 119 119 } 120 120 121 - void ucall(uint64_t cmd, int nargs, ...) 121 + void ucall(u64 cmd, int nargs, ...) 122 122 { 123 123 struct ucall *uc; 124 124 va_list va; ··· 132 132 133 133 va_start(va, nargs); 134 134 for (i = 0; i < nargs; ++i) 135 - WRITE_ONCE(uc->args[i], va_arg(va, uint64_t)); 135 + WRITE_ONCE(uc->args[i], va_arg(va, u64)); 136 136 va_end(va); 137 137 138 - ucall_arch_do_ucall((vm_vaddr_t)uc->hva); 138 + ucall_arch_do_ucall((gva_t)uc->hva); 139 139 140 140 ucall_free(uc); 141 141 } 142 142 143 - uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc) 143 + u64 get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc) 144 144 { 145 145 struct ucall ucall; 146 146 void *addr;
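For context, the usual host-side consumer of this pool looks roughly like the loop below. This is a sketch of the common selftest idiom, not code from this commit; it assumes the standard UCALL_SYNC/UCALL_ABORT/UCALL_DONE commands and the library's existing headers.

#include "kvm_util.h"
#include "ucall_common.h"

static void run_vcpu_to_done_sketch(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	for (;;) {
		vcpu_run(vcpu);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_SYNC:
			continue;	/* guest checkpoint, keep going */
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			/* not reached */
		case UCALL_DONE:
			return;
		default:
			TEST_FAIL("Unexpected ucall");
		}
	}
}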
+7 -7
tools/testing/selftests/kvm/lib/userfaultfd_util.c
··· 27 27 { 28 28 struct uffd_reader_args *reader_args = (struct uffd_reader_args *)arg; 29 29 int uffd = reader_args->uffd; 30 - int64_t pages = 0; 30 + s64 pages = 0; 31 31 struct timespec start; 32 32 struct timespec ts_diff; 33 33 struct epoll_event evt; ··· 100 100 } 101 101 102 102 struct uffd_desc *uffd_setup_demand_paging(int uffd_mode, useconds_t delay, 103 - void *hva, uint64_t len, 104 - uint64_t num_readers, 103 + void *hva, u64 len, 104 + u64 num_readers, 105 105 uffd_handler_t handler) 106 106 { 107 107 struct uffd_desc *uffd_desc; ··· 109 109 int uffd; 110 110 struct uffdio_api uffdio_api; 111 111 struct uffdio_register uffdio_register; 112 - uint64_t expected_ioctls = ((uint64_t) 1) << _UFFDIO_COPY; 112 + u64 expected_ioctls = ((u64)1) << _UFFDIO_COPY; 113 113 int ret, i; 114 114 115 115 PER_PAGE_DEBUG("Userfaultfd %s mode, faults resolved with %s\n", ··· 132 132 133 133 /* In order to get minor faults, prefault via the alias. */ 134 134 if (is_minor) 135 - expected_ioctls = ((uint64_t) 1) << _UFFDIO_CONTINUE; 135 + expected_ioctls = ((u64)1) << _UFFDIO_CONTINUE; 136 136 137 137 uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK); 138 138 TEST_ASSERT(uffd >= 0, "uffd creation failed, errno: %d", errno); ··· 141 141 uffdio_api.features = 0; 142 142 TEST_ASSERT(ioctl(uffd, UFFDIO_API, &uffdio_api) != -1, 143 143 "ioctl UFFDIO_API failed: %" PRIu64, 144 - (uint64_t)uffdio_api.api); 144 + (u64)uffdio_api.api); 145 145 146 - uffdio_register.range.start = (uint64_t)hva; 146 + uffdio_register.range.start = (u64)hva; 147 147 uffdio_register.range.len = len; 148 148 uffdio_register.mode = uffd_mode; 149 149 TEST_ASSERT(ioctl(uffd, UFFDIO_REGISTER, &uffdio_register) != -1,
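The registration sequence above reduces to the standard userfaultfd three-step: create the descriptor, negotiate the API, register the range. A minimal standalone sketch using the missing-page mode as an example; the helper above also handles minor faults and spawns reader threads, both omitted here.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/userfaultfd.h>

/* Create a userfaultfd and register [hva, hva + len) for missing-page
 * faults (to be resolved by the owner with UFFDIO_COPY).  Returns the fd,
 * or -1 with the fd closed on any failure. */
static int uffd_register_range_sketch(void *hva, unsigned long len)
{
	struct uffdio_api api = { .api = UFFD_API };
	struct uffdio_register reg = {
		.range = { .start = (unsigned long)hva, .len = len },
		.mode  = UFFDIO_REGISTER_MODE_MISSING,
	};
	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

	if (uffd < 0)
		return -1;

	if (ioctl(uffd, UFFDIO_API, &api) || ioctl(uffd, UFFDIO_REGISTER, &reg)) {
		close(uffd);
		return -1;
	}

	return uffd;
}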
+1 -1
tools/testing/selftests/kvm/lib/x86/apic.c
··· 14 14 15 15 void xapic_enable(void) 16 16 { 17 - uint64_t val = rdmsr(MSR_IA32_APICBASE); 17 + u64 val = rdmsr(MSR_IA32_APICBASE); 18 18 19 19 /* Per SDM: to enable xAPIC when in x2APIC must first disable APIC */ 20 20 if (val & MSR_IA32_APICBASE_EXTD) {
+7 -7
tools/testing/selftests/kvm/lib/x86/hyperv.c
··· 76 76 } 77 77 78 78 struct hyperv_test_pages *vcpu_alloc_hyperv_test_pages(struct kvm_vm *vm, 79 - vm_vaddr_t *p_hv_pages_gva) 79 + gva_t *p_hv_pages_gva) 80 80 { 81 - vm_vaddr_t hv_pages_gva = vm_vaddr_alloc_page(vm); 81 + gva_t hv_pages_gva = vm_alloc_page(vm); 82 82 struct hyperv_test_pages *hv = addr_gva2hva(vm, hv_pages_gva); 83 83 84 84 /* Setup of a region of guest memory for the VP Assist page. */ 85 - hv->vp_assist = (void *)vm_vaddr_alloc_page(vm); 85 + hv->vp_assist = (void *)vm_alloc_page(vm); 86 86 hv->vp_assist_hva = addr_gva2hva(vm, (uintptr_t)hv->vp_assist); 87 87 hv->vp_assist_gpa = addr_gva2gpa(vm, (uintptr_t)hv->vp_assist); 88 88 89 89 /* Setup of a region of guest memory for the partition assist page. */ 90 - hv->partition_assist = (void *)vm_vaddr_alloc_page(vm); 90 + hv->partition_assist = (void *)vm_alloc_page(vm); 91 91 hv->partition_assist_hva = addr_gva2hva(vm, (uintptr_t)hv->partition_assist); 92 92 hv->partition_assist_gpa = addr_gva2gpa(vm, (uintptr_t)hv->partition_assist); 93 93 94 94 /* Setup of a region of guest memory for the enlightened VMCS. */ 95 - hv->enlightened_vmcs = (void *)vm_vaddr_alloc_page(vm); 95 + hv->enlightened_vmcs = (void *)vm_alloc_page(vm); 96 96 hv->enlightened_vmcs_hva = addr_gva2hva(vm, (uintptr_t)hv->enlightened_vmcs); 97 97 hv->enlightened_vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)hv->enlightened_vmcs); 98 98 ··· 100 100 return hv; 101 101 } 102 102 103 - int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist) 103 + int enable_vp_assist(u64 vp_assist_pa, void *vp_assist) 104 104 { 105 - uint64_t val = (vp_assist_pa & HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK) | 105 + u64 val = (vp_assist_pa & HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK) | 106 106 HV_X64_MSR_VP_ASSIST_PAGE_ENABLE; 107 107 108 108 wrmsr(HV_X64_MSR_VP_ASSIST_PAGE, val);
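A typical guest-side use of the allocation and MSR write shown above is sketched below. The calling context and header name are assumptions (not from this hunk): a hyperv_test_pages pointer is presumed to have been handed to the guest via the gva returned by vcpu_alloc_hyperv_test_pages().

#include "hyperv.h"

/* Program the VP assist page before touching enlightened VMCS state. */
static void guest_setup_vp_assist_sketch(struct hyperv_test_pages *hv)
{
	enable_vp_assist(hv->vp_assist_gpa, hv->vp_assist);
}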
+7 -7
tools/testing/selftests/kvm/lib/x86/memstress.c
··· 16 16 #include "svm_util.h" 17 17 #include "vmx.h" 18 18 19 - void memstress_l2_guest_code(uint64_t vcpu_id) 19 + void memstress_l2_guest_code(u64 vcpu_id) 20 20 { 21 21 memstress_guest_code(vcpu_id); 22 22 vmcall(); ··· 32 32 33 33 #define L2_GUEST_STACK_SIZE 64 34 34 35 - static void l1_vmx_code(struct vmx_pages *vmx, uint64_t vcpu_id) 35 + static void l1_vmx_code(struct vmx_pages *vmx, u64 vcpu_id) 36 36 { 37 37 unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; 38 38 unsigned long *rsp; ··· 51 51 GUEST_DONE(); 52 52 } 53 53 54 - static void l1_svm_code(struct svm_test_data *svm, uint64_t vcpu_id) 54 + static void l1_svm_code(struct svm_test_data *svm, u64 vcpu_id) 55 55 { 56 56 unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; 57 57 unsigned long *rsp; ··· 67 67 } 68 68 69 69 70 - static void memstress_l1_guest_code(void *data, uint64_t vcpu_id) 70 + static void memstress_l1_guest_code(void *data, u64 vcpu_id) 71 71 { 72 72 if (this_cpu_has(X86_FEATURE_VMX)) 73 73 l1_vmx_code(data, vcpu_id); ··· 75 75 l1_svm_code(data, vcpu_id); 76 76 } 77 77 78 - uint64_t memstress_nested_pages(int nr_vcpus) 78 + u64 memstress_nested_pages(int nr_vcpus) 79 79 { 80 80 /* 81 81 * 513 page tables is enough to identity-map 256 TiB of L2 with 1G ··· 87 87 88 88 static void memstress_setup_ept_mappings(struct kvm_vm *vm) 89 89 { 90 - uint64_t start, end; 90 + u64 start, end; 91 91 92 92 /* 93 93 * Identity map the first 4G and the test region with 1G pages so that ··· 104 104 void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[]) 105 105 { 106 106 struct kvm_regs regs; 107 - vm_vaddr_t nested_gva; 107 + gva_t nested_gva; 108 108 int vcpu_id; 109 109 110 110 TEST_REQUIRE(kvm_cpu_has_tdp());
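The 513 figure quoted in the comment above works out as one PML4 page plus 512 PDPT pages: each 1 GiB mapping consumes one PDPT entry, a PDPT holds 512 entries (512 GiB of reach), and 512 PDPTs x 512 GiB = 256 TiB, exactly the span of a single, fully populated PML4.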
+4 -4
tools/testing/selftests/kvm/lib/x86/pmu.c
··· 11 11 #include "processor.h" 12 12 #include "pmu.h" 13 13 14 - const uint64_t intel_pmu_arch_events[] = { 14 + const u64 intel_pmu_arch_events[] = { 15 15 INTEL_ARCH_CPU_CYCLES, 16 16 INTEL_ARCH_INSTRUCTIONS_RETIRED, 17 17 INTEL_ARCH_REFERENCE_CYCLES, ··· 28 28 }; 29 29 kvm_static_assert(ARRAY_SIZE(intel_pmu_arch_events) == NR_INTEL_ARCH_EVENTS); 30 30 31 - const uint64_t amd_pmu_zen_events[] = { 31 + const u64 amd_pmu_zen_events[] = { 32 32 AMD_ZEN_CORE_CYCLES, 33 33 AMD_ZEN_INSTRUCTIONS_RETIRED, 34 34 AMD_ZEN_BRANCHES_RETIRED, ··· 50 50 * be overcounted on these certain instructions, but for Clearwater Forest 51 51 * only "Instruction Retired" event is overcounted on these instructions. 52 52 */ 53 - static uint64_t get_pmu_errata(void) 53 + static u64 get_pmu_errata(void) 54 54 { 55 55 if (!this_cpu_is_intel()) 56 56 return 0; ··· 72 72 } 73 73 } 74 74 75 - uint64_t pmu_errata_mask; 75 + u64 pmu_errata_mask; 76 76 77 77 void kvm_init_pmu_errata(void) 78 78 {
+142 -150
tools/testing/selftests/kvm/lib/x86/processor.c
··· 21 21 #define KERNEL_DS 0x10 22 22 #define KERNEL_TSS 0x18 23 23 24 - vm_vaddr_t exception_handlers; 24 + gva_t exception_handlers; 25 25 bool host_cpu_is_amd; 26 26 bool host_cpu_is_intel; 27 27 bool host_cpu_is_hygon; 28 28 bool host_cpu_is_amd_compatible; 29 29 bool is_forced_emulation_enabled; 30 - uint64_t guest_tsc_khz; 30 + u64 guest_tsc_khz; 31 31 32 32 const char *ex_str(int vector) 33 33 { ··· 62 62 } 63 63 } 64 64 65 - static void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent) 65 + static void regs_dump(FILE *stream, struct kvm_regs *regs, u8 indent) 66 66 { 67 67 fprintf(stream, "%*srax: 0x%.16llx rbx: 0x%.16llx " 68 68 "rcx: 0x%.16llx rdx: 0x%.16llx\n", ··· 86 86 } 87 87 88 88 static void segment_dump(FILE *stream, struct kvm_segment *segment, 89 - uint8_t indent) 89 + u8 indent) 90 90 { 91 91 fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.8x " 92 92 "selector: 0x%.4x type: 0x%.2x\n", ··· 103 103 } 104 104 105 105 static void dtable_dump(FILE *stream, struct kvm_dtable *dtable, 106 - uint8_t indent) 106 + u8 indent) 107 107 { 108 108 fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.4x " 109 109 "padding: 0x%.4x 0x%.4x 0x%.4x\n", ··· 111 111 dtable->padding[0], dtable->padding[1], dtable->padding[2]); 112 112 } 113 113 114 - static void sregs_dump(FILE *stream, struct kvm_sregs *sregs, uint8_t indent) 114 + static void sregs_dump(FILE *stream, struct kvm_sregs *sregs, u8 indent) 115 115 { 116 116 unsigned int i; 117 117 ··· 207 207 } 208 208 209 209 static void *virt_get_pte(struct kvm_vm *vm, struct kvm_mmu *mmu, 210 - uint64_t *parent_pte, uint64_t vaddr, int level) 210 + u64 *parent_pte, gva_t gva, int level) 211 211 { 212 - uint64_t pt_gpa = PTE_GET_PA(*parent_pte); 213 - uint64_t *page_table = addr_gpa2hva(vm, pt_gpa); 214 - int index = (vaddr >> PG_LEVEL_SHIFT(level)) & 0x1ffu; 212 + u64 pt_gpa = PTE_GET_PA(*parent_pte); 213 + u64 *page_table = addr_gpa2hva(vm, pt_gpa); 214 + int index = (gva >> PG_LEVEL_SHIFT(level)) & 0x1ffu; 215 215 216 216 TEST_ASSERT((*parent_pte == mmu->pgd) || is_present_pte(mmu, parent_pte), 217 217 "Parent PTE (level %d) not PRESENT for gva: 0x%08lx", 218 - level + 1, vaddr); 218 + level + 1, gva); 219 219 220 220 return &page_table[index]; 221 221 } 222 222 223 - static uint64_t *virt_create_upper_pte(struct kvm_vm *vm, 224 - struct kvm_mmu *mmu, 225 - uint64_t *parent_pte, 226 - uint64_t vaddr, 227 - uint64_t paddr, 228 - int current_level, 229 - int target_level) 223 + static u64 *virt_create_upper_pte(struct kvm_vm *vm, 224 + struct kvm_mmu *mmu, 225 + u64 *parent_pte, 226 + gva_t gva, 227 + gpa_t gpa, 228 + int current_level, 229 + int target_level) 230 230 { 231 - uint64_t *pte = virt_get_pte(vm, mmu, parent_pte, vaddr, current_level); 231 + u64 *pte = virt_get_pte(vm, mmu, parent_pte, gva, current_level); 232 232 233 - paddr = vm_untag_gpa(vm, paddr); 233 + gpa = vm_untag_gpa(vm, gpa); 234 234 235 235 if (!is_present_pte(mmu, pte)) { 236 236 *pte = PTE_PRESENT_MASK(mmu) | PTE_READABLE_MASK(mmu) | 237 237 PTE_WRITABLE_MASK(mmu) | PTE_EXECUTABLE_MASK(mmu) | 238 238 PTE_ALWAYS_SET_MASK(mmu); 239 239 if (current_level == target_level) 240 - *pte |= PTE_HUGE_MASK(mmu) | (paddr & PHYSICAL_PAGE_MASK); 240 + *pte |= PTE_HUGE_MASK(mmu) | (gpa & PHYSICAL_PAGE_MASK); 241 241 else 242 242 *pte |= vm_alloc_page_table(vm) & PHYSICAL_PAGE_MASK; 243 243 } else { ··· 247 247 * this level. 
248 248 */ 249 249 TEST_ASSERT(current_level != target_level, 250 - "Cannot create hugepage at level: %u, vaddr: 0x%lx", 251 - current_level, vaddr); 250 + "Cannot create hugepage at level: %u, gva: 0x%lx", 251 + current_level, gva); 252 252 TEST_ASSERT(!is_huge_pte(mmu, pte), 253 - "Cannot create page table at level: %u, vaddr: 0x%lx", 254 - current_level, vaddr); 253 + "Cannot create page table at level: %u, gva: 0x%lx", 254 + current_level, gva); 255 255 } 256 256 return pte; 257 257 } 258 258 259 - void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, uint64_t vaddr, 260 - uint64_t paddr, int level) 259 + void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, gva_t gva, 260 + gpa_t gpa, int level) 261 261 { 262 - const uint64_t pg_size = PG_LEVEL_SIZE(level); 263 - uint64_t *pte = &mmu->pgd; 262 + const u64 pg_size = PG_LEVEL_SIZE(level); 263 + u64 *pte = &mmu->pgd; 264 264 int current_level; 265 265 266 266 TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K, 267 267 "Unknown or unsupported guest mode: 0x%x", vm->mode); 268 268 269 - TEST_ASSERT((vaddr % pg_size) == 0, 269 + TEST_ASSERT((gva % pg_size) == 0, 270 270 "Virtual address not aligned,\n" 271 - "vaddr: 0x%lx page size: 0x%lx", vaddr, pg_size); 272 - TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (vaddr >> vm->page_shift)), 273 - "Invalid virtual address, vaddr: 0x%lx", vaddr); 274 - TEST_ASSERT((paddr % pg_size) == 0, 271 + "gva: 0x%lx page size: 0x%lx", gva, pg_size); 272 + TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)), 273 + "Invalid virtual address, gva: 0x%lx", gva); 274 + TEST_ASSERT((gpa % pg_size) == 0, 275 275 "Physical address not aligned,\n" 276 - " paddr: 0x%lx page size: 0x%lx", paddr, pg_size); 277 - TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn, 276 + " gpa: 0x%lx page size: 0x%lx", gpa, pg_size); 277 + TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn, 278 278 "Physical address beyond maximum supported,\n" 279 - " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", 280 - paddr, vm->max_gfn, vm->page_size); 281 - TEST_ASSERT(vm_untag_gpa(vm, paddr) == paddr, 282 - "Unexpected bits in paddr: %lx", paddr); 279 + " gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", 280 + gpa, vm->max_gfn, vm->page_size); 281 + TEST_ASSERT(vm_untag_gpa(vm, gpa) == gpa, 282 + "Unexpected bits in gpa: %lx", gpa); 283 283 284 284 TEST_ASSERT(!PTE_EXECUTABLE_MASK(mmu) || !PTE_NX_MASK(mmu), 285 285 "X and NX bit masks cannot be used simultaneously"); ··· 291 291 for (current_level = mmu->pgtable_levels; 292 292 current_level > PG_LEVEL_4K; 293 293 current_level--) { 294 - pte = virt_create_upper_pte(vm, mmu, pte, vaddr, paddr, 294 + pte = virt_create_upper_pte(vm, mmu, pte, gva, gpa, 295 295 current_level, level); 296 296 if (is_huge_pte(mmu, pte)) 297 297 return; 298 298 } 299 299 300 300 /* Fill in page table entry. */ 301 - pte = virt_get_pte(vm, mmu, pte, vaddr, PG_LEVEL_4K); 301 + pte = virt_get_pte(vm, mmu, pte, gva, PG_LEVEL_4K); 302 302 TEST_ASSERT(!is_present_pte(mmu, pte), 303 - "PTE already present for 4k page at vaddr: 0x%lx", vaddr); 303 + "PTE already present for 4k page at gva: 0x%lx", gva); 304 304 *pte = PTE_PRESENT_MASK(mmu) | PTE_READABLE_MASK(mmu) | 305 305 PTE_WRITABLE_MASK(mmu) | PTE_EXECUTABLE_MASK(mmu) | 306 - PTE_ALWAYS_SET_MASK(mmu) | (paddr & PHYSICAL_PAGE_MASK); 306 + PTE_ALWAYS_SET_MASK(mmu) | (gpa & PHYSICAL_PAGE_MASK); 307 307 308 308 /* 309 309 * Neither SEV nor TDX supports shared page tables, so only the final 310 310 * leaf PTE needs manually set the C/S-bit. 
311 311 */ 312 - if (vm_is_gpa_protected(vm, paddr)) 312 + if (vm_is_gpa_protected(vm, gpa)) 313 313 *pte |= PTE_C_BIT_MASK(mmu); 314 314 else 315 315 *pte |= PTE_S_BIT_MASK(mmu); 316 316 } 317 317 318 - void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr) 318 + void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa) 319 319 { 320 - __virt_pg_map(vm, &vm->mmu, vaddr, paddr, PG_LEVEL_4K); 320 + __virt_pg_map(vm, &vm->mmu, gva, gpa, PG_LEVEL_4K); 321 321 } 322 322 323 - void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, 324 - uint64_t nr_bytes, int level) 323 + void virt_map_level(struct kvm_vm *vm, gva_t gva, gpa_t gpa, 324 + u64 nr_bytes, int level) 325 325 { 326 - uint64_t pg_size = PG_LEVEL_SIZE(level); 327 - uint64_t nr_pages = nr_bytes / pg_size; 326 + u64 pg_size = PG_LEVEL_SIZE(level); 327 + u64 nr_pages = nr_bytes / pg_size; 328 328 int i; 329 329 330 330 TEST_ASSERT(nr_bytes % pg_size == 0, ··· 332 332 nr_bytes, pg_size); 333 333 334 334 for (i = 0; i < nr_pages; i++) { 335 - __virt_pg_map(vm, &vm->mmu, vaddr, paddr, level); 336 - sparsebit_set_num(vm->vpages_mapped, vaddr >> vm->page_shift, 335 + __virt_pg_map(vm, &vm->mmu, gva, gpa, level); 336 + sparsebit_set_num(vm->vpages_mapped, gva >> vm->page_shift, 337 337 nr_bytes / PAGE_SIZE); 338 338 339 - vaddr += pg_size; 340 - paddr += pg_size; 339 + gva += pg_size; 340 + gpa += pg_size; 341 341 } 342 342 } 343 343 344 - static bool vm_is_target_pte(struct kvm_mmu *mmu, uint64_t *pte, 344 + static bool vm_is_target_pte(struct kvm_mmu *mmu, u64 *pte, 345 345 int *level, int current_level) 346 346 { 347 347 if (is_huge_pte(mmu, pte)) { ··· 354 354 return *level == current_level; 355 355 } 356 356 357 - static uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, 358 - struct kvm_mmu *mmu, 359 - uint64_t vaddr, 360 - int *level) 357 + static u64 *__vm_get_page_table_entry(struct kvm_vm *vm, 358 + struct kvm_mmu *mmu, 359 + gva_t gva, 360 + int *level) 361 361 { 362 362 int va_width = 12 + (mmu->pgtable_levels) * 9; 363 - uint64_t *pte = &mmu->pgd; 363 + u64 *pte = &mmu->pgd; 364 364 int current_level; 365 365 366 366 TEST_ASSERT(!vm->arch.is_pt_protected, ··· 371 371 372 372 TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K, 373 373 "Unknown or unsupported guest mode: 0x%x", vm->mode); 374 - TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, 375 - (vaddr >> vm->page_shift)), 376 - "Invalid virtual address, vaddr: 0x%lx", 377 - vaddr); 374 + TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)), 375 + "Invalid virtual address, gva: 0x%lx", gva); 378 376 /* 379 - * Check that the vaddr is a sign-extended va_width value. 377 + * Check that the gva is a sign-extended va_width value. 380 378 */ 381 - TEST_ASSERT(vaddr == 382 - (((int64_t)vaddr << (64 - va_width) >> (64 - va_width))), 379 + TEST_ASSERT(gva == (((s64)gva << (64 - va_width) >> (64 - va_width))), 383 380 "Canonical check failed. 
The virtual address is invalid."); 384 381 385 382 for (current_level = mmu->pgtable_levels; 386 383 current_level > PG_LEVEL_4K; 387 384 current_level--) { 388 - pte = virt_get_pte(vm, mmu, pte, vaddr, current_level); 385 + pte = virt_get_pte(vm, mmu, pte, gva, current_level); 389 386 if (vm_is_target_pte(mmu, pte, level, current_level)) 390 387 return pte; 391 388 } 392 389 393 - return virt_get_pte(vm, mmu, pte, vaddr, PG_LEVEL_4K); 390 + return virt_get_pte(vm, mmu, pte, gva, PG_LEVEL_4K); 394 391 } 395 392 396 - uint64_t *tdp_get_pte(struct kvm_vm *vm, uint64_t l2_gpa) 393 + u64 *tdp_get_pte(struct kvm_vm *vm, u64 l2_gpa) 397 394 { 398 395 int level = PG_LEVEL_4K; 399 396 400 397 return __vm_get_page_table_entry(vm, &vm->stage2_mmu, l2_gpa, &level); 401 398 } 402 399 403 - uint64_t *vm_get_pte(struct kvm_vm *vm, uint64_t vaddr) 400 + u64 *vm_get_pte(struct kvm_vm *vm, gva_t gva) 404 401 { 405 402 int level = PG_LEVEL_4K; 406 403 407 - return __vm_get_page_table_entry(vm, &vm->mmu, vaddr, &level); 404 + return __vm_get_page_table_entry(vm, &vm->mmu, gva, &level); 408 405 } 409 406 410 - void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) 407 + void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent) 411 408 { 412 409 struct kvm_mmu *mmu = &vm->mmu; 413 - uint64_t *pml4e, *pml4e_start; 414 - uint64_t *pdpe, *pdpe_start; 415 - uint64_t *pde, *pde_start; 416 - uint64_t *pte, *pte_start; 410 + u64 *pml4e, *pml4e_start; 411 + u64 *pdpe, *pdpe_start; 412 + u64 *pde, *pde_start; 413 + u64 *pte, *pte_start; 417 414 418 415 if (!mmu->pgd_created) 419 416 return; ··· 420 423 fprintf(stream, "%*s index hvaddr gpaddr " 421 424 "addr w exec dirty\n", 422 425 indent, ""); 423 - pml4e_start = (uint64_t *) addr_gpa2hva(vm, mmu->pgd); 424 - for (uint16_t n1 = 0; n1 <= 0x1ffu; n1++) { 426 + pml4e_start = (u64 *)addr_gpa2hva(vm, mmu->pgd); 427 + for (u16 n1 = 0; n1 <= 0x1ffu; n1++) { 425 428 pml4e = &pml4e_start[n1]; 426 429 if (!is_present_pte(mmu, pml4e)) 427 430 continue; ··· 433 436 is_writable_pte(mmu, pml4e), is_nx_pte(mmu, pml4e)); 434 437 435 438 pdpe_start = addr_gpa2hva(vm, *pml4e & PHYSICAL_PAGE_MASK); 436 - for (uint16_t n2 = 0; n2 <= 0x1ffu; n2++) { 439 + for (u16 n2 = 0; n2 <= 0x1ffu; n2++) { 437 440 pdpe = &pdpe_start[n2]; 438 441 if (!is_present_pte(mmu, pdpe)) 439 442 continue; ··· 446 449 is_nx_pte(mmu, pdpe)); 447 450 448 451 pde_start = addr_gpa2hva(vm, *pdpe & PHYSICAL_PAGE_MASK); 449 - for (uint16_t n3 = 0; n3 <= 0x1ffu; n3++) { 452 + for (u16 n3 = 0; n3 <= 0x1ffu; n3++) { 450 453 pde = &pde_start[n3]; 451 454 if (!is_present_pte(mmu, pde)) 452 455 continue; ··· 458 461 is_nx_pte(mmu, pde)); 459 462 460 463 pte_start = addr_gpa2hva(vm, *pde & PHYSICAL_PAGE_MASK); 461 - for (uint16_t n4 = 0; n4 <= 0x1ffu; n4++) { 464 + for (u16 n4 = 0; n4 <= 0x1ffu; n4++) { 462 465 pte = &pte_start[n4]; 463 466 if (!is_present_pte(mmu, pte)) 464 467 continue; ··· 472 475 is_writable_pte(mmu, pte), 473 476 is_nx_pte(mmu, pte), 474 477 is_dirty_pte(mmu, pte), 475 - ((uint64_t) n1 << 27) 476 - | ((uint64_t) n2 << 18) 477 - | ((uint64_t) n3 << 9) 478 - | ((uint64_t) n4)); 478 + ((u64)n1 << 27) 479 + | ((u64)n2 << 18) 480 + | ((u64)n3 << 9) 481 + | ((u64)n4)); 479 482 } 480 483 } 481 484 } ··· 495 498 return kvm_cpu_has_ept() || kvm_cpu_has_npt(); 496 499 } 497 500 498 - void __tdp_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr, 499 - uint64_t size, int level) 501 + void __tdp_map(struct kvm_vm *vm, gpa_t l2_gpa, gpa_t gpa, u64 size, int level) 500 502 { 501 503 
size_t page_size = PG_LEVEL_SIZE(level); 502 504 size_t npages = size / page_size; 503 505 504 - TEST_ASSERT(nested_paddr + size > nested_paddr, "Vaddr overflow"); 505 - TEST_ASSERT(paddr + size > paddr, "Paddr overflow"); 506 + TEST_ASSERT(l2_gpa + size > l2_gpa, "L2 GPA overflow"); 507 + TEST_ASSERT(gpa + size > gpa, "GPA overflow"); 506 508 507 509 while (npages--) { 508 - __virt_pg_map(vm, &vm->stage2_mmu, nested_paddr, paddr, level); 509 - nested_paddr += page_size; 510 - paddr += page_size; 510 + __virt_pg_map(vm, &vm->stage2_mmu, l2_gpa, gpa, level); 511 + l2_gpa += page_size; 512 + gpa += page_size; 511 513 } 512 514 } 513 515 514 - void tdp_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr, 515 - uint64_t size) 516 + void tdp_map(struct kvm_vm *vm, gpa_t l2_gpa, gpa_t gpa, u64 size) 516 517 { 517 - __tdp_map(vm, nested_paddr, paddr, size, PG_LEVEL_4K); 518 + __tdp_map(vm, l2_gpa, gpa, size, PG_LEVEL_4K); 518 519 } 519 520 520 521 /* Prepare an identity extended page table that maps all the ··· 520 525 */ 521 526 void tdp_identity_map_default_memslots(struct kvm_vm *vm) 522 527 { 523 - uint32_t s, memslot = 0; 528 + u32 s, memslot = 0; 524 529 sparsebit_idx_t i, last; 525 530 struct userspace_mem_region *region = memslot2region(vm, memslot); 526 531 ··· 535 540 if (i > last) 536 541 break; 537 542 538 - tdp_map(vm, (uint64_t)i << vm->page_shift, 539 - (uint64_t)i << vm->page_shift, 1 << vm->page_shift); 543 + tdp_map(vm, (u64)i << vm->page_shift, 544 + (u64)i << vm->page_shift, 1 << vm->page_shift); 540 545 } 541 546 } 542 547 543 548 /* Identity map a region with 1GiB Pages. */ 544 - void tdp_identity_map_1g(struct kvm_vm *vm, uint64_t addr, uint64_t size) 549 + void tdp_identity_map_1g(struct kvm_vm *vm, u64 addr, u64 size) 545 550 { 546 551 __tdp_map(vm, addr, addr, size, PG_LEVEL_1G); 547 552 } ··· 613 618 segp->present = true; 614 619 } 615 620 616 - vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) 621 + gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva) 617 622 { 618 623 int level = PG_LEVEL_NONE; 619 - uint64_t *pte = __vm_get_page_table_entry(vm, &vm->mmu, gva, &level); 624 + u64 *pte = __vm_get_page_table_entry(vm, &vm->mmu, gva, &level); 620 625 621 626 TEST_ASSERT(is_present_pte(&vm->mmu, pte), 622 627 "Leaf PTE not PRESENT for gva: 0x%08lx", gva); ··· 628 633 return vm_untag_gpa(vm, PTE_GET_PA(*pte)) | (gva & ~HUGEPAGE_MASK(level)); 629 634 } 630 635 631 - static void kvm_seg_set_tss_64bit(vm_vaddr_t base, struct kvm_segment *segp) 636 + static void kvm_seg_set_tss_64bit(gva_t base, struct kvm_segment *segp) 632 637 { 633 638 memset(segp, 0, sizeof(*segp)); 634 639 segp->base = base; ··· 741 746 struct kvm_segment seg; 742 747 int i; 743 748 744 - vm->arch.gdt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA); 745 - vm->arch.idt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA); 746 - vm->handlers = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA); 747 - vm->arch.tss = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA); 749 + vm->arch.gdt = __vm_alloc_page(vm, MEM_REGION_DATA); 750 + vm->arch.idt = __vm_alloc_page(vm, MEM_REGION_DATA); 751 + vm->handlers = __vm_alloc_page(vm, MEM_REGION_DATA); 752 + vm->arch.tss = __vm_alloc_page(vm, MEM_REGION_DATA); 748 753 749 754 /* Handlers have the same address in both address spaces.*/ 750 755 for (i = 0; i < NUM_INTERRUPTS; i++) 751 756 set_idt_entry(vm, i, (unsigned long)(&idt_handlers)[i], 0, KERNEL_CS); 752 757 753 - *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers; 758 + *(gva_t 
*)addr_gva2hva(vm, (gva_t)(&exception_handlers)) = vm->handlers; 754 759 755 760 kvm_seg_set_kernel_code_64bit(&seg); 756 761 kvm_seg_fill_gdt_64bit(vm, &seg); ··· 765 770 void vm_install_exception_handler(struct kvm_vm *vm, int vector, 766 771 void (*handler)(struct ex_regs *)) 767 772 { 768 - vm_vaddr_t *handlers = (vm_vaddr_t *)addr_gva2hva(vm, vm->handlers); 773 + gva_t *handlers = (gva_t *)addr_gva2hva(vm, vm->handlers); 769 774 770 - handlers[vector] = (vm_vaddr_t)handler; 775 + handlers[vector] = (gva_t)handler; 771 776 } 772 777 773 778 void assert_on_unhandled_exception(struct kvm_vcpu *vcpu) ··· 816 821 vcpu_regs_set(vcpu, &regs); 817 822 } 818 823 819 - struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) 824 + struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id) 820 825 { 821 826 struct kvm_mp_state mp_state; 822 827 struct kvm_regs regs; 823 - vm_vaddr_t stack_vaddr; 828 + gva_t stack_gva; 824 829 struct kvm_vcpu *vcpu; 825 830 826 - stack_vaddr = __vm_vaddr_alloc(vm, DEFAULT_STACK_PGS * getpagesize(), 827 - DEFAULT_GUEST_STACK_VADDR_MIN, 828 - MEM_REGION_DATA); 831 + stack_gva = __vm_alloc(vm, DEFAULT_STACK_PGS * getpagesize(), 832 + DEFAULT_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA); 829 833 830 - stack_vaddr += DEFAULT_STACK_PGS * getpagesize(); 834 + stack_gva += DEFAULT_STACK_PGS * getpagesize(); 831 835 832 836 /* 833 837 * Align stack to match calling sequence requirements in section "The ··· 837 843 * If this code is ever used to launch a vCPU with 32-bit entry point it 838 844 * may need to subtract 4 bytes instead of 8 bytes. 839 845 */ 840 - TEST_ASSERT(IS_ALIGNED(stack_vaddr, PAGE_SIZE), 841 - "__vm_vaddr_alloc() did not provide a page-aligned address"); 842 - stack_vaddr -= 8; 846 + TEST_ASSERT(IS_ALIGNED(stack_gva, PAGE_SIZE), 847 + "__vm_alloc() did not provide a page-aligned address"); 848 + stack_gva -= 8; 843 849 844 850 vcpu = __vm_vcpu_add(vm, vcpu_id); 845 851 vcpu_init_cpuid(vcpu, kvm_get_supported_cpuid()); ··· 849 855 /* Setup guest general purpose registers */ 850 856 vcpu_regs_get(vcpu, &regs); 851 857 regs.rflags = regs.rflags | 0x2; 852 - regs.rsp = stack_vaddr; 858 + regs.rsp = stack_gva; 853 859 vcpu_regs_set(vcpu, &regs); 854 860 855 861 /* Setup the MP state */ ··· 866 872 return vcpu; 867 873 } 868 874 869 - struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id) 875 + struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, u32 vcpu_id) 870 876 { 871 877 struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id); 872 878 ··· 901 907 return kvm_supported_cpuid; 902 908 } 903 909 904 - static uint32_t __kvm_cpu_has(const struct kvm_cpuid2 *cpuid, 905 - uint32_t function, uint32_t index, 906 - uint8_t reg, uint8_t lo, uint8_t hi) 910 + static u32 __kvm_cpu_has(const struct kvm_cpuid2 *cpuid, 911 + u32 function, u32 index, 912 + u8 reg, u8 lo, u8 hi) 907 913 { 908 914 const struct kvm_cpuid_entry2 *entry; 909 915 int i; ··· 930 936 feature.reg, feature.bit, feature.bit); 931 937 } 932 938 933 - uint32_t kvm_cpuid_property(const struct kvm_cpuid2 *cpuid, 934 - struct kvm_x86_cpu_property property) 939 + u32 kvm_cpuid_property(const struct kvm_cpuid2 *cpuid, 940 + struct kvm_x86_cpu_property property) 935 941 { 936 942 return __kvm_cpu_has(cpuid, property.function, property.index, 937 943 property.reg, property.lo_bit, property.hi_bit); 938 944 } 939 945 940 - uint64_t kvm_get_feature_msr(uint64_t msr_index) 946 + u64 kvm_get_feature_msr(u64 msr_index) 941 947 { 942 948 struct { 943 949 struct kvm_msrs 
header; ··· 956 962 return buffer.entry.data; 957 963 } 958 964 959 - void __vm_xsave_require_permission(uint64_t xfeature, const char *name) 965 + void __vm_xsave_require_permission(u64 xfeature, const char *name) 960 966 { 961 967 int kvm_fd; 962 968 u64 bitmask; ··· 1013 1019 1014 1020 void vcpu_set_cpuid_property(struct kvm_vcpu *vcpu, 1015 1021 struct kvm_x86_cpu_property property, 1016 - uint32_t value) 1022 + u32 value) 1017 1023 { 1018 1024 struct kvm_cpuid_entry2 *entry; 1019 1025 ··· 1028 1034 TEST_ASSERT_EQ(kvm_cpuid_property(vcpu->cpuid, property), value); 1029 1035 } 1030 1036 1031 - void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, uint32_t function) 1037 + void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, u32 function) 1032 1038 { 1033 1039 struct kvm_cpuid_entry2 *entry = vcpu_get_cpuid_entry(vcpu, function); 1034 1040 ··· 1057 1063 vcpu_set_cpuid(vcpu); 1058 1064 } 1059 1065 1060 - uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index) 1066 + u64 vcpu_get_msr(struct kvm_vcpu *vcpu, u64 msr_index) 1061 1067 { 1062 1068 struct { 1063 1069 struct kvm_msrs header; ··· 1072 1078 return buffer.entry.data; 1073 1079 } 1074 1080 1075 - int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, uint64_t msr_value) 1081 + int _vcpu_set_msr(struct kvm_vcpu *vcpu, u64 msr_index, u64 msr_value) 1076 1082 { 1077 1083 struct { 1078 1084 struct kvm_msrs header; ··· 1100 1106 vcpu_regs_get(vcpu, &regs); 1101 1107 1102 1108 if (num >= 1) 1103 - regs.rdi = va_arg(ap, uint64_t); 1109 + regs.rdi = va_arg(ap, u64); 1104 1110 1105 1111 if (num >= 2) 1106 - regs.rsi = va_arg(ap, uint64_t); 1112 + regs.rsi = va_arg(ap, u64); 1107 1113 1108 1114 if (num >= 3) 1109 - regs.rdx = va_arg(ap, uint64_t); 1115 + regs.rdx = va_arg(ap, u64); 1110 1116 1111 1117 if (num >= 4) 1112 - regs.rcx = va_arg(ap, uint64_t); 1118 + regs.rcx = va_arg(ap, u64); 1113 1119 1114 1120 if (num >= 5) 1115 - regs.r8 = va_arg(ap, uint64_t); 1121 + regs.r8 = va_arg(ap, u64); 1116 1122 1117 1123 if (num >= 6) 1118 - regs.r9 = va_arg(ap, uint64_t); 1124 + regs.r9 = va_arg(ap, u64); 1119 1125 1120 1126 vcpu_regs_set(vcpu, &regs); 1121 1127 va_end(ap); 1122 1128 } 1123 1129 1124 - void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent) 1130 + void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, u8 indent) 1125 1131 { 1126 1132 struct kvm_regs regs; 1127 1133 struct kvm_sregs sregs; ··· 1190 1196 return list; 1191 1197 } 1192 1198 1193 - bool kvm_msr_is_in_save_restore_list(uint32_t msr_index) 1199 + bool kvm_msr_is_in_save_restore_list(u32 msr_index) 1194 1200 { 1195 1201 const struct kvm_msr_list *list = kvm_get_msr_index_list(); 1196 1202 int i; ··· 1321 1327 } 1322 1328 1323 1329 const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid, 1324 - uint32_t function, uint32_t index) 1330 + u32 function, u32 index) 1325 1331 { 1326 1332 int i; 1327 1333 ··· 1338 1344 1339 1345 #define X86_HYPERCALL(inputs...) 
\ 1340 1346 ({ \ 1341 - uint64_t r; \ 1347 + u64 r; \ 1342 1348 \ 1343 1349 asm volatile("test %[use_vmmcall], %[use_vmmcall]\n\t" \ 1344 1350 "jnz 1f\n\t" \ ··· 1353 1359 r; \ 1354 1360 }) 1355 1361 1356 - uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2, 1357 - uint64_t a3) 1362 + u64 kvm_hypercall(u64 nr, u64 a0, u64 a1, u64 a2, u64 a3) 1358 1363 { 1359 1364 return X86_HYPERCALL("a"(nr), "b"(a0), "c"(a1), "d"(a2), "S"(a3)); 1360 1365 } 1361 1366 1362 - uint64_t __xen_hypercall(uint64_t nr, uint64_t a0, void *a1) 1367 + u64 __xen_hypercall(u64 nr, u64 a0, void *a1) 1363 1368 { 1364 1369 return X86_HYPERCALL("a"(nr), "D"(a0), "S"(a1)); 1365 1370 } 1366 1371 1367 - void xen_hypercall(uint64_t nr, uint64_t a0, void *a1) 1372 + void xen_hypercall(u64 nr, u64 a0, void *a1) 1368 1373 { 1369 1374 GUEST_ASSERT(!__xen_hypercall(nr, a0, a1)); 1370 1375 } ··· 1372 1379 { 1373 1380 const unsigned long num_ht_pages = 12 << (30 - vm->page_shift); /* 12 GiB */ 1374 1381 unsigned long ht_gfn, max_gfn, max_pfn; 1375 - uint8_t maxphyaddr, guest_maxphyaddr; 1382 + u8 maxphyaddr, guest_maxphyaddr; 1376 1383 1377 1384 /* 1378 1385 * Use "guest MAXPHYADDR" from KVM if it's available. Guest MAXPHYADDR ··· 1446 1453 return true; 1447 1454 } 1448 1455 1449 - void setup_smram(struct kvm_vm *vm, struct kvm_vcpu *vcpu, 1450 - uint64_t smram_gpa, 1456 + void setup_smram(struct kvm_vm *vm, struct kvm_vcpu *vcpu, u64 smram_gpa, 1451 1457 const void *smi_handler, size_t handler_size) 1452 1458 { 1453 1459 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, smram_gpa,
+10 -10
tools/testing/selftests/kvm/lib/x86/sev.c
··· 15 15 * expression would cause us to quit the loop. 16 16 */ 17 17 static void encrypt_region(struct kvm_vm *vm, struct userspace_mem_region *region, 18 - uint8_t page_type, bool private) 18 + u8 page_type, bool private) 19 19 { 20 20 const struct sparsebit *protected_phy_pages = region->protected_phy_pages; 21 - const vm_paddr_t gpa_base = region->region.guest_phys_addr; 21 + const gpa_t gpa_base = region->region.guest_phys_addr; 22 22 const sparsebit_idx_t lowest_page_in_region = gpa_base >> vm->page_shift; 23 23 sparsebit_idx_t i, j; 24 24 ··· 29 29 sev_register_encrypted_memory(vm, region); 30 30 31 31 sparsebit_for_each_set_range(protected_phy_pages, i, j) { 32 - const uint64_t size = (j - i + 1) * vm->page_size; 33 - const uint64_t offset = (i - lowest_page_in_region) * vm->page_size; 32 + const u64 size = (j - i + 1) * vm->page_size; 33 + const u64 offset = (i - lowest_page_in_region) * vm->page_size; 34 34 35 35 if (private) 36 36 vm_mem_set_private(vm, gpa_base + offset, size); 37 37 38 38 if (is_sev_snp_vm(vm)) 39 39 snp_launch_update_data(vm, gpa_base + offset, 40 - (uint64_t)addr_gpa2hva(vm, gpa_base + offset), 40 + (u64)addr_gpa2hva(vm, gpa_base + offset), 41 41 size, page_type); 42 42 else 43 43 sev_launch_update_data(vm, gpa_base + offset, size); ··· 79 79 vm_sev_ioctl(vm, KVM_SEV_INIT2, &init); 80 80 } 81 81 82 - void sev_vm_launch(struct kvm_vm *vm, uint32_t policy) 82 + void sev_vm_launch(struct kvm_vm *vm, u32 policy) 83 83 { 84 84 struct kvm_sev_launch_start launch_start = { 85 85 .policy = policy, ··· 103 103 vm->arch.is_pt_protected = true; 104 104 } 105 105 106 - void sev_vm_launch_measure(struct kvm_vm *vm, uint8_t *measurement) 106 + void sev_vm_launch_measure(struct kvm_vm *vm, u8 *measurement) 107 107 { 108 108 struct kvm_sev_launch_measure launch_measure; 109 109 struct kvm_sev_guest_status guest_status; ··· 131 131 TEST_ASSERT_EQ(status.state, SEV_GUEST_STATE_RUNNING); 132 132 } 133 133 134 - void snp_vm_launch_start(struct kvm_vm *vm, uint64_t policy) 134 + void snp_vm_launch_start(struct kvm_vm *vm, u64 policy) 135 135 { 136 136 struct kvm_sev_snp_launch_start launch_start = { 137 137 .policy = policy, ··· 158 158 vm_sev_ioctl(vm, KVM_SEV_SNP_LAUNCH_FINISH, &launch_finish); 159 159 } 160 160 161 - struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code, 161 + struct kvm_vm *vm_sev_create_with_one_vcpu(u32 type, void *guest_code, 162 162 struct kvm_vcpu **cpu) 163 163 { 164 164 struct vm_shape shape = { ··· 174 174 return vm; 175 175 } 176 176 177 - void vm_sev_launch(struct kvm_vm *vm, uint64_t policy, uint8_t *measurement) 177 + void vm_sev_launch(struct kvm_vm *vm, u64 policy, u8 *measurement) 178 178 { 179 179 if (is_sev_snp_vm(vm)) { 180 180 vm_enable_cap(vm, KVM_CAP_EXIT_HYPERCALL, BIT(KVM_HC_MAP_GPA_RANGE));
+8 -8
tools/testing/selftests/kvm/lib/x86/svm.c
··· 28 28 * Pointer to structure with the addresses of the SVM areas. 29 29 */ 30 30 struct svm_test_data * 31 - vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva) 31 + vcpu_alloc_svm(struct kvm_vm *vm, gva_t *p_svm_gva) 32 32 { 33 - vm_vaddr_t svm_gva = vm_vaddr_alloc_page(vm); 33 + gva_t svm_gva = vm_alloc_page(vm); 34 34 struct svm_test_data *svm = addr_gva2hva(vm, svm_gva); 35 35 36 - svm->vmcb = (void *)vm_vaddr_alloc_page(vm); 36 + svm->vmcb = (void *)vm_alloc_page(vm); 37 37 svm->vmcb_hva = addr_gva2hva(vm, (uintptr_t)svm->vmcb); 38 38 svm->vmcb_gpa = addr_gva2gpa(vm, (uintptr_t)svm->vmcb); 39 39 40 - svm->save_area = (void *)vm_vaddr_alloc_page(vm); 40 + svm->save_area = (void *)vm_alloc_page(vm); 41 41 svm->save_area_hva = addr_gva2hva(vm, (uintptr_t)svm->save_area); 42 42 svm->save_area_gpa = addr_gva2gpa(vm, (uintptr_t)svm->save_area); 43 43 44 - svm->msr = (void *)vm_vaddr_alloc_page(vm); 44 + svm->msr = (void *)vm_alloc_page(vm); 45 45 svm->msr_hva = addr_gva2hva(vm, (uintptr_t)svm->msr); 46 46 svm->msr_gpa = addr_gva2gpa(vm, (uintptr_t)svm->msr); 47 47 memset(svm->msr_hva, 0, getpagesize()); ··· 84 84 void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp) 85 85 { 86 86 struct vmcb *vmcb = svm->vmcb; 87 - uint64_t vmcb_gpa = svm->vmcb_gpa; 87 + u64 vmcb_gpa = svm->vmcb_gpa; 88 88 struct vmcb_save_area *save = &vmcb->save; 89 89 struct vmcb_control_area *ctrl = &vmcb->control; 90 90 u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK 91 91 | SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK; 92 92 u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK 93 93 | SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK; 94 - uint64_t efer; 94 + u64 efer; 95 95 96 96 efer = rdmsr(MSR_EFER); 97 97 wrmsr(MSR_EFER, efer | EFER_SVME); ··· 158 158 * for now. registers involved in LOAD/SAVE_GPR_C are eventually 159 159 * unmodified so they do not need to be in the clobber list. 160 160 */ 161 - void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa) 161 + void run_guest(struct vmcb *vmcb, u64 vmcb_gpa) 162 162 { 163 163 asm volatile ( 164 164 "vmload %[vmcb_gpa]\n\t"
+2 -2
tools/testing/selftests/kvm/lib/x86/ucall.c
··· 6 6 */ 7 7 #include "kvm_util.h" 8 8 9 - #define UCALL_PIO_PORT ((uint16_t)0x1000) 9 + #define UCALL_PIO_PORT ((u16)0x1000) 10 10 11 - void ucall_arch_do_ucall(vm_vaddr_t uc) 11 + void ucall_arch_do_ucall(gva_t uc) 12 12 { 13 13 /* 14 14 * FIXME: Revert this hack (the entire commit that added it) once nVMX
+22 -22
tools/testing/selftests/kvm/lib/x86/vmx.c
··· 27 27 28 28 int vcpu_enable_evmcs(struct kvm_vcpu *vcpu) 29 29 { 30 - uint16_t evmcs_ver; 30 + u16 evmcs_ver; 31 31 32 32 vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENLIGHTENED_VMCS, 33 33 (unsigned long)&evmcs_ver); ··· 79 79 * Pointer to structure with the addresses of the VMX areas. 80 80 */ 81 81 struct vmx_pages * 82 - vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva) 82 + vcpu_alloc_vmx(struct kvm_vm *vm, gva_t *p_vmx_gva) 83 83 { 84 - vm_vaddr_t vmx_gva = vm_vaddr_alloc_page(vm); 84 + gva_t vmx_gva = vm_alloc_page(vm); 85 85 struct vmx_pages *vmx = addr_gva2hva(vm, vmx_gva); 86 86 87 87 /* Setup of a region of guest memory for the vmxon region. */ 88 - vmx->vmxon = (void *)vm_vaddr_alloc_page(vm); 88 + vmx->vmxon = (void *)vm_alloc_page(vm); 89 89 vmx->vmxon_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmxon); 90 90 vmx->vmxon_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmxon); 91 91 92 92 /* Setup of a region of guest memory for a vmcs. */ 93 - vmx->vmcs = (void *)vm_vaddr_alloc_page(vm); 93 + vmx->vmcs = (void *)vm_alloc_page(vm); 94 94 vmx->vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmcs); 95 95 vmx->vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmcs); 96 96 97 97 /* Setup of a region of guest memory for the MSR bitmap. */ 98 - vmx->msr = (void *)vm_vaddr_alloc_page(vm); 98 + vmx->msr = (void *)vm_alloc_page(vm); 99 99 vmx->msr_hva = addr_gva2hva(vm, (uintptr_t)vmx->msr); 100 100 vmx->msr_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->msr); 101 101 memset(vmx->msr_hva, 0, getpagesize()); 102 102 103 103 /* Setup of a region of guest memory for the shadow VMCS. */ 104 - vmx->shadow_vmcs = (void *)vm_vaddr_alloc_page(vm); 104 + vmx->shadow_vmcs = (void *)vm_alloc_page(vm); 105 105 vmx->shadow_vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->shadow_vmcs); 106 106 vmx->shadow_vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->shadow_vmcs); 107 107 108 108 /* Setup of a region of guest memory for the VMREAD and VMWRITE bitmaps. */ 109 - vmx->vmread = (void *)vm_vaddr_alloc_page(vm); 109 + vmx->vmread = (void *)vm_alloc_page(vm); 110 110 vmx->vmread_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmread); 111 111 vmx->vmread_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmread); 112 112 memset(vmx->vmread_hva, 0, getpagesize()); 113 113 114 - vmx->vmwrite = (void *)vm_vaddr_alloc_page(vm); 114 + vmx->vmwrite = (void *)vm_alloc_page(vm); 115 115 vmx->vmwrite_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmwrite); 116 116 vmx->vmwrite_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmwrite); 117 117 memset(vmx->vmwrite_hva, 0, getpagesize()); ··· 125 125 126 126 bool prepare_for_vmx_operation(struct vmx_pages *vmx) 127 127 { 128 - uint64_t feature_control; 129 - uint64_t required; 128 + u64 feature_control; 129 + u64 required; 130 130 unsigned long cr0; 131 131 unsigned long cr4; 132 132 ··· 160 160 wrmsr(MSR_IA32_FEAT_CTL, feature_control | required); 161 161 162 162 /* Enter VMX root operation. */ 163 - *(uint32_t *)(vmx->vmxon) = vmcs_revision(); 163 + *(u32 *)(vmx->vmxon) = vmcs_revision(); 164 164 if (vmxon(vmx->vmxon_gpa)) 165 165 return false; 166 166 ··· 170 170 bool load_vmcs(struct vmx_pages *vmx) 171 171 { 172 172 /* Load a VMCS. */ 173 - *(uint32_t *)(vmx->vmcs) = vmcs_revision(); 173 + *(u32 *)(vmx->vmcs) = vmcs_revision(); 174 174 if (vmclear(vmx->vmcs_gpa)) 175 175 return false; 176 176 ··· 178 178 return false; 179 179 180 180 /* Setup shadow VMCS, do not load it yet. 
*/ 181 - *(uint32_t *)(vmx->shadow_vmcs) = vmcs_revision() | 0x80000000ul; 181 + *(u32 *)(vmx->shadow_vmcs) = vmcs_revision() | 0x80000000ul; 182 182 if (vmclear(vmx->shadow_vmcs_gpa)) 183 183 return false; 184 184 185 185 return true; 186 186 } 187 187 188 - static bool ept_vpid_cap_supported(uint64_t mask) 188 + static bool ept_vpid_cap_supported(u64 mask) 189 189 { 190 190 return rdmsr(MSR_IA32_VMX_EPT_VPID_CAP) & mask; 191 191 } ··· 200 200 */ 201 201 static inline void init_vmcs_control_fields(struct vmx_pages *vmx) 202 202 { 203 - uint32_t sec_exec_ctl = 0; 203 + u32 sec_exec_ctl = 0; 204 204 205 205 vmwrite(VIRTUAL_PROCESSOR_ID, 0); 206 206 vmwrite(POSTED_INTR_NV, 0); ··· 208 208 vmwrite(PIN_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_TRUE_PINBASED_CTLS)); 209 209 210 210 if (vmx->eptp_gpa) { 211 - uint64_t eptp = vmx->eptp_gpa | EPTP_WB | EPTP_PWL_4; 211 + u64 eptp = vmx->eptp_gpa | EPTP_WB | EPTP_PWL_4; 212 212 213 213 TEST_ASSERT((vmx->eptp_gpa & ~PHYSICAL_PAGE_MASK) == 0, 214 214 "Illegal bits set in vmx->eptp_gpa"); ··· 259 259 */ 260 260 static inline void init_vmcs_host_state(void) 261 261 { 262 - uint32_t exit_controls = vmreadz(VM_EXIT_CONTROLS); 262 + u32 exit_controls = vmreadz(VM_EXIT_CONTROLS); 263 263 264 264 vmwrite(HOST_ES_SELECTOR, get_es()); 265 265 vmwrite(HOST_CS_SELECTOR, get_cs()); ··· 358 358 vmwrite(GUEST_GDTR_BASE, vmreadz(HOST_GDTR_BASE)); 359 359 vmwrite(GUEST_IDTR_BASE, vmreadz(HOST_IDTR_BASE)); 360 360 vmwrite(GUEST_DR7, 0x400); 361 - vmwrite(GUEST_RSP, (uint64_t)rsp); 362 - vmwrite(GUEST_RIP, (uint64_t)rip); 361 + vmwrite(GUEST_RSP, (u64)rsp); 362 + vmwrite(GUEST_RIP, (u64)rip); 363 363 vmwrite(GUEST_RFLAGS, 2); 364 364 vmwrite(GUEST_PENDING_DBG_EXCEPTIONS, 0); 365 365 vmwrite(GUEST_SYSENTER_ESP, vmreadz(HOST_IA32_SYSENTER_ESP)); ··· 375 375 376 376 bool kvm_cpu_has_ept(void) 377 377 { 378 - uint64_t ctrl; 378 + u64 ctrl; 379 379 380 380 if (!kvm_cpu_has(X86_FEATURE_VMX)) 381 381 return false; ··· 390 390 391 391 void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm) 392 392 { 393 - vmx->apic_access = (void *)vm_vaddr_alloc_page(vm); 393 + vmx->apic_access = (void *)vm_alloc_page(vm); 394 394 vmx->apic_access_hva = addr_gva2hva(vm, (uintptr_t)vmx->apic_access); 395 395 vmx->apic_access_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->apic_access); 396 396 }
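The allocation pattern repeated in vcpu_alloc_vmx() above (and in vcpu_alloc_svm() earlier) keeps three views of each guest page: a guest-virtual handle, its guest-physical address, and the host mapping used to initialize it. A minimal sketch of that pattern, assuming the selftest helpers named in the hunks (the hypothetical wrapper below is not part of the diff):

#include <string.h>
#include <unistd.h>
#include "kvm_util.h"	/* assumed: provides struct kvm_vm, gva_t, gpa_t and the helpers used below */

/* Hypothetical helper mirroring the per-page setup used by the hunks above. */
static void *alloc_guest_page_views(struct kvm_vm *vm, gva_t *gva, gpa_t *gpa)
{
	void *hva;

	*gva = vm_alloc_page(vm);		/* guest-virtual handle, usable from guest code */
	*gpa = addr_gva2gpa(vm, *gva);		/* guest-physical view, e.g. for VMPTRLD/VMCB GPAs */
	hva = addr_gva2hva(vm, *gva);		/* host mapping, used to zero/initialize the page */

	memset(hva, 0, getpagesize());
	return hva;
}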
+14 -14
tools/testing/selftests/kvm/loongarch/arch_timer.c
··· 27 27 static void guest_irq_handler(struct ex_regs *regs) 28 28 { 29 29 unsigned int intid; 30 - uint32_t cpu = guest_get_vcpuid(); 31 - uint64_t xcnt, val, cfg, xcnt_diff_us; 30 + u32 cpu = guest_get_vcpuid(); 31 + u64 xcnt, val, cfg, xcnt_diff_us; 32 32 struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu]; 33 33 34 34 intid = !!(regs->estat & BIT(INT_TI)); ··· 62 62 WRITE_ONCE(shared_data->nr_iter, shared_data->nr_iter + 1); 63 63 } 64 64 65 - static void guest_test_period_timer(uint32_t cpu) 65 + static void guest_test_period_timer(u32 cpu) 66 66 { 67 - uint32_t irq_iter, config_iter; 68 - uint64_t us; 67 + u32 irq_iter, config_iter; 68 + u64 us; 69 69 struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu]; 70 70 71 71 shared_data->nr_iter = test_args.nr_iter; ··· 86 86 irq_iter); 87 87 } 88 88 89 - static void guest_test_oneshot_timer(uint32_t cpu) 89 + static void guest_test_oneshot_timer(u32 cpu) 90 90 { 91 - uint32_t irq_iter, config_iter; 92 - uint64_t us; 91 + u32 irq_iter, config_iter; 92 + u64 us; 93 93 struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu]; 94 94 95 95 shared_data->nr_iter = 0; ··· 112 112 } 113 113 } 114 114 115 - static void guest_test_emulate_timer(uint32_t cpu) 115 + static void guest_test_emulate_timer(u32 cpu) 116 116 { 117 - uint32_t config_iter; 118 - uint64_t xcnt_diff_us, us; 117 + u32 config_iter; 118 + u64 xcnt_diff_us, us; 119 119 struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu]; 120 120 121 121 local_irq_disable(); ··· 136 136 local_irq_enable(); 137 137 } 138 138 139 - static void guest_time_count_test(uint32_t cpu) 139 + static void guest_time_count_test(u32 cpu) 140 140 { 141 - uint32_t config_iter; 141 + u32 config_iter; 142 142 unsigned long start, end, prev, us; 143 143 144 144 /* Assuming that test case starts to run in 1 second */ ··· 165 165 166 166 static void guest_code(void) 167 167 { 168 - uint32_t cpu = guest_get_vcpuid(); 168 + u32 cpu = guest_get_vcpuid(); 169 169 170 170 /* must run at first */ 171 171 guest_time_count_test(cpu);
+5 -5
tools/testing/selftests/kvm/loongarch/pmu_test.c
··· 15 15 /* Check PMU support */ 16 16 static bool has_pmu_support(void) 17 17 { 18 - uint32_t cfg6; 18 + u32 cfg6; 19 19 20 20 /* Read CPUCFG6 to check PMU */ 21 21 cfg6 = read_cpucfg(LOONGARCH_CPUCFG6); ··· 34 34 /* Dump PMU capabilities */ 35 35 static void dump_pmu_caps(void) 36 36 { 37 - uint32_t cfg6; 37 + u32 cfg6; 38 38 int nr_counters, counter_bits; 39 39 40 40 cfg6 = read_cpucfg(LOONGARCH_CPUCFG6); ··· 51 51 static void guest_pmu_base_test(void) 52 52 { 53 53 int i; 54 - uint32_t cfg6, pmnum; 55 - uint64_t cnt[4]; 54 + u32 cfg6, pmnum; 55 + u64 cnt[4]; 56 56 57 57 cfg6 = read_cpucfg(LOONGARCH_CPUCFG6); 58 58 pmnum = (cfg6 >> 4) & 0xf; ··· 114 114 115 115 static void guest_pmu_interrupt_test(void) 116 116 { 117 - uint64_t cnt; 117 + u64 cnt; 118 118 119 119 csr_write(PMU_OVERFLOW - 1, LOONGARCH_CSR_PERFCNTR0); 120 120 csr_write(PMU_ENVENT_ENABLED | CSR_PERFCTRL_PMIE | LOONGARCH_PMU_EVENT_CYCLES, LOONGARCH_CSR_PERFCTRL0);
+5 -5
tools/testing/selftests/kvm/memslot_modification_stress_test.c
··· 30 30 31 31 32 32 static int nr_vcpus = 1; 33 - static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE; 33 + static u64 guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE; 34 34 35 35 static void vcpu_worker(struct memstress_vcpu_args *vcpu_args) 36 36 { ··· 55 55 } 56 56 57 57 static void add_remove_memslot(struct kvm_vm *vm, useconds_t delay, 58 - uint64_t nr_modifications) 58 + u64 nr_modifications) 59 59 { 60 - uint64_t pages = max_t(int, vm->page_size, getpagesize()) / vm->page_size; 61 - uint64_t gpa; 60 + u64 pages = max_t(int, vm->page_size, getpagesize()) / vm->page_size; 61 + gpa_t gpa; 62 62 int i; 63 63 64 64 /* ··· 78 78 79 79 struct test_params { 80 80 useconds_t delay; 81 - uint64_t nr_iterations; 81 + u64 nr_iterations; 82 82 bool partition_vcpu_memory_access; 83 83 bool disable_slot_zap_quirk; 84 84 };
+82 -82
tools/testing/selftests/kvm/memslot_perf_test.c
··· 85 85 struct kvm_vm *vm; 86 86 struct kvm_vcpu *vcpu; 87 87 pthread_t vcpu_thread; 88 - uint32_t nslots; 89 - uint64_t npages; 90 - uint64_t pages_per_slot; 88 + u32 nslots; 89 + u64 npages; 90 + u64 pages_per_slot; 91 91 void **hva_slots; 92 92 bool mmio_ok; 93 - uint64_t mmio_gpa_min; 94 - uint64_t mmio_gpa_max; 93 + u64 mmio_gpa_min; 94 + u64 mmio_gpa_max; 95 95 }; 96 96 97 97 struct sync_area { 98 - uint32_t guest_page_size; 98 + u32 guest_page_size; 99 99 atomic_bool start_flag; 100 100 atomic_bool exit_flag; 101 101 atomic_bool sync_flag; ··· 186 186 "sem_timedwait() failed: %d", errno); 187 187 } 188 188 189 - static void *vm_gpa2hva(struct vm_data *data, uint64_t gpa, uint64_t *rempages) 189 + static void *vm_gpa2hva(struct vm_data *data, gpa_t gpa, u64 *rempages) 190 190 { 191 - uint64_t gpage, pgoffs; 192 - uint32_t slot, slotoffs; 191 + gpa_t gpage, pgoffs; 192 + u32 slot, slotoffs; 193 193 void *base; 194 - uint32_t guest_page_size = data->vm->page_size; 194 + u32 guest_page_size = data->vm->page_size; 195 195 196 196 TEST_ASSERT(gpa >= MEM_GPA, "Too low gpa to translate"); 197 197 TEST_ASSERT(gpa < MEM_GPA + data->npages * guest_page_size, ··· 200 200 201 201 gpage = gpa / guest_page_size; 202 202 pgoffs = gpa % guest_page_size; 203 - slot = min(gpage / data->pages_per_slot, (uint64_t)data->nslots - 1); 203 + slot = min(gpage / data->pages_per_slot, (u64)data->nslots - 1); 204 204 slotoffs = gpage - (slot * data->pages_per_slot); 205 205 206 206 if (rempages) { 207 - uint64_t slotpages; 207 + u64 slotpages; 208 208 209 209 if (slot == data->nslots - 1) 210 210 slotpages = data->npages - slot * data->pages_per_slot; ··· 217 217 } 218 218 219 219 base = data->hva_slots[slot]; 220 - return (uint8_t *)base + slotoffs * guest_page_size + pgoffs; 220 + return (u8 *)base + slotoffs * guest_page_size + pgoffs; 221 221 } 222 222 223 - static uint64_t vm_slot2gpa(struct vm_data *data, uint32_t slot) 223 + static u64 vm_slot2gpa(struct vm_data *data, u32 slot) 224 224 { 225 - uint32_t guest_page_size = data->vm->page_size; 225 + u32 guest_page_size = data->vm->page_size; 226 226 227 227 TEST_ASSERT(slot < data->nslots, "Too high slot number"); 228 228 ··· 243 243 return data; 244 244 } 245 245 246 - static bool check_slot_pages(uint32_t host_page_size, uint32_t guest_page_size, 247 - uint64_t pages_per_slot, uint64_t rempages) 246 + static bool check_slot_pages(u32 host_page_size, u32 guest_page_size, 247 + u64 pages_per_slot, u64 rempages) 248 248 { 249 249 if (!pages_per_slot) 250 250 return false; ··· 259 259 } 260 260 261 261 262 - static uint64_t get_max_slots(struct vm_data *data, uint32_t host_page_size) 262 + static u64 get_max_slots(struct vm_data *data, u32 host_page_size) 263 263 { 264 - uint32_t guest_page_size = data->vm->page_size; 265 - uint64_t mempages, pages_per_slot, rempages; 266 - uint64_t slots; 264 + u32 guest_page_size = data->vm->page_size; 265 + u64 mempages, pages_per_slot, rempages; 266 + u64 slots; 267 267 268 268 mempages = data->npages; 269 269 slots = data->nslots; ··· 281 281 return 0; 282 282 } 283 283 284 - static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots, 285 - void *guest_code, uint64_t mem_size, 284 + static bool prepare_vm(struct vm_data *data, int nslots, u64 *maxslots, 285 + void *guest_code, u64 mem_size, 286 286 struct timespec *slot_runtime) 287 287 { 288 - uint64_t mempages, rempages; 289 - uint64_t guest_addr; 290 - uint32_t slot, host_page_size, guest_page_size; 288 + u64 mempages, rempages; 289 + u64 
guest_addr; 290 + u32 slot, host_page_size, guest_page_size; 291 291 struct timespec tstart; 292 292 struct sync_area *sync; 293 293 ··· 317 317 318 318 clock_gettime(CLOCK_MONOTONIC, &tstart); 319 319 for (slot = 1, guest_addr = MEM_GPA; slot <= data->nslots; slot++) { 320 - uint64_t npages; 320 + u64 npages; 321 321 322 322 npages = data->pages_per_slot; 323 323 if (slot == data->nslots) ··· 331 331 *slot_runtime = timespec_elapsed(tstart); 332 332 333 333 for (slot = 1, guest_addr = MEM_GPA; slot <= data->nslots; slot++) { 334 - uint64_t npages; 335 - uint64_t gpa; 334 + u64 npages; 335 + gpa_t gpa; 336 336 337 337 npages = data->pages_per_slot; 338 338 if (slot == data->nslots) ··· 448 448 static void guest_code_test_memslot_move(void) 449 449 { 450 450 struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA; 451 - uint32_t page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size); 451 + u32 page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size); 452 452 uintptr_t base = (typeof(base))READ_ONCE(sync->move_area_ptr); 453 453 454 454 GUEST_SYNC(0); ··· 460 460 461 461 for (ptr = base; ptr < base + MEM_TEST_MOVE_SIZE; 462 462 ptr += page_size) 463 - *(uint64_t *)ptr = MEM_TEST_VAL_1; 463 + *(u64 *)ptr = MEM_TEST_VAL_1; 464 464 465 465 /* 466 466 * No host sync here since the MMIO exits are so expensive ··· 477 477 static void guest_code_test_memslot_map(void) 478 478 { 479 479 struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA; 480 - uint32_t page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size); 480 + u32 page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size); 481 481 482 482 GUEST_SYNC(0); 483 483 ··· 489 489 for (ptr = MEM_TEST_GPA; 490 490 ptr < MEM_TEST_GPA + MEM_TEST_MAP_SIZE / 2; 491 491 ptr += page_size) 492 - *(uint64_t *)ptr = MEM_TEST_VAL_1; 492 + *(u64 *)ptr = MEM_TEST_VAL_1; 493 493 494 494 if (!guest_perform_sync()) 495 495 break; ··· 497 497 for (ptr = MEM_TEST_GPA + MEM_TEST_MAP_SIZE / 2; 498 498 ptr < MEM_TEST_GPA + MEM_TEST_MAP_SIZE; 499 499 ptr += page_size) 500 - *(uint64_t *)ptr = MEM_TEST_VAL_2; 500 + *(u64 *)ptr = MEM_TEST_VAL_2; 501 501 502 502 if (!guest_perform_sync()) 503 503 break; ··· 526 526 * 527 527 * Just access a single page to be on the safe side. 
528 528 */ 529 - *(uint64_t *)ptr = MEM_TEST_VAL_1; 529 + *(u64 *)ptr = MEM_TEST_VAL_1; 530 530 531 531 if (!guest_perform_sync()) 532 532 break; 533 533 534 534 ptr += MEM_TEST_UNMAP_SIZE / 2; 535 - *(uint64_t *)ptr = MEM_TEST_VAL_2; 535 + *(u64 *)ptr = MEM_TEST_VAL_2; 536 536 537 537 if (!guest_perform_sync()) 538 538 break; ··· 544 544 static void guest_code_test_memslot_rw(void) 545 545 { 546 546 struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA; 547 - uint32_t page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size); 547 + u32 page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size); 548 548 549 549 GUEST_SYNC(0); 550 550 ··· 555 555 556 556 for (ptr = MEM_TEST_GPA; 557 557 ptr < MEM_TEST_GPA + MEM_TEST_SIZE; ptr += page_size) 558 - *(uint64_t *)ptr = MEM_TEST_VAL_1; 558 + *(u64 *)ptr = MEM_TEST_VAL_1; 559 559 560 560 if (!guest_perform_sync()) 561 561 break; 562 562 563 563 for (ptr = MEM_TEST_GPA + page_size / 2; 564 564 ptr < MEM_TEST_GPA + MEM_TEST_SIZE; ptr += page_size) { 565 - uint64_t val = *(uint64_t *)ptr; 565 + u64 val = *(u64 *)ptr; 566 566 567 567 GUEST_ASSERT_EQ(val, MEM_TEST_VAL_2); 568 - *(uint64_t *)ptr = 0; 568 + *(u64 *)ptr = 0; 569 569 } 570 570 571 571 if (!guest_perform_sync()) ··· 577 577 578 578 static bool test_memslot_move_prepare(struct vm_data *data, 579 579 struct sync_area *sync, 580 - uint64_t *maxslots, bool isactive) 580 + u64 *maxslots, bool isactive) 581 581 { 582 - uint32_t guest_page_size = data->vm->page_size; 583 - uint64_t movesrcgpa, movetestgpa; 582 + u32 guest_page_size = data->vm->page_size; 583 + u64 movesrcgpa, movetestgpa; 584 584 585 585 #ifdef __x86_64__ 586 586 if (disable_slot_zap_quirk) ··· 590 590 movesrcgpa = vm_slot2gpa(data, data->nslots - 1); 591 591 592 592 if (isactive) { 593 - uint64_t lastpages; 593 + u64 lastpages; 594 594 595 595 vm_gpa2hva(data, movesrcgpa, &lastpages); 596 596 if (lastpages * guest_page_size < MEM_TEST_MOVE_SIZE / 2) { ··· 613 613 614 614 static bool test_memslot_move_prepare_active(struct vm_data *data, 615 615 struct sync_area *sync, 616 - uint64_t *maxslots) 616 + u64 *maxslots) 617 617 { 618 618 return test_memslot_move_prepare(data, sync, maxslots, true); 619 619 } 620 620 621 621 static bool test_memslot_move_prepare_inactive(struct vm_data *data, 622 622 struct sync_area *sync, 623 - uint64_t *maxslots) 623 + u64 *maxslots) 624 624 { 625 625 return test_memslot_move_prepare(data, sync, maxslots, false); 626 626 } 627 627 628 628 static void test_memslot_move_loop(struct vm_data *data, struct sync_area *sync) 629 629 { 630 - uint64_t movesrcgpa; 630 + u64 movesrcgpa; 631 631 632 632 movesrcgpa = vm_slot2gpa(data, data->nslots - 1); 633 633 vm_mem_region_move(data->vm, data->nslots - 1 + 1, ··· 636 636 } 637 637 638 638 static void test_memslot_do_unmap(struct vm_data *data, 639 - uint64_t offsp, uint64_t count) 639 + u64 offsp, u64 count) 640 640 { 641 - uint64_t gpa, ctr; 642 - uint32_t guest_page_size = data->vm->page_size; 641 + gpa_t gpa, ctr; 642 + u32 guest_page_size = data->vm->page_size; 643 643 644 644 for (gpa = MEM_TEST_GPA + offsp * guest_page_size, ctr = 0; ctr < count; ) { 645 - uint64_t npages; 645 + u64 npages; 646 646 void *hva; 647 647 int ret; 648 648 ··· 661 661 } 662 662 663 663 static void test_memslot_map_unmap_check(struct vm_data *data, 664 - uint64_t offsp, uint64_t valexp) 664 + u64 offsp, u64 valexp) 665 665 { 666 - uint64_t gpa; 667 - uint64_t *val; 668 - uint32_t guest_page_size = data->vm->page_size; 666 + gpa_t gpa; 667 + u64 *val; 668 + u32 
guest_page_size = data->vm->page_size; 669 669 670 670 if (!map_unmap_verify) 671 671 return; ··· 680 680 681 681 static void test_memslot_map_loop(struct vm_data *data, struct sync_area *sync) 682 682 { 683 - uint32_t guest_page_size = data->vm->page_size; 684 - uint64_t guest_pages = MEM_TEST_MAP_SIZE / guest_page_size; 683 + u32 guest_page_size = data->vm->page_size; 684 + u64 guest_pages = MEM_TEST_MAP_SIZE / guest_page_size; 685 685 686 686 /* 687 687 * Unmap the second half of the test area while guest writes to (maps) ··· 718 718 719 719 static void test_memslot_unmap_loop_common(struct vm_data *data, 720 720 struct sync_area *sync, 721 - uint64_t chunk) 721 + u64 chunk) 722 722 { 723 - uint32_t guest_page_size = data->vm->page_size; 724 - uint64_t guest_pages = MEM_TEST_UNMAP_SIZE / guest_page_size; 725 - uint64_t ctr; 723 + u32 guest_page_size = data->vm->page_size; 724 + u64 guest_pages = MEM_TEST_UNMAP_SIZE / guest_page_size; 725 + u64 ctr; 726 726 727 727 /* 728 728 * Wait for the guest to finish mapping page(s) in the first half ··· 746 746 static void test_memslot_unmap_loop(struct vm_data *data, 747 747 struct sync_area *sync) 748 748 { 749 - uint32_t host_page_size = getpagesize(); 750 - uint32_t guest_page_size = data->vm->page_size; 751 - uint64_t guest_chunk_pages = guest_page_size >= host_page_size ? 749 + u32 host_page_size = getpagesize(); 750 + u32 guest_page_size = data->vm->page_size; 751 + u64 guest_chunk_pages = guest_page_size >= host_page_size ? 752 752 1 : host_page_size / guest_page_size; 753 753 754 754 test_memslot_unmap_loop_common(data, sync, guest_chunk_pages); ··· 757 757 static void test_memslot_unmap_loop_chunked(struct vm_data *data, 758 758 struct sync_area *sync) 759 759 { 760 - uint32_t guest_page_size = data->vm->page_size; 761 - uint64_t guest_chunk_pages = MEM_TEST_UNMAP_CHUNK_SIZE / guest_page_size; 760 + u32 guest_page_size = data->vm->page_size; 761 + u64 guest_chunk_pages = MEM_TEST_UNMAP_CHUNK_SIZE / guest_page_size; 762 762 763 763 test_memslot_unmap_loop_common(data, sync, guest_chunk_pages); 764 764 } 765 765 766 766 static void test_memslot_rw_loop(struct vm_data *data, struct sync_area *sync) 767 767 { 768 - uint64_t gptr; 769 - uint32_t guest_page_size = data->vm->page_size; 768 + u64 gptr; 769 + u32 guest_page_size = data->vm->page_size; 770 770 771 771 for (gptr = MEM_TEST_GPA + guest_page_size / 2; 772 772 gptr < MEM_TEST_GPA + MEM_TEST_SIZE; gptr += guest_page_size) 773 - *(uint64_t *)vm_gpa2hva(data, gptr, NULL) = MEM_TEST_VAL_2; 773 + *(u64 *)vm_gpa2hva(data, gptr, NULL) = MEM_TEST_VAL_2; 774 774 775 775 host_perform_sync(sync); 776 776 777 777 for (gptr = MEM_TEST_GPA; 778 778 gptr < MEM_TEST_GPA + MEM_TEST_SIZE; gptr += guest_page_size) { 779 - uint64_t *vptr = (typeof(vptr))vm_gpa2hva(data, gptr, NULL); 780 - uint64_t val = *vptr; 779 + u64 *vptr = (typeof(vptr))vm_gpa2hva(data, gptr, NULL); 780 + u64 val = *vptr; 781 781 782 782 TEST_ASSERT(val == MEM_TEST_VAL_1, 783 783 "Guest written values should read back correctly (is %"PRIu64" @ %"PRIx64")", ··· 790 790 791 791 struct test_data { 792 792 const char *name; 793 - uint64_t mem_size; 793 + u64 mem_size; 794 794 void (*guest_code)(void); 795 795 bool (*prepare)(struct vm_data *data, struct sync_area *sync, 796 - uint64_t *maxslots); 796 + u64 *maxslots); 797 797 void (*loop)(struct vm_data *data, struct sync_area *sync); 798 798 }; 799 799 800 - static bool test_execute(int nslots, uint64_t *maxslots, 800 + static bool test_execute(int nslots, u64 *maxslots, 801 801 
unsigned int maxtime, 802 802 const struct test_data *tdata, 803 - uint64_t *nloops, 803 + u64 *nloops, 804 804 struct timespec *slot_runtime, 805 805 struct timespec *guest_runtime) 806 806 { 807 - uint64_t mem_size = tdata->mem_size ? : MEM_SIZE; 807 + u64 mem_size = tdata->mem_size ? : MEM_SIZE; 808 808 struct vm_data *data; 809 809 struct sync_area *sync; 810 810 struct timespec tstart; ··· 924 924 925 925 static bool check_memory_sizes(void) 926 926 { 927 - uint32_t host_page_size = getpagesize(); 928 - uint32_t guest_page_size = vm_guest_mode_params[VM_MODE_DEFAULT].page_size; 927 + u32 host_page_size = getpagesize(); 928 + u32 guest_page_size = vm_guest_mode_params[VM_MODE_DEFAULT].page_size; 929 929 930 930 if (host_page_size > SZ_64K || guest_page_size > SZ_64K) { 931 931 pr_info("Unsupported page size on host (0x%x) or guest (0x%x)\n", ··· 961 961 static bool parse_args(int argc, char *argv[], 962 962 struct test_args *targs) 963 963 { 964 - uint32_t max_mem_slots; 964 + u32 max_mem_slots; 965 965 int opt; 966 966 967 967 while ((opt = getopt(argc, argv, "hvdqs:f:e:l:r:")) != -1) { ··· 1040 1040 1041 1041 struct test_result { 1042 1042 struct timespec slot_runtime, guest_runtime, iter_runtime; 1043 - int64_t slottimens, runtimens; 1044 - uint64_t nloops; 1043 + s64 slottimens, runtimens; 1044 + u64 nloops; 1045 1045 }; 1046 1046 1047 1047 static bool test_loop(const struct test_data *data, ··· 1049 1049 struct test_result *rbestslottime, 1050 1050 struct test_result *rbestruntime) 1051 1051 { 1052 - uint64_t maxslots; 1052 + u64 maxslots; 1053 1053 struct test_result result = {}; 1054 1054 1055 1055 if (!test_execute(targs->nslots, &maxslots, targs->seconds, data,
+14 -14
tools/testing/selftests/kvm/mmu_stress_test.c
··· 20 20 static bool mprotect_ro_done; 21 21 static bool all_vcpus_hit_ro_fault; 22 22 23 - static void guest_code(uint64_t start_gpa, uint64_t end_gpa, uint64_t stride) 23 + static void guest_code(u64 start_gpa, u64 end_gpa, u64 stride) 24 24 { 25 - uint64_t gpa; 25 + gpa_t gpa; 26 26 int i; 27 27 28 28 for (i = 0; i < 2; i++) { 29 29 for (gpa = start_gpa; gpa < end_gpa; gpa += stride) 30 - vcpu_arch_put_guest(*((volatile uint64_t *)gpa), gpa); 30 + vcpu_arch_put_guest(*((volatile u64 *)gpa), gpa); 31 31 GUEST_SYNC(i); 32 32 } 33 33 34 34 for (gpa = start_gpa; gpa < end_gpa; gpa += stride) 35 - *((volatile uint64_t *)gpa); 35 + *((volatile u64 *)gpa); 36 36 GUEST_SYNC(2); 37 37 38 38 /* ··· 55 55 #elif defined(__aarch64__) 56 56 asm volatile("str %0, [%0]" :: "r" (gpa) : "memory"); 57 57 #else 58 - vcpu_arch_put_guest(*((volatile uint64_t *)gpa), gpa); 58 + vcpu_arch_put_guest(*((volatile u64 *)gpa), gpa); 59 59 #endif 60 60 } while (!READ_ONCE(mprotect_ro_done) || !READ_ONCE(all_vcpus_hit_ro_fault)); 61 61 ··· 68 68 #endif 69 69 70 70 for (gpa = start_gpa; gpa < end_gpa; gpa += stride) 71 - vcpu_arch_put_guest(*((volatile uint64_t *)gpa), gpa); 71 + vcpu_arch_put_guest(*((volatile u64 *)gpa), gpa); 72 72 GUEST_SYNC(4); 73 73 74 74 GUEST_ASSERT(0); ··· 76 76 77 77 struct vcpu_info { 78 78 struct kvm_vcpu *vcpu; 79 - uint64_t start_gpa; 80 - uint64_t end_gpa; 79 + u64 start_gpa; 80 + u64 end_gpa; 81 81 }; 82 82 83 83 static int nr_vcpus; ··· 203 203 } 204 204 205 205 static pthread_t *spawn_workers(struct kvm_vm *vm, struct kvm_vcpu **vcpus, 206 - uint64_t start_gpa, uint64_t end_gpa) 206 + u64 start_gpa, u64 end_gpa) 207 207 { 208 208 struct vcpu_info *info; 209 - uint64_t gpa, nr_bytes; 209 + gpa_t gpa, nr_bytes; 210 210 pthread_t *threads; 211 211 int i; 212 212 ··· 217 217 TEST_ASSERT(info, "Failed to allocate vCPU gpa ranges"); 218 218 219 219 nr_bytes = ((end_gpa - start_gpa) / nr_vcpus) & 220 - ~((uint64_t)vm->page_size - 1); 220 + ~((u64)vm->page_size - 1); 221 221 TEST_ASSERT(nr_bytes, "C'mon, no way you have %d CPUs", nr_vcpus); 222 222 223 223 for (i = 0, gpa = start_gpa; i < nr_vcpus; i++, gpa += nr_bytes) { ··· 278 278 * just below the 4gb boundary. This test could create memory at 279 279 * 1gb-3gb,but it's simpler to skip straight to 4gb. 280 280 */ 281 - const uint64_t start_gpa = SZ_4G; 281 + const u64 start_gpa = SZ_4G; 282 282 const int first_slot = 1; 283 283 284 284 struct timespec time_start, time_run1, time_reset, time_run2, time_ro, time_rw; 285 - uint64_t max_gpa, gpa, slot_size, max_mem, i; 285 + u64 max_gpa, gpa, slot_size, max_mem, i; 286 286 int max_slots, slot, opt, fd; 287 287 bool hugepages = false; 288 288 struct kvm_vcpu **vcpus; ··· 347 347 348 348 /* Pre-fault the memory to avoid taking mmap_sem on guest page faults. */ 349 349 for (i = 0; i < slot_size; i += vm->page_size) 350 - ((uint8_t *)mem)[i] = 0xaa; 350 + ((u8 *)mem)[i] = 0xaa; 351 351 352 352 gpa = 0; 353 353 for (slot = first_slot; slot < max_slots; slot++) {
+6 -6
tools/testing/selftests/kvm/pre_fault_memory_test.c
··· 17 17 #define TEST_NPAGES (TEST_SIZE / PAGE_SIZE) 18 18 #define TEST_SLOT 10 19 19 20 - static void guest_code(uint64_t base_gva) 20 + static void guest_code(u64 base_gva) 21 21 { 22 - volatile uint64_t val __used; 22 + volatile u64 val __used; 23 23 int i; 24 24 25 25 for (i = 0; i < TEST_NPAGES; i++) { 26 - uint64_t *src = (uint64_t *)(base_gva + i * PAGE_SIZE); 26 + u64 *src = (u64 *)(base_gva + i * PAGE_SIZE); 27 27 28 28 val = *src; 29 29 } ··· 33 33 34 34 struct slot_worker_data { 35 35 struct kvm_vm *vm; 36 - u64 gpa; 37 - uint32_t flags; 36 + gpa_t gpa; 37 + u32 flags; 38 38 bool worker_ready; 39 39 bool prefault_ready; 40 40 bool recreate_slot; ··· 161 161 162 162 static void __test_pre_fault_memory(unsigned long vm_type, bool private) 163 163 { 164 - uint64_t gpa, gva, alignment, guest_page_size; 164 + gpa_t gpa, gva, alignment, guest_page_size; 165 165 const struct vm_shape shape = { 166 166 .mode = VM_MODE_DEFAULT, 167 167 .type = vm_type,
+4 -4
tools/testing/selftests/kvm/riscv/arch_timer.c
··· 17 17 18 18 static void guest_irq_handler(struct pt_regs *regs) 19 19 { 20 - uint64_t xcnt, xcnt_diff_us, cmp; 20 + u64 xcnt, xcnt_diff_us, cmp; 21 21 unsigned int intid = regs->cause & ~CAUSE_IRQ_FLAG; 22 - uint32_t cpu = guest_get_vcpuid(); 22 + u32 cpu = guest_get_vcpuid(); 23 23 struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu]; 24 24 25 25 timer_irq_disable(); ··· 40 40 41 41 static void guest_run(struct test_vcpu_shared_data *shared_data) 42 42 { 43 - uint32_t irq_iter, config_iter; 43 + u32 irq_iter, config_iter; 44 44 45 45 shared_data->nr_iter = 0; 46 46 shared_data->guest_stage = 0; ··· 66 66 67 67 static void guest_code(void) 68 68 { 69 - uint32_t cpu = guest_get_vcpuid(); 69 + u32 cpu = guest_get_vcpuid(); 70 70 struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu]; 71 71 72 72 timer_irq_disable();
+3 -3
tools/testing/selftests/kvm/riscv/ebreak_test.c
··· 8 8 #include "kvm_util.h" 9 9 #include "ucall_common.h" 10 10 11 - #define LABEL_ADDRESS(v) ((uint64_t)&(v)) 11 + #define LABEL_ADDRESS(v) ((u64)&(v)) 12 12 13 13 extern unsigned char sw_bp_1, sw_bp_2; 14 - static uint64_t sw_bp_addr; 14 + static u64 sw_bp_addr; 15 15 16 16 static void guest_code(void) 17 17 { ··· 37 37 { 38 38 struct kvm_vm *vm; 39 39 struct kvm_vcpu *vcpu; 40 - uint64_t pc; 40 + u64 pc; 41 41 struct kvm_guest_debug debug = { 42 42 .control = KVM_GUESTDBG_ENABLE, 43 43 };
+2 -2
tools/testing/selftests/kvm/riscv/get-reg-list.c
··· 162 162 } 163 163 164 164 static int override_vector_reg_size(struct kvm_vcpu *vcpu, struct vcpu_reg_sublist *s, 165 - uint64_t feature) 165 + u64 feature) 166 166 { 167 167 unsigned long vlenb_reg = 0; 168 168 int rc; ··· 197 197 { 198 198 unsigned long isa_ext_state[KVM_RISCV_ISA_EXT_MAX] = { 0 }; 199 199 struct vcpu_reg_sublist *s; 200 - uint64_t feature; 200 + u64 feature; 201 201 int rc; 202 202 203 203 for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++)
+4 -4
tools/testing/selftests/kvm/riscv/sbi_pmu_test.c
··· 24 24 /* Snapshot shared memory data */ 25 25 #define PMU_SNAPSHOT_GPA_BASE BIT(30) 26 26 static void *snapshot_gva; 27 - static vm_paddr_t snapshot_gpa; 27 + static gpa_t snapshot_gpa; 28 28 29 29 static int vcpu_shared_irq_count; 30 30 static int counter_in_use; ··· 86 86 #undef switchcase_csr_read 87 87 } 88 88 89 - static inline void dummy_func_loop(uint64_t iter) 89 + static inline void dummy_func_loop(u64 iter) 90 90 { 91 91 int i = 0; 92 92 ··· 259 259 __GUEST_ASSERT(0, "SBI implementation version doesn't support PMU Snapshot"); 260 260 } 261 261 262 - static void snapshot_set_shmem(vm_paddr_t gpa, unsigned long flags) 262 + static void snapshot_set_shmem(gpa_t gpa, unsigned long flags) 263 263 { 264 264 unsigned long lo = (unsigned long)gpa; 265 265 #if __riscv_xlen == 32 ··· 610 610 virt_map(vm, PMU_SNAPSHOT_GPA_BASE, PMU_SNAPSHOT_GPA_BASE, 1); 611 611 612 612 snapshot_gva = (void *)(PMU_SNAPSHOT_GPA_BASE); 613 - snapshot_gpa = addr_gva2gpa(vcpu->vm, (vm_vaddr_t)snapshot_gva); 613 + snapshot_gpa = addr_gva2gpa(vcpu->vm, (gva_t)snapshot_gva); 614 614 sync_global_to_guest(vcpu->vm, snapshot_gva); 615 615 sync_global_to_guest(vcpu->vm, snapshot_gpa); 616 616 }
+4 -4
tools/testing/selftests/kvm/s390/debug_test.c
··· 17 17 "j .\n"); 18 18 19 19 static struct kvm_vm *test_step_int_1(struct kvm_vcpu **vcpu, void *guest_code, 20 - size_t new_psw_off, uint64_t *new_psw) 20 + size_t new_psw_off, u64 *new_psw) 21 21 { 22 22 struct kvm_guest_debug debug = {}; 23 23 struct kvm_regs regs; ··· 27 27 vm = vm_create_with_one_vcpu(vcpu, guest_code); 28 28 lowcore = addr_gpa2hva(vm, 0); 29 29 new_psw[0] = (*vcpu)->run->psw_mask; 30 - new_psw[1] = (uint64_t)int_handler; 30 + new_psw[1] = (u64)int_handler; 31 31 memcpy(lowcore + new_psw_off, new_psw, 16); 32 32 vcpu_regs_get(*vcpu, &regs); 33 33 regs.gprs[2] = -1; ··· 42 42 static void test_step_int(void *guest_code, size_t new_psw_off) 43 43 { 44 44 struct kvm_vcpu *vcpu; 45 - uint64_t new_psw[2]; 45 + u64 new_psw[2]; 46 46 struct kvm_vm *vm; 47 47 48 48 vm = test_step_int_1(&vcpu, guest_code, new_psw_off, new_psw); ··· 79 79 .u.pgm.code = PGM_SPECIFICATION, 80 80 }; 81 81 struct kvm_vcpu *vcpu; 82 - uint64_t new_psw[2]; 82 + u64 new_psw[2]; 83 83 struct kvm_vm *vm; 84 84 85 85 vm = test_step_int_1(&vcpu, test_step_pgm_diag_guest_code,
+1 -1
tools/testing/selftests/kvm/s390/irq_routing.c
··· 27 27 struct kvm_irq_routing *routing; 28 28 struct kvm_vcpu *vcpu; 29 29 struct kvm_vm *vm; 30 - vm_paddr_t mem; 30 + gpa_t mem; 31 31 int ret; 32 32 33 33 struct kvm_irq_routing_entry ue = {
+47 -47
tools/testing/selftests/kvm/s390/memop.c
··· 34 34 struct mop_desc { 35 35 uintptr_t gaddr; 36 36 uintptr_t gaddr_v; 37 - uint64_t set_flags; 37 + u64 set_flags; 38 38 unsigned int f_check : 1; 39 39 unsigned int f_inject : 1; 40 40 unsigned int f_key : 1; ··· 42 42 unsigned int _set_flags : 1; 43 43 unsigned int _sida_offset : 1; 44 44 unsigned int _ar : 1; 45 - uint32_t size; 45 + u32 size; 46 46 enum mop_target target; 47 47 enum mop_access_mode mode; 48 48 void *buf; 49 - uint32_t sida_offset; 49 + u32 sida_offset; 50 50 void *old; 51 - uint8_t old_value[16]; 51 + u8 old_value[16]; 52 52 bool *cmpxchg_success; 53 - uint8_t ar; 54 - uint8_t key; 53 + u8 ar; 54 + u8 key; 55 55 }; 56 56 57 - const uint8_t NO_KEY = 0xff; 57 + const u8 NO_KEY = 0xff; 58 58 59 59 static struct kvm_s390_mem_op ksmo_from_desc(struct mop_desc *desc) 60 60 { ··· 85 85 ksmo.op = KVM_S390_MEMOP_ABSOLUTE_WRITE; 86 86 if (desc->mode == CMPXCHG) { 87 87 ksmo.op = KVM_S390_MEMOP_ABSOLUTE_CMPXCHG; 88 - ksmo.old_addr = (uint64_t)desc->old; 88 + ksmo.old_addr = (u64)desc->old; 89 89 memcpy(desc->old_value, desc->old, desc->size); 90 90 } 91 91 break; ··· 230 230 #define CR0_FETCH_PROTECTION_OVERRIDE (1UL << (63 - 38)) 231 231 #define CR0_STORAGE_PROTECTION_OVERRIDE (1UL << (63 - 39)) 232 232 233 - static uint8_t __aligned(PAGE_SIZE) mem1[65536]; 234 - static uint8_t __aligned(PAGE_SIZE) mem2[65536]; 233 + static u8 __aligned(PAGE_SIZE) mem1[65536]; 234 + static u8 __aligned(PAGE_SIZE) mem2[65536]; 235 235 236 236 struct test_default { 237 237 struct kvm_vm *kvm_vm; ··· 296 296 TEST_ASSERT(!memcmp(p1, p2, size), "Memory contents do not match!") 297 297 298 298 static void default_write_read(struct test_info copy_cpu, struct test_info mop_cpu, 299 - enum mop_target mop_target, uint32_t size, uint8_t key) 299 + enum mop_target mop_target, u32 size, u8 key) 300 300 { 301 301 prepare_mem12(); 302 302 CHECK_N_DO(MOP, mop_cpu, mop_target, WRITE, mem1, size, ··· 308 308 } 309 309 310 310 static void default_read(struct test_info copy_cpu, struct test_info mop_cpu, 311 - enum mop_target mop_target, uint32_t size, uint8_t key) 311 + enum mop_target mop_target, u32 size, u8 key) 312 312 { 313 313 prepare_mem12(); 314 314 CHECK_N_DO(MOP, mop_cpu, mop_target, WRITE, mem1, size, GADDR_V(mem1)); ··· 318 318 ASSERT_MEM_EQ(mem1, mem2, size); 319 319 } 320 320 321 - static void default_cmpxchg(struct test_default *test, uint8_t key) 321 + static void default_cmpxchg(struct test_default *test, u8 key) 322 322 { 323 323 for (int size = 1; size <= 16; size *= 2) { 324 324 for (int offset = 0; offset < 16; offset += size) { 325 - uint8_t __aligned(16) new[16] = {}; 326 - uint8_t __aligned(16) old[16]; 325 + u8 __aligned(16) new[16] = {}; 326 + u8 __aligned(16) old[16]; 327 327 bool succ; 328 328 329 329 prepare_mem12(); ··· 400 400 kvm_vm_free(t.kvm_vm); 401 401 } 402 402 403 - static void set_storage_key_range(void *addr, size_t len, uint8_t key) 403 + static void set_storage_key_range(void *addr, size_t len, u8 key) 404 404 { 405 405 uintptr_t _addr, abs, i; 406 406 int not_mapped = 0; ··· 483 483 { 484 484 switch (size) { 485 485 case 1: 486 - return (uint8_t)val; 486 + return (u8)val; 487 487 case 2: 488 - return (uint16_t)val; 488 + return (u16)val; 489 489 case 4: 490 - return (uint32_t)val; 490 + return (u32)val; 491 491 case 8: 492 - return (uint64_t)val; 492 + return (u64)val; 493 493 case 16: 494 494 return val; 495 495 } ··· 501 501 { 502 502 unsigned int count_a, count_b; 503 503 504 - count_a = __builtin_popcountl((uint64_t)(a >> 64)) + 505 - 
__builtin_popcountl((uint64_t)a); 506 - count_b = __builtin_popcountl((uint64_t)(b >> 64)) + 507 - __builtin_popcountl((uint64_t)b); 504 + count_a = __builtin_popcountl((u64)(a >> 64)) + 505 + __builtin_popcountl((u64)a); 506 + count_b = __builtin_popcountl((u64)(b >> 64)) + 507 + __builtin_popcountl((u64)b); 508 508 return count_a == count_b; 509 509 } 510 510 ··· 553 553 if (swap) { 554 554 int i, j; 555 555 __uint128_t new; 556 - uint8_t byte0, byte1; 556 + u8 byte0, byte1; 557 557 558 558 rand = rand * 3 + 1; 559 559 i = rand % size; ··· 585 585 586 586 switch (size) { 587 587 case 4: { 588 - uint32_t old = *old_addr; 588 + u32 old = *old_addr; 589 589 590 590 asm volatile ("cs %[old],%[new],%[address]" 591 591 : [old] "+d" (old), 592 - [address] "+Q" (*(uint32_t *)(target)) 593 - : [new] "d" ((uint32_t)new) 592 + [address] "+Q" (*(u32 *)(target)) 593 + : [new] "d" ((u32)new) 594 594 : "cc" 595 595 ); 596 - ret = old == (uint32_t)*old_addr; 596 + ret = old == (u32)*old_addr; 597 597 *old_addr = old; 598 598 return ret; 599 599 } 600 600 case 8: { 601 - uint64_t old = *old_addr; 601 + u64 old = *old_addr; 602 602 603 603 asm volatile ("csg %[old],%[new],%[address]" 604 604 : [old] "+d" (old), 605 - [address] "+Q" (*(uint64_t *)(target)) 606 - : [new] "d" ((uint64_t)new) 605 + [address] "+Q" (*(u64 *)(target)) 606 + : [new] "d" ((u64)new) 607 607 : "cc" 608 608 ); 609 - ret = old == (uint64_t)*old_addr; 609 + ret = old == (u64)*old_addr; 610 610 *old_addr = old; 611 611 return ret; 612 612 } ··· 811 811 static void test_termination(void) 812 812 { 813 813 struct test_default t = test_default_init(guest_error_key); 814 - uint64_t prefix; 815 - uint64_t teid; 816 - uint64_t teid_mask = BIT(63 - 56) | BIT(63 - 60) | BIT(63 - 61); 817 - uint64_t psw[2]; 814 + u64 prefix; 815 + u64 teid; 816 + u64 teid_mask = BIT(63 - 56) | BIT(63 - 60) | BIT(63 - 61); 817 + u64 psw[2]; 818 818 819 819 HOST_SYNC(t.vcpu, STAGE_INITED); 820 820 HOST_SYNC(t.vcpu, STAGE_SKEYS_SET); ··· 855 855 kvm_vm_free(t.kvm_vm); 856 856 } 857 857 858 - const uint64_t last_page_addr = -PAGE_SIZE; 858 + const u64 last_page_addr = -PAGE_SIZE; 859 859 860 860 static void guest_copy_key_fetch_prot_override(void) 861 861 { ··· 878 878 static void test_copy_key_fetch_prot_override(void) 879 879 { 880 880 struct test_default t = test_default_init(guest_copy_key_fetch_prot_override); 881 - vm_vaddr_t guest_0_page, guest_last_page; 881 + gva_t guest_0_page, guest_last_page; 882 882 883 - guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0); 884 - guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr); 883 + guest_0_page = vm_alloc(t.kvm_vm, PAGE_SIZE, 0); 884 + guest_last_page = vm_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr); 885 885 if (guest_0_page != 0 || guest_last_page != last_page_addr) { 886 886 print_skip("did not allocate guest pages at required positions"); 887 887 goto out; ··· 917 917 static void test_errors_key_fetch_prot_override_not_enabled(void) 918 918 { 919 919 struct test_default t = test_default_init(guest_copy_key_fetch_prot_override); 920 - vm_vaddr_t guest_0_page, guest_last_page; 920 + gva_t guest_0_page, guest_last_page; 921 921 922 - guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0); 923 - guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr); 922 + guest_0_page = vm_alloc(t.kvm_vm, PAGE_SIZE, 0); 923 + guest_last_page = vm_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr); 924 924 if (guest_0_page != 0 || guest_last_page != last_page_addr) { 925 925 print_skip("did not allocate 
guest pages at required positions"); 926 926 goto out; ··· 938 938 static void test_errors_key_fetch_prot_override_enabled(void) 939 939 { 940 940 struct test_default t = test_default_init(guest_copy_key_fetch_prot_override); 941 - vm_vaddr_t guest_0_page, guest_last_page; 941 + gva_t guest_0_page, guest_last_page; 942 942 943 - guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0); 944 - guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr); 943 + guest_0_page = vm_alloc(t.kvm_vm, PAGE_SIZE, 0); 944 + guest_last_page = vm_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr); 945 945 if (guest_0_page != 0 || guest_last_page != last_page_addr) { 946 946 print_skip("did not allocate guest pages at required positions"); 947 947 goto out;
+3 -3
tools/testing/selftests/kvm/s390/resets.c
··· 20 20 21 21 struct kvm_s390_irq buf[ARBITRARY_NON_ZERO_VCPU_ID + LOCAL_IRQS]; 22 22 23 - static uint8_t regs_null[512]; 23 + static u8 regs_null[512]; 24 24 25 25 static void guest_code_initial(void) 26 26 { ··· 57 57 ); 58 58 } 59 59 60 - static void test_one_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t value) 60 + static void test_one_reg(struct kvm_vcpu *vcpu, u64 id, u64 value) 61 61 { 62 - uint64_t eval_reg; 62 + u64 eval_reg; 63 63 64 64 eval_reg = vcpu_get_reg(vcpu, id); 65 65 TEST_ASSERT(eval_reg == value, "value == 0x%lx", value);
+1 -1
tools/testing/selftests/kvm/s390/shared_zeropage_test.c
··· 13 13 #include "kselftest.h" 14 14 #include "ucall_common.h" 15 15 16 - static void set_storage_key(void *addr, uint8_t skey) 16 + static void set_storage_key(void *addr, u8 skey) 17 17 { 18 18 asm volatile("sske %0,%1" : : "d" (skey), "a" (addr)); 19 19 }
+12 -12
tools/testing/selftests/kvm/s390/tprot.c
··· 14 14 #define CR0_FETCH_PROTECTION_OVERRIDE (1UL << (63 - 38)) 15 15 #define CR0_STORAGE_PROTECTION_OVERRIDE (1UL << (63 - 39)) 16 16 17 - static __aligned(PAGE_SIZE) uint8_t pages[2][PAGE_SIZE]; 18 - static uint8_t *const page_store_prot = pages[0]; 19 - static uint8_t *const page_fetch_prot = pages[1]; 17 + static __aligned(PAGE_SIZE) u8 pages[2][PAGE_SIZE]; 18 + static u8 *const page_store_prot = pages[0]; 19 + static u8 *const page_fetch_prot = pages[1]; 20 20 21 21 /* Nonzero return value indicates that address not mapped */ 22 - static int set_storage_key(void *addr, uint8_t key) 22 + static int set_storage_key(void *addr, u8 key) 23 23 { 24 24 int not_mapped = 0; 25 25 ··· 44 44 TRANSL_UNAVAIL = 3, 45 45 }; 46 46 47 - static enum permission test_protection(void *addr, uint8_t key) 47 + static enum permission test_protection(void *addr, u8 key) 48 48 { 49 - uint64_t mask; 49 + u64 mask; 50 50 51 51 asm volatile ( 52 52 "tprot %[addr], 0(%[key])\n" ··· 72 72 struct test { 73 73 enum stage stage; 74 74 void *addr; 75 - uint8_t key; 75 + u8 key; 76 76 enum permission expected; 77 77 } tests[] = { 78 78 /* ··· 146 146 /* 147 147 * Some fetch protection override tests require that page 0 148 148 * be mapped, however, when the hosts tries to map that page via 149 - * vm_vaddr_alloc, it may happen that some other page gets mapped 149 + * vm_alloc, it may happen that some other page gets mapped 150 150 * instead. 151 151 * In order to skip these tests we detect this inside the guest 152 152 */ ··· 207 207 struct kvm_vcpu *vcpu; 208 208 struct kvm_vm *vm; 209 209 struct kvm_run *run; 210 - vm_vaddr_t guest_0_page; 210 + gva_t guest_0_page; 211 211 212 212 ksft_print_header(); 213 213 ksft_set_plan(STAGE_END); ··· 216 216 run = vcpu->run; 217 217 218 218 HOST_SYNC(vcpu, STAGE_INIT_SIMPLE); 219 - mprotect(addr_gva2hva(vm, (vm_vaddr_t)pages), PAGE_SIZE * 2, PROT_READ); 219 + mprotect(addr_gva2hva(vm, (gva_t)pages), PAGE_SIZE * 2, PROT_READ); 220 220 HOST_SYNC(vcpu, TEST_SIMPLE); 221 221 222 - guest_0_page = vm_vaddr_alloc(vm, PAGE_SIZE, 0); 222 + guest_0_page = vm_alloc(vm, PAGE_SIZE, 0); 223 223 if (guest_0_page != 0) { 224 224 /* Use NO_TAP so we don't get a PASS print */ 225 225 HOST_SYNC_NO_TAP(vcpu, STAGE_INIT_FETCH_PROT_OVERRIDE); ··· 229 229 HOST_SYNC(vcpu, STAGE_INIT_FETCH_PROT_OVERRIDE); 230 230 } 231 231 if (guest_0_page == 0) 232 - mprotect(addr_gva2hva(vm, (vm_vaddr_t)0), PAGE_SIZE, PROT_READ); 232 + mprotect(addr_gva2hva(vm, (gva_t)0), PAGE_SIZE, PROT_READ); 233 233 run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE; 234 234 run->kvm_dirty_regs = KVM_SYNC_CRS; 235 235 HOST_SYNC(vcpu, TEST_FETCH_PROT_OVERRIDE);
+4 -4
tools/testing/selftests/kvm/s390/ucontrol_test.c
··· 111 111 uintptr_t base_hva; 112 112 uintptr_t code_hva; 113 113 int kvm_run_size; 114 - vm_paddr_t pgd; 114 + gpa_t pgd; 115 115 void *vm_mem; 116 116 int vcpu_fd; 117 117 int kvm_fd; ··· 269 269 } 270 270 271 271 /* calculate host virtual addr from guest physical addr */ 272 - static void *gpa2hva(FIXTURE_DATA(uc_kvm) *self, u64 gpa) 272 + static void *gpa2hva(FIXTURE_DATA(uc_kvm) *self, gpa_t gpa) 273 273 { 274 274 return (void *)(self->base_hva - self->base_gpa + gpa); 275 275 } ··· 571 571 { 572 572 struct kvm_s390_sie_block *sie_block = self->sie_block; 573 573 struct kvm_sync_regs *sync_regs = &self->run->s.regs; 574 - u64 test_vaddr = VM_MEM_SIZE - (SZ_1M / 2); 574 + u64 test_gva = VM_MEM_SIZE - (SZ_1M / 2); 575 575 struct kvm_run *run = self->run; 576 576 const u8 skeyvalue = 0x34; 577 577 ··· 583 583 /* set register content for test_skey_asm to access not mapped memory */ 584 584 sync_regs->gprs[1] = skeyvalue; 585 585 sync_regs->gprs[5] = self->base_gpa; 586 - sync_regs->gprs[6] = test_vaddr; 586 + sync_regs->gprs[6] = test_gva; 587 587 run->kvm_dirty_regs |= KVM_SYNC_GPRS; 588 588 589 589 /* DAT disabled + 64 bit mode */
+20 -20
tools/testing/selftests/kvm/set_memory_region_test.c
··· 30 30 #define MEM_REGION_GPA 0xc0000000 31 31 #define MEM_REGION_SLOT 10 32 32 33 - static const uint64_t MMIO_VAL = 0xbeefull; 33 + static const u64 MMIO_VAL = 0xbeefull; 34 34 35 - extern const uint64_t final_rip_start; 36 - extern const uint64_t final_rip_end; 35 + extern const u64 final_rip_start; 36 + extern const u64 final_rip_end; 37 37 38 38 static sem_t vcpu_ready; 39 39 40 - static inline uint64_t guest_spin_on_val(uint64_t spin_val) 40 + static inline u64 guest_spin_on_val(u64 spin_val) 41 41 { 42 - uint64_t val; 42 + u64 val; 43 43 44 44 do { 45 - val = READ_ONCE(*((uint64_t *)MEM_REGION_GPA)); 45 + val = READ_ONCE(*((u64 *)MEM_REGION_GPA)); 46 46 } while (val == spin_val); 47 47 48 48 GUEST_SYNC(0); ··· 54 54 struct kvm_vcpu *vcpu = data; 55 55 struct kvm_run *run = vcpu->run; 56 56 struct ucall uc; 57 - uint64_t cmd; 57 + u64 cmd; 58 58 59 59 /* 60 60 * Loop until the guest is done. Re-enter the guest on all MMIO exits, ··· 111 111 void *guest_code) 112 112 { 113 113 struct kvm_vm *vm; 114 - uint64_t *hva; 115 - uint64_t gpa; 114 + u64 *hva; 115 + gpa_t gpa; 116 116 117 117 vm = vm_create_with_one_vcpu(vcpu, guest_code); 118 118 ··· 144 144 145 145 static void guest_code_move_memory_region(void) 146 146 { 147 - uint64_t val; 147 + u64 val; 148 148 149 149 GUEST_SYNC(0); 150 150 ··· 180 180 pthread_t vcpu_thread; 181 181 struct kvm_vcpu *vcpu; 182 182 struct kvm_vm *vm; 183 - uint64_t *hva; 183 + u64 *hva; 184 184 185 185 vm = spawn_vm(&vcpu, &vcpu_thread, guest_code_move_memory_region); 186 186 ··· 224 224 static void guest_code_delete_memory_region(void) 225 225 { 226 226 struct desc_ptr idt; 227 - uint64_t val; 227 + u64 val; 228 228 229 229 /* 230 230 * Clobber the IDT so that a #PF due to the memory region being deleted ··· 345 345 346 346 static void test_invalid_memory_region_flags(void) 347 347 { 348 - uint32_t supported_flags = KVM_MEM_LOG_DIRTY_PAGES; 349 - const uint32_t v2_only_flags = KVM_MEM_GUEST_MEMFD; 348 + u32 supported_flags = KVM_MEM_LOG_DIRTY_PAGES; 349 + const u32 v2_only_flags = KVM_MEM_GUEST_MEMFD; 350 350 struct kvm_vm *vm; 351 351 int r, i; 352 352 ··· 410 410 { 411 411 int ret; 412 412 struct kvm_vm *vm; 413 - uint32_t max_mem_slots; 414 - uint32_t slot; 413 + u32 max_mem_slots; 414 + u32 slot; 415 415 void *mem, *mem_aligned, *mem_extra; 416 416 size_t alignment = 1; 417 417 ··· 434 434 435 435 for (slot = 0; slot < max_mem_slots; slot++) 436 436 vm_set_user_memory_region(vm, slot, 0, 437 - ((uint64_t)slot * MEM_REGION_SIZE), 437 + ((u64)slot * MEM_REGION_SIZE), 438 438 MEM_REGION_SIZE, 439 - mem_aligned + (uint64_t)slot * MEM_REGION_SIZE); 439 + mem_aligned + (u64)slot * MEM_REGION_SIZE); 440 440 441 441 /* Check it cannot be added memory slots beyond the limit */ 442 442 mem_extra = kvm_mmap(MEM_REGION_SIZE, PROT_READ | PROT_WRITE, 443 443 MAP_PRIVATE | MAP_ANONYMOUS, -1); 444 444 445 445 ret = __vm_set_user_memory_region(vm, max_mem_slots, 0, 446 - (uint64_t)max_mem_slots * MEM_REGION_SIZE, 446 + (u64)max_mem_slots * MEM_REGION_SIZE, 447 447 MEM_REGION_SIZE, mem_extra); 448 448 TEST_ASSERT(ret == -1 && errno == EINVAL, 449 449 "Adding one more memory slot should fail with EINVAL"); ··· 556 556 set_idt(&idt_desc); 557 557 558 558 /* Generate a #GP by dereferencing a non-canonical address */ 559 - *((uint8_t *)NONCANONICAL) = 0x1; 559 + *((u8 *)NONCANONICAL) = 0x1; 560 560 561 561 GUEST_ASSERT(0); 562 562 }
+42 -37
tools/testing/selftests/kvm/steal_time.c
··· 25 25 #define ST_GPA_BASE (1 << 30) 26 26 27 27 static void *st_gva[NR_VCPUS]; 28 - static uint64_t guest_stolen_time[NR_VCPUS]; 28 + static u64 guest_stolen_time[NR_VCPUS]; 29 29 30 30 #if defined(__x86_64__) 31 31 ··· 42 42 static void guest_code(int cpu) 43 43 { 44 44 struct kvm_steal_time *st = st_gva[cpu]; 45 - uint32_t version; 45 + u32 version; 46 46 47 - GUEST_ASSERT_EQ(rdmsr(MSR_KVM_STEAL_TIME), ((uint64_t)st_gva[cpu] | KVM_MSR_ENABLED)); 47 + GUEST_ASSERT_EQ(rdmsr(MSR_KVM_STEAL_TIME), ((u64)st_gva[cpu] | KVM_MSR_ENABLED)); 48 48 49 49 memset(st, 0, sizeof(*st)); 50 50 GUEST_SYNC(0); ··· 67 67 return kvm_cpu_has(X86_FEATURE_KVM_STEAL_TIME); 68 68 } 69 69 70 - static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i) 70 + static void steal_time_init(struct kvm_vcpu *vcpu, u32 i) 71 71 { 72 72 /* ST_GPA_BASE is identity mapped */ 73 73 st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE); ··· 76 76 vcpu_set_msr(vcpu, MSR_KVM_STEAL_TIME, (ulong)st_gva[i] | KVM_MSR_ENABLED); 77 77 } 78 78 79 - static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx) 79 + static void steal_time_dump(struct kvm_vm *vm, u32 vcpu_idx) 80 80 { 81 81 struct kvm_steal_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]); 82 82 ··· 118 118 #define PV_TIME_ST 0xc5000021 119 119 120 120 struct st_time { 121 - uint32_t rev; 122 - uint32_t attr; 123 - uint64_t st_time; 121 + u32 rev; 122 + u32 attr; 123 + u64 st_time; 124 124 }; 125 125 126 - static int64_t smccc(uint32_t func, uint64_t arg) 126 + static s64 smccc(u32 func, u64 arg) 127 127 { 128 128 struct arm_smccc_res res; 129 129 ··· 140 140 static void guest_code(int cpu) 141 141 { 142 142 struct st_time *st; 143 - int64_t status; 143 + s64 status; 144 144 145 145 status = smccc(SMCCC_ARCH_FEATURES, PV_TIME_FEATURES); 146 146 GUEST_ASSERT_EQ(status, 0); ··· 175 175 return !__vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &dev); 176 176 } 177 177 178 - static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i) 178 + static void steal_time_init(struct kvm_vcpu *vcpu, u32 i) 179 179 { 180 180 struct kvm_vm *vm = vcpu->vm; 181 - uint64_t st_ipa; 181 + u64 st_ipa; 182 182 183 183 struct kvm_device_attr dev = { 184 184 .group = KVM_ARM_VCPU_PVTIME_CTRL, 185 185 .attr = KVM_ARM_VCPU_PVTIME_IPA, 186 - .addr = (uint64_t)&st_ipa, 186 + .addr = (u64)&st_ipa, 187 187 }; 188 188 189 189 /* ST_GPA_BASE is identity mapped */ ··· 194 194 vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev); 195 195 } 196 196 197 - static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx) 197 + static void steal_time_dump(struct kvm_vm *vm, u32 vcpu_idx) 198 198 { 199 199 struct st_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]); 200 200 ··· 208 208 { 209 209 struct kvm_vm *vm; 210 210 struct kvm_vcpu *vcpu; 211 - uint64_t st_ipa; 211 + u64 st_ipa; 212 212 int ret; 213 213 214 214 vm = vm_create_with_one_vcpu(&vcpu, NULL); ··· 216 216 struct kvm_device_attr dev = { 217 217 .group = KVM_ARM_VCPU_PVTIME_CTRL, 218 218 .attr = KVM_ARM_VCPU_PVTIME_IPA, 219 - .addr = (uint64_t)&st_ipa, 219 + .addr = (u64)&st_ipa, 220 220 }; 221 221 222 222 vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &dev); ··· 239 239 /* SBI STA shmem must have 64-byte alignment */ 240 240 #define STEAL_TIME_SIZE ((sizeof(struct sta_struct) + 63) & ~63) 241 241 242 - static vm_paddr_t st_gpa[NR_VCPUS]; 242 + static gpa_t st_gpa[NR_VCPUS]; 243 243 244 244 struct sta_struct { 245 - uint32_t sequence; 246 - uint32_t flags; 247 - uint64_t steal; 248 - uint8_t preempted; 249 - uint8_t pad[47]; 245 + u32 sequence; 246 + 
u32 flags; 247 + u64 steal; 248 + u8 preempted; 249 + u8 pad[47]; 250 250 } __packed; 251 251 252 - static void sta_set_shmem(vm_paddr_t gpa, unsigned long flags) 252 + static void sta_set_shmem(gpa_t gpa, unsigned long flags) 253 253 { 254 254 unsigned long lo = (unsigned long)gpa; 255 255 #if __riscv_xlen == 32 ··· 272 272 static void guest_code(int cpu) 273 273 { 274 274 struct sta_struct *st = st_gva[cpu]; 275 - uint32_t sequence; 275 + u32 sequence; 276 276 long out_val = 0; 277 277 bool probe; 278 278 ··· 297 297 298 298 static bool is_steal_time_supported(struct kvm_vcpu *vcpu) 299 299 { 300 - uint64_t id = RISCV_SBI_EXT_REG(KVM_RISCV_SBI_EXT_STA); 300 + u64 id = RISCV_SBI_EXT_REG(KVM_RISCV_SBI_EXT_STA); 301 301 unsigned long enabled = vcpu_get_reg(vcpu, id); 302 302 303 303 TEST_ASSERT(enabled == 0 || enabled == 1, "Expected boolean result"); ··· 305 305 return enabled; 306 306 } 307 307 308 - static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i) 308 + static void steal_time_init(struct kvm_vcpu *vcpu, u32 i) 309 309 { 310 310 /* ST_GPA_BASE is identity mapped */ 311 311 st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE); 312 - st_gpa[i] = addr_gva2gpa(vcpu->vm, (vm_vaddr_t)st_gva[i]); 312 + st_gpa[i] = addr_gva2gpa(vcpu->vm, (gva_t)st_gva[i]); 313 313 sync_global_to_guest(vcpu->vm, st_gva[i]); 314 314 sync_global_to_guest(vcpu->vm, st_gpa[i]); 315 315 } 316 316 317 - static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx) 317 + static void steal_time_dump(struct kvm_vm *vm, u32 vcpu_idx) 318 318 { 319 319 struct sta_struct *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]); 320 320 int i; ··· 335 335 struct kvm_vm *vm; 336 336 struct kvm_vcpu *vcpu; 337 337 struct kvm_one_reg reg; 338 - uint64_t shmem; 338 + u64 shmem; 339 339 int ret; 340 340 341 341 vm = vm_create_with_one_vcpu(&vcpu, NULL); ··· 345 345 KVM_REG_RISCV_SBI_STATE | 346 346 KVM_REG_RISCV_SBI_STA | 347 347 KVM_REG_RISCV_SBI_STA_REG(shmem_lo); 348 - reg.addr = (uint64_t)&shmem; 348 + reg.addr = (u64)&shmem; 349 349 350 350 shmem = ST_GPA_BASE + 1; 351 351 ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg); ··· 388 388 389 389 static void guest_code(int cpu) 390 390 { 391 - uint32_t version; 391 + u32 version; 392 392 struct kvm_steal_time *st = st_gva[cpu]; 393 393 394 394 memset(st, 0, sizeof(*st)); ··· 410 410 static bool is_steal_time_supported(struct kvm_vcpu *vcpu) 411 411 { 412 412 int err; 413 - uint64_t val; 413 + u64 val; 414 414 struct kvm_device_attr attr = { 415 415 .group = KVM_LOONGARCH_VCPU_CPUCFG, 416 416 .attr = CPUCFG_KVM_FEATURE, 417 - .addr = (uint64_t)&val, 417 + .addr = (u64)&val, 418 418 }; 419 419 420 420 err = __vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &attr); ··· 428 428 return val & BIT(KVM_FEATURE_STEAL_TIME); 429 429 } 430 430 431 - static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i) 431 + static void steal_time_init(struct kvm_vcpu *vcpu, u32 i) 432 432 { 433 433 int err; 434 - uint64_t st_gpa; 434 + u64 st_gpa; 435 435 struct kvm_vm *vm = vcpu->vm; 436 436 struct kvm_device_attr attr = { 437 437 .group = KVM_LOONGARCH_VCPU_PVTIME_CTRL, 438 438 .attr = KVM_LOONGARCH_VCPU_PVTIME_GPA, 439 - .addr = (uint64_t)&st_gpa, 439 + .addr = (u64)&st_gpa, 440 440 }; 441 441 442 442 /* ST_GPA_BASE is identity mapped */ ··· 451 451 TEST_ASSERT(err == 0, "Fail to set PV stealtime GPA"); 452 452 } 453 453 454 - static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx) 454 + static void steal_time_dump(struct kvm_vm *vm, u32 vcpu_idx) 455 455 { 456 456 struct 
kvm_steal_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]); 457 457 ··· 460 460 ksft_print_msg(" flags: %d\n", st->flags); 461 461 ksft_print_msg(" version: %d\n", st->version); 462 462 ksft_print_msg(" preempted: %d\n", st->preempted); 463 + } 464 + 465 + static void check_steal_time_uapi(void) 466 + { 467 + 463 468 } 464 469 #endif 465 470
+6 -6
tools/testing/selftests/kvm/system_counter_offset_test.c
··· 17 17 #ifdef __x86_64__ 18 18 19 19 struct test_case { 20 - uint64_t tsc_offset; 20 + u64 tsc_offset; 21 21 }; 22 22 23 23 static struct test_case test_cases[] = { ··· 39 39 &test->tsc_offset); 40 40 } 41 41 42 - static uint64_t guest_read_system_counter(struct test_case *test) 42 + static u64 guest_read_system_counter(struct test_case *test) 43 43 { 44 44 return rdtsc(); 45 45 } 46 46 47 - static uint64_t host_read_guest_system_counter(struct test_case *test) 47 + static u64 host_read_guest_system_counter(struct test_case *test) 48 48 { 49 49 return rdtsc() + test->tsc_offset; 50 50 } ··· 69 69 } 70 70 } 71 71 72 - static void handle_sync(struct ucall *uc, uint64_t start, uint64_t end) 72 + static void handle_sync(struct ucall *uc, u64 start, u64 end) 73 73 { 74 - uint64_t obs = uc->args[2]; 74 + u64 obs = uc->args[2]; 75 75 76 76 TEST_ASSERT(start <= obs && obs <= end, 77 77 "unexpected system counter value: %"PRIu64" expected range: [%"PRIu64", %"PRIu64"]", ··· 88 88 89 89 static void enter_guest(struct kvm_vcpu *vcpu) 90 90 { 91 - uint64_t start, end; 91 + u64 start, end; 92 92 struct ucall uc; 93 93 int i; 94 94
+7 -7
tools/testing/selftests/kvm/x86/amx_test.c
··· 80 80 asm volatile(".byte 0xc4, 0xe2, 0x78, 0x49, 0xc0" ::); 81 81 } 82 82 83 - static inline void __xsavec(struct xstate *xstate, uint64_t rfbm) 83 + static inline void __xsavec(struct xstate *xstate, u64 rfbm) 84 84 { 85 - uint32_t rfbm_lo = rfbm; 86 - uint32_t rfbm_hi = rfbm >> 32; 85 + u32 rfbm_lo = rfbm; 86 + u32 rfbm_hi = rfbm >> 32; 87 87 88 88 asm volatile("xsavec (%%rdi)" 89 89 : : "D" (xstate), "a" (rfbm_lo), "d" (rfbm_hi) ··· 236 236 struct kvm_x86_state *state; 237 237 struct kvm_x86_state *tile_state = NULL; 238 238 int xsave_restore_size; 239 - vm_vaddr_t amx_cfg, tiledata, xstate; 239 + gva_t amx_cfg, tiledata, xstate; 240 240 struct ucall uc; 241 241 int ret; 242 242 ··· 263 263 vcpu_regs_get(vcpu, &regs1); 264 264 265 265 /* amx cfg for guest_code */ 266 - amx_cfg = vm_vaddr_alloc_page(vm); 266 + amx_cfg = vm_alloc_page(vm); 267 267 memset(addr_gva2hva(vm, amx_cfg), 0x0, getpagesize()); 268 268 269 269 /* amx tiledata for guest_code */ 270 - tiledata = vm_vaddr_alloc_pages(vm, 2); 270 + tiledata = vm_alloc_pages(vm, 2); 271 271 memset(addr_gva2hva(vm, tiledata), rand() | 1, 2 * getpagesize()); 272 272 273 273 /* XSAVE state for guest_code */ 274 - xstate = vm_vaddr_alloc_pages(vm, DIV_ROUND_UP(XSAVE_SIZE, PAGE_SIZE)); 274 + xstate = vm_alloc_pages(vm, DIV_ROUND_UP(XSAVE_SIZE, PAGE_SIZE)); 275 275 memset(addr_gva2hva(vm, xstate), 0, PAGE_SIZE * DIV_ROUND_UP(XSAVE_SIZE, PAGE_SIZE)); 276 276 vcpu_args_set(vcpu, 3, amx_cfg, tiledata, xstate); 277 277
+8 -8
tools/testing/selftests/kvm/x86/aperfmperf_test.c
··· 35 35 return open_path_or_exit(path, O_RDONLY); 36 36 } 37 37 38 - static uint64_t read_dev_msr(int msr_fd, uint32_t msr) 38 + static u64 read_dev_msr(int msr_fd, u32 msr) 39 39 { 40 - uint64_t data; 40 + u64 data; 41 41 ssize_t rc; 42 42 43 43 rc = pread(msr_fd, &data, sizeof(data), msr); ··· 107 107 108 108 static void guest_no_aperfmperf(void) 109 109 { 110 - uint64_t msr_val; 111 - uint8_t vector; 110 + u64 msr_val; 111 + u8 vector; 112 112 113 113 vector = rdmsr_safe(MSR_IA32_APERF, &msr_val); 114 114 GUEST_ASSERT(vector == GP_VECTOR); ··· 122 122 int main(int argc, char *argv[]) 123 123 { 124 124 const bool has_nested = kvm_cpu_has(X86_FEATURE_SVM) || kvm_cpu_has(X86_FEATURE_VMX); 125 - uint64_t host_aperf_before, host_mperf_before; 126 - vm_vaddr_t nested_test_data_gva; 125 + u64 host_aperf_before, host_mperf_before; 126 + gva_t nested_test_data_gva; 127 127 struct kvm_vcpu *vcpu; 128 128 struct kvm_vm *vm; 129 129 int msr_fd, cpu, i; ··· 166 166 host_mperf_before = read_dev_msr(msr_fd, MSR_IA32_MPERF); 167 167 168 168 for (i = 0; i <= NUM_ITERATIONS * (1 + has_nested); i++) { 169 - uint64_t host_aperf_after, host_mperf_after; 170 - uint64_t guest_aperf, guest_mperf; 169 + u64 host_aperf_after, host_mperf_after; 170 + u64 guest_aperf, guest_mperf; 171 171 struct ucall uc; 172 172 173 173 vcpu_run(vcpu);
+12 -12
tools/testing/selftests/kvm/x86/apic_bus_clock_test.c
··· 19 19 * timer frequency. 20 20 */ 21 21 static const struct { 22 - const uint32_t tdcr; 23 - const uint32_t divide_count; 22 + const u32 tdcr; 23 + const u32 divide_count; 24 24 } tdcrs[] = { 25 25 {0x0, 2}, 26 26 {0x1, 4}, ··· 42 42 xapic_enable(); 43 43 } 44 44 45 - static uint32_t apic_read_reg(unsigned int reg) 45 + static u32 apic_read_reg(unsigned int reg) 46 46 { 47 47 return is_x2apic ? x2apic_read_reg(reg) : xapic_read_reg(reg); 48 48 } 49 49 50 - static void apic_write_reg(unsigned int reg, uint32_t val) 50 + static void apic_write_reg(unsigned int reg, u32 val) 51 51 { 52 52 if (is_x2apic) 53 53 x2apic_write_reg(reg, val); ··· 55 55 xapic_write_reg(reg, val); 56 56 } 57 57 58 - static void apic_guest_code(uint64_t apic_hz, uint64_t delay_ms) 58 + static void apic_guest_code(u64 apic_hz, u64 delay_ms) 59 59 { 60 - uint64_t tsc_hz = guest_tsc_khz * 1000; 61 - const uint32_t tmict = ~0u; 62 - uint64_t tsc0, tsc1, freq; 63 - uint32_t tmcct; 60 + u64 tsc_hz = guest_tsc_khz * 1000; 61 + const u32 tmict = ~0u; 62 + u64 tsc0, tsc1, freq; 63 + u32 tmcct; 64 64 int i; 65 65 66 66 apic_enable(); ··· 121 121 } 122 122 } 123 123 124 - static void run_apic_bus_clock_test(uint64_t apic_hz, uint64_t delay_ms, 124 + static void run_apic_bus_clock_test(u64 apic_hz, u64 delay_ms, 125 125 bool x2apic) 126 126 { 127 127 struct kvm_vcpu *vcpu; ··· 168 168 * Arbitrarilty default to 25MHz for the APIC bus frequency, which is 169 169 * different enough from the default 1GHz to be interesting. 170 170 */ 171 - uint64_t apic_hz = 25 * 1000 * 1000; 172 - uint64_t delay_ms = 100; 171 + u64 apic_hz = 25 * 1000 * 1000; 172 + u64 delay_ms = 100; 173 173 int opt; 174 174 175 175 TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_APIC_BUS_CYCLES_NS));
+3 -3
tools/testing/selftests/kvm/x86/cpuid_test.c
··· 140 140 } 141 141 } 142 142 143 - struct kvm_cpuid2 *vcpu_alloc_cpuid(struct kvm_vm *vm, vm_vaddr_t *p_gva, struct kvm_cpuid2 *cpuid) 143 + struct kvm_cpuid2 *vcpu_alloc_cpuid(struct kvm_vm *vm, gva_t *p_gva, struct kvm_cpuid2 *cpuid) 144 144 { 145 145 int size = sizeof(*cpuid) + cpuid->nent * sizeof(cpuid->entries[0]); 146 - vm_vaddr_t gva = vm_vaddr_alloc(vm, size, KVM_UTIL_MIN_VADDR); 146 + gva_t gva = vm_alloc(vm, size, KVM_UTIL_MIN_VADDR); 147 147 struct kvm_cpuid2 *guest_cpuids = addr_gva2hva(vm, gva); 148 148 149 149 memcpy(guest_cpuids, cpuid, size); ··· 217 217 int main(void) 218 218 { 219 219 struct kvm_vcpu *vcpu; 220 - vm_vaddr_t cpuid_gva; 220 + gva_t cpuid_gva; 221 221 struct kvm_vm *vm; 222 222 int stage; 223 223
+2 -2
tools/testing/selftests/kvm/x86/debug_regs.c
··· 16 16 #define IRQ_VECTOR 0xAA 17 17 18 18 /* For testing data access debug BP */ 19 - uint32_t guest_value; 19 + u32 guest_value; 20 20 21 21 extern unsigned char sw_bp, hw_bp, write_data, ss_start, bd_start; 22 22 ··· 86 86 struct kvm_run *run; 87 87 struct kvm_vm *vm; 88 88 struct ucall uc; 89 - uint64_t cmd; 89 + u64 cmd; 90 90 int i; 91 91 /* Instruction lengths starting at ss_start */ 92 92 int ss_size[6] = {
+8 -8
tools/testing/selftests/kvm/x86/dirty_log_page_splitting_test.c
··· 23 23 #define SLOTS 2 24 24 #define ITERATIONS 2 25 25 26 - static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE; 26 + static u64 guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE; 27 27 28 28 static enum vm_mem_backing_src_type backing_src = VM_MEM_SRC_ANONYMOUS_HUGETLB; 29 29 ··· 33 33 static int vcpu_last_completed_iteration[KVM_MAX_VCPUS]; 34 34 35 35 struct kvm_page_stats { 36 - uint64_t pages_4k; 37 - uint64_t pages_2m; 38 - uint64_t pages_1g; 39 - uint64_t hugepages; 36 + u64 pages_4k; 37 + u64 pages_2m; 38 + u64 pages_1g; 39 + u64 hugepages; 40 40 }; 41 41 42 42 static void get_page_stats(struct kvm_vm *vm, struct kvm_page_stats *stats, const char *stage) ··· 89 89 { 90 90 struct kvm_vm *vm; 91 91 unsigned long **bitmaps; 92 - uint64_t guest_num_pages; 93 - uint64_t host_num_pages; 94 - uint64_t pages_per_slot; 92 + u64 guest_num_pages; 93 + u64 host_num_pages; 94 + u64 pages_per_slot; 95 95 int i; 96 96 struct kvm_page_stats stats_populated; 97 97 struct kvm_page_stats stats_dirty_logging_enabled;
+3 -3
tools/testing/selftests/kvm/x86/evmcs_smm_controls_test.c
··· 29 29 * SMI handler: runs in real-address mode. 30 30 * Reports SMRAM_STAGE via port IO, then does RSM. 31 31 */ 32 - static uint8_t smi_handler[] = { 32 + static u8 smi_handler[] = { 33 33 0xb0, SMRAM_STAGE, /* mov $SMRAM_STAGE, %al */ 34 34 0xe4, SYNC_PORT, /* in $SYNC_PORT, %al */ 35 35 0x0f, 0xaa, /* rsm */ 36 36 }; 37 37 38 - static inline void sync_with_host(uint64_t phase) 38 + static inline void sync_with_host(u64 phase) 39 39 { 40 40 asm volatile("in $" XSTR(SYNC_PORT) ", %%al \n" 41 41 : "+a" (phase)); ··· 73 73 74 74 int main(int argc, char *argv[]) 75 75 { 76 - vm_vaddr_t vmx_pages_gva = 0, hv_pages_gva = 0; 76 + gva_t vmx_pages_gva = 0, hv_pages_gva = 0; 77 77 struct hyperv_test_pages *hv; 78 78 struct hv_enlightened_vmcs *evmcs; 79 79 struct kvm_vcpu *vcpu;
+26 -26
tools/testing/selftests/kvm/x86/fastops_test.c
··· 15 15 "pop %[flags]\n\t" 16 16 17 17 #define flags_constraint(flags_val) [flags]"=r"(flags_val) 18 - #define bt_constraint(__bt_val) [bt_val]"rm"((uint32_t)__bt_val) 18 + #define bt_constraint(__bt_val) [bt_val]"rm"((u32)__bt_val) 19 19 20 20 #define guest_execute_fastop_1(FEP, insn, __val, __flags) \ 21 21 ({ \ ··· 28 28 #define guest_test_fastop_1(insn, type_t, __val) \ 29 29 ({ \ 30 30 type_t val = __val, ex_val = __val, input = __val; \ 31 - uint64_t flags, ex_flags; \ 31 + u64 flags, ex_flags; \ 32 32 \ 33 33 guest_execute_fastop_1("", insn, ex_val, ex_flags); \ 34 34 guest_execute_fastop_1(KVM_FEP, insn, val, flags); \ 35 35 \ 36 36 __GUEST_ASSERT(val == ex_val, \ 37 37 "Wanted 0x%lx for '%s 0x%lx', got 0x%lx", \ 38 - (uint64_t)ex_val, insn, (uint64_t)input, (uint64_t)val); \ 38 + (u64)ex_val, insn, (u64)input, (u64)val); \ 39 39 __GUEST_ASSERT(flags == ex_flags, \ 40 40 "Wanted flags 0x%lx for '%s 0x%lx', got 0x%lx", \ 41 - ex_flags, insn, (uint64_t)input, flags); \ 41 + ex_flags, insn, (u64)input, flags); \ 42 42 }) 43 43 44 44 #define guest_execute_fastop_2(FEP, insn, __input, __output, __flags) \ ··· 52 52 #define guest_test_fastop_2(insn, type_t, __val1, __val2) \ 53 53 ({ \ 54 54 type_t input = __val1, input2 = __val2, output = __val2, ex_output = __val2; \ 55 - uint64_t flags, ex_flags; \ 55 + u64 flags, ex_flags; \ 56 56 \ 57 57 guest_execute_fastop_2("", insn, input, ex_output, ex_flags); \ 58 58 guest_execute_fastop_2(KVM_FEP, insn, input, output, flags); \ 59 59 \ 60 60 __GUEST_ASSERT(output == ex_output, \ 61 61 "Wanted 0x%lx for '%s 0x%lx 0x%lx', got 0x%lx", \ 62 - (uint64_t)ex_output, insn, (uint64_t)input, \ 63 - (uint64_t)input2, (uint64_t)output); \ 62 + (u64)ex_output, insn, (u64)input, \ 63 + (u64)input2, (u64)output); \ 64 64 __GUEST_ASSERT(flags == ex_flags, \ 65 65 "Wanted flags 0x%lx for '%s 0x%lx, 0x%lx', got 0x%lx", \ 66 - ex_flags, insn, (uint64_t)input, (uint64_t)input2, flags); \ 66 + ex_flags, insn, (u64)input, (u64)input2, flags); \ 67 67 }) 68 68 69 69 #define guest_execute_fastop_cl(FEP, insn, __shift, __output, __flags) \ ··· 77 77 #define guest_test_fastop_cl(insn, type_t, __val1, __val2) \ 78 78 ({ \ 79 79 type_t output = __val2, ex_output = __val2, input = __val2; \ 80 - uint8_t shift = __val1; \ 81 - uint64_t flags, ex_flags; \ 80 + u8 shift = __val1; \ 81 + u64 flags, ex_flags; \ 82 82 \ 83 83 guest_execute_fastop_cl("", insn, shift, ex_output, ex_flags); \ 84 84 guest_execute_fastop_cl(KVM_FEP, insn, shift, output, flags); \ 85 85 \ 86 86 __GUEST_ASSERT(output == ex_output, \ 87 87 "Wanted 0x%lx for '%s 0x%x, 0x%lx', got 0x%lx", \ 88 - (uint64_t)ex_output, insn, shift, (uint64_t)input, \ 89 - (uint64_t)output); \ 88 + (u64)ex_output, insn, shift, (u64)input, \ 89 + (u64)output); \ 90 90 __GUEST_ASSERT(flags == ex_flags, \ 91 91 "Wanted flags 0x%lx for '%s 0x%x, 0x%lx', got 0x%lx", \ 92 - ex_flags, insn, shift, (uint64_t)input, flags); \ 92 + ex_flags, insn, shift, (u64)input, flags); \ 93 93 }) 94 94 95 95 #define guest_execute_fastop_div(__KVM_ASM_SAFE, insn, __a, __d, __rm, __flags) \ 96 96 ({ \ 97 - uint64_t ign_error_code; \ 98 - uint8_t vector; \ 97 + u64 ign_error_code; \ 98 + u8 vector; \ 99 99 \ 100 100 __asm__ __volatile__(fastop(__KVM_ASM_SAFE(insn " %[denom]")) \ 101 101 : "+a"(__a), "+d"(__d), flags_constraint(__flags), \ ··· 109 109 ({ \ 110 110 type_t _a = __val1, _d = __val1, rm = __val2; \ 111 111 type_t a = _a, d = _d, ex_a = _a, ex_d = _d; \ 112 - uint64_t flags, ex_flags; \ 113 - uint8_t v, ex_v; \ 112 + u64 flags, 
ex_flags; \ 113 + u8 v, ex_v; \ 114 114 \ 115 115 ex_v = guest_execute_fastop_div(KVM_ASM_SAFE, insn, ex_a, ex_d, rm, ex_flags); \ 116 116 v = guest_execute_fastop_div(KVM_ASM_SAFE_FEP, insn, a, d, rm, flags); \ ··· 118 118 GUEST_ASSERT_EQ(v, ex_v); \ 119 119 __GUEST_ASSERT(v == ex_v, \ 120 120 "Wanted vector 0x%x for '%s 0x%lx:0x%lx/0x%lx', got 0x%x", \ 121 - ex_v, insn, (uint64_t)_a, (uint64_t)_d, (uint64_t)rm, v); \ 121 + ex_v, insn, (u64)_a, (u64)_d, (u64)rm, v); \ 122 122 __GUEST_ASSERT(a == ex_a && d == ex_d, \ 123 123 "Wanted 0x%lx:0x%lx for '%s 0x%lx:0x%lx/0x%lx', got 0x%lx:0x%lx",\ 124 - (uint64_t)ex_a, (uint64_t)ex_d, insn, (uint64_t)_a, \ 125 - (uint64_t)_d, (uint64_t)rm, (uint64_t)a, (uint64_t)d); \ 124 + (u64)ex_a, (u64)ex_d, insn, (u64)_a, \ 125 + (u64)_d, (u64)rm, (u64)a, (u64)d); \ 126 126 __GUEST_ASSERT(v || ex_v || (flags == ex_flags), \ 127 127 "Wanted flags 0x%lx for '%s 0x%lx:0x%lx/0x%lx', got 0x%lx", \ 128 - ex_flags, insn, (uint64_t)_a, (uint64_t)_d, (uint64_t)rm, flags);\ 128 + ex_flags, insn, (u64)_a, (u64)_d, (u64)rm, flags);\ 129 129 }) 130 130 131 - static const uint64_t vals[] = { 131 + static const u64 vals[] = { 132 132 0, 133 133 1, 134 134 2, ··· 185 185 186 186 static void guest_code(void) 187 187 { 188 - guest_test_fastops(uint8_t, "b"); 189 - guest_test_fastops(uint16_t, "w"); 190 - guest_test_fastops(uint32_t, "l"); 191 - guest_test_fastops(uint64_t, "q"); 188 + guest_test_fastops(u8, "b"); 189 + guest_test_fastops(u16, "w"); 190 + guest_test_fastops(u32, "l"); 191 + guest_test_fastops(u64, "q"); 192 192 193 193 GUEST_DONE(); 194 194 }
+6 -6
tools/testing/selftests/kvm/x86/feature_msrs_test.c
··· 12 12 #include "kvm_util.h" 13 13 #include "processor.h" 14 14 15 - static bool is_kvm_controlled_msr(uint32_t msr) 15 + static bool is_kvm_controlled_msr(u32 msr) 16 16 { 17 17 return msr == MSR_IA32_VMX_CR0_FIXED1 || msr == MSR_IA32_VMX_CR4_FIXED1; 18 18 } ··· 21 21 * For VMX MSRs with a "true" variant, KVM requires userspace to set the "true" 22 22 * MSR, and doesn't allow setting the hidden version. 23 23 */ 24 - static bool is_hidden_vmx_msr(uint32_t msr) 24 + static bool is_hidden_vmx_msr(u32 msr) 25 25 { 26 26 switch (msr) { 27 27 case MSR_IA32_VMX_PINBASED_CTLS: ··· 34 34 } 35 35 } 36 36 37 - static bool is_quirked_msr(uint32_t msr) 37 + static bool is_quirked_msr(u32 msr) 38 38 { 39 39 return msr != MSR_AMD64_DE_CFG; 40 40 } 41 41 42 - static void test_feature_msr(uint32_t msr) 42 + static void test_feature_msr(u32 msr) 43 43 { 44 - const uint64_t supported_mask = kvm_get_feature_msr(msr); 45 - uint64_t reset_value = is_quirked_msr(msr) ? supported_mask : 0; 44 + const u64 supported_mask = kvm_get_feature_msr(msr); 45 + u64 reset_value = is_quirked_msr(msr) ? supported_mask : 0; 46 46 struct kvm_vcpu *vcpu; 47 47 struct kvm_vm *vm; 48 48
+10 -10
tools/testing/selftests/kvm/x86/fix_hypercall_test.c
··· 26 26 regs->rip += HYPERCALL_INSN_SIZE; 27 27 } 28 28 29 - static const uint8_t vmx_vmcall[HYPERCALL_INSN_SIZE] = { 0x0f, 0x01, 0xc1 }; 30 - static const uint8_t svm_vmmcall[HYPERCALL_INSN_SIZE] = { 0x0f, 0x01, 0xd9 }; 29 + static const u8 vmx_vmcall[HYPERCALL_INSN_SIZE] = { 0x0f, 0x01, 0xc1 }; 30 + static const u8 svm_vmmcall[HYPERCALL_INSN_SIZE] = { 0x0f, 0x01, 0xd9 }; 31 31 32 - extern uint8_t hypercall_insn[HYPERCALL_INSN_SIZE]; 33 - static uint64_t do_sched_yield(uint8_t apic_id) 32 + extern u8 hypercall_insn[HYPERCALL_INSN_SIZE]; 33 + static u64 do_sched_yield(u8 apic_id) 34 34 { 35 - uint64_t ret; 35 + u64 ret; 36 36 37 37 asm volatile("hypercall_insn:\n\t" 38 38 ".byte 0xcc,0xcc,0xcc\n\t" 39 39 : "=a"(ret) 40 - : "a"((uint64_t)KVM_HC_SCHED_YIELD), "b"((uint64_t)apic_id) 40 + : "a"((u64)KVM_HC_SCHED_YIELD), "b"((u64)apic_id) 41 41 : "memory"); 42 42 43 43 return ret; ··· 45 45 46 46 static void guest_main(void) 47 47 { 48 - const uint8_t *native_hypercall_insn; 49 - const uint8_t *other_hypercall_insn; 50 - uint64_t ret; 48 + const u8 *native_hypercall_insn; 49 + const u8 *other_hypercall_insn; 50 + u64 ret; 51 51 52 52 if (host_cpu_is_intel) { 53 53 native_hypercall_insn = vmx_vmcall; ··· 72 72 * the "right" hypercall. 73 73 */ 74 74 if (quirk_disabled) { 75 - GUEST_ASSERT(ret == (uint64_t)-EFAULT); 75 + GUEST_ASSERT(ret == (u64)-EFAULT); 76 76 GUEST_ASSERT(!memcmp(other_hypercall_insn, hypercall_insn, 77 77 HYPERCALL_INSN_SIZE)); 78 78 } else {
+3 -3
tools/testing/selftests/kvm/x86/flds_emulation.h
··· 12 12 * KVM to emulate the instruction (e.g. by providing an MMIO address) to 13 13 * exercise emulation failures. 14 14 */ 15 - static inline void flds(uint64_t address) 15 + static inline void flds(u64 address) 16 16 { 17 17 __asm__ __volatile__(FLDS_MEM_EAX :: "a"(address)); 18 18 } ··· 21 21 { 22 22 struct kvm_run *run = vcpu->run; 23 23 struct kvm_regs regs; 24 - uint8_t *insn_bytes; 25 - uint64_t flags; 24 + u8 *insn_bytes; 25 + u64 flags; 26 26 27 27 TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_INTERNAL_ERROR); 28 28
+5 -5
tools/testing/selftests/kvm/x86/hwcr_msr_test.c
··· 10 10 11 11 void test_hwcr_bit(struct kvm_vcpu *vcpu, unsigned int bit) 12 12 { 13 - const uint64_t ignored = BIT_ULL(3) | BIT_ULL(6) | BIT_ULL(8); 14 - const uint64_t valid = BIT_ULL(18) | BIT_ULL(24); 15 - const uint64_t legal = ignored | valid; 16 - uint64_t val = BIT_ULL(bit); 17 - uint64_t actual; 13 + const u64 ignored = BIT_ULL(3) | BIT_ULL(6) | BIT_ULL(8); 14 + const u64 valid = BIT_ULL(18) | BIT_ULL(24); 15 + const u64 legal = ignored | valid; 16 + u64 val = BIT_ULL(bit); 17 + u64 actual; 18 18 int r; 19 19 20 20 r = _vcpu_set_msr(vcpu, MSR_K7_HWCR, val);
+3 -3
tools/testing/selftests/kvm/x86/hyperv_clock.c
··· 98 98 GUEST_ASSERT(r2 >= t1 && r2 - t2 < 100000); 99 99 } 100 100 101 - static void guest_main(struct ms_hyperv_tsc_page *tsc_page, vm_paddr_t tsc_page_gpa) 101 + static void guest_main(struct ms_hyperv_tsc_page *tsc_page, gpa_t tsc_page_gpa) 102 102 { 103 103 u64 tsc_scale, tsc_offset; 104 104 ··· 208 208 struct kvm_vcpu *vcpu; 209 209 struct kvm_vm *vm; 210 210 struct ucall uc; 211 - vm_vaddr_t tsc_page_gva; 211 + gva_t tsc_page_gva; 212 212 int stage; 213 213 214 214 TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_TIME)); ··· 218 218 219 219 vcpu_set_hv_cpuid(vcpu); 220 220 221 - tsc_page_gva = vm_vaddr_alloc_page(vm); 221 + tsc_page_gva = vm_alloc_page(vm); 222 222 memset(addr_gva2hva(vm, tsc_page_gva), 0x0, getpagesize()); 223 223 TEST_ASSERT((addr_gva2gpa(vm, tsc_page_gva) & (getpagesize() - 1)) == 0, 224 224 "TSC page has to be page aligned");
+5 -5
tools/testing/selftests/kvm/x86/hyperv_evmcs.c
··· 30 30 { 31 31 } 32 32 33 - static inline void rdmsr_from_l2(uint32_t msr) 33 + static inline void rdmsr_from_l2(u32 msr) 34 34 { 35 35 /* Currently, L1 doesn't preserve GPRs during vmexits. */ 36 36 __asm__ __volatile__ ("rdmsr" : : "c"(msr) : ··· 76 76 } 77 77 78 78 void guest_code(struct vmx_pages *vmx_pages, struct hyperv_test_pages *hv_pages, 79 - vm_vaddr_t hv_hcall_page_gpa) 79 + gpa_t hv_hcall_page_gpa) 80 80 { 81 81 #define L2_GUEST_STACK_SIZE 64 82 82 unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; ··· 231 231 232 232 int main(int argc, char *argv[]) 233 233 { 234 - vm_vaddr_t vmx_pages_gva = 0, hv_pages_gva = 0; 235 - vm_vaddr_t hcall_page; 234 + gva_t vmx_pages_gva = 0, hv_pages_gva = 0; 235 + gva_t hcall_page; 236 236 237 237 struct kvm_vcpu *vcpu; 238 238 struct kvm_vm *vm; ··· 246 246 247 247 vm = vm_create_with_one_vcpu(&vcpu, guest_code); 248 248 249 - hcall_page = vm_vaddr_alloc_pages(vm, 1); 249 + hcall_page = vm_alloc_pages(vm, 1); 250 250 memset(addr_gva2hva(vm, hcall_page), 0x0, getpagesize()); 251 251 252 252 vcpu_set_hv_cpuid(vcpu);
+10 -10
tools/testing/selftests/kvm/x86/hyperv_extended_hypercalls.c
··· 15 15 /* Any value is fine */ 16 16 #define EXT_CAPABILITIES 0xbull 17 17 18 - static void guest_code(vm_paddr_t in_pg_gpa, vm_paddr_t out_pg_gpa, 19 - vm_vaddr_t out_pg_gva) 18 + static void guest_code(gpa_t in_pg_gpa, gpa_t out_pg_gpa, 19 + gva_t out_pg_gva) 20 20 { 21 - uint64_t *output_gva; 21 + u64 *output_gva; 22 22 23 23 wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID); 24 24 wrmsr(HV_X64_MSR_HYPERCALL, in_pg_gpa); 25 25 26 - output_gva = (uint64_t *)out_pg_gva; 26 + output_gva = (u64 *)out_pg_gva; 27 27 28 28 hyperv_hypercall(HV_EXT_CALL_QUERY_CAPABILITIES, in_pg_gpa, out_pg_gpa); 29 29 30 - /* TLFS states output will be a uint64_t value */ 30 + /* TLFS states output will be a u64 value */ 31 31 GUEST_ASSERT_EQ(*output_gva, EXT_CAPABILITIES); 32 32 33 33 GUEST_DONE(); ··· 35 35 36 36 int main(void) 37 37 { 38 - vm_vaddr_t hcall_out_page; 39 - vm_vaddr_t hcall_in_page; 38 + gva_t hcall_out_page; 39 + gva_t hcall_in_page; 40 40 struct kvm_vcpu *vcpu; 41 41 struct kvm_run *run; 42 42 struct kvm_vm *vm; 43 - uint64_t *outval; 43 + u64 *outval; 44 44 struct ucall uc; 45 45 46 46 TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_CPUID)); ··· 57 57 vcpu_set_hv_cpuid(vcpu); 58 58 59 59 /* Hypercall input */ 60 - hcall_in_page = vm_vaddr_alloc_pages(vm, 1); 60 + hcall_in_page = vm_alloc_pages(vm, 1); 61 61 memset(addr_gva2hva(vm, hcall_in_page), 0x0, vm->page_size); 62 62 63 63 /* Hypercall output */ 64 - hcall_out_page = vm_vaddr_alloc_pages(vm, 1); 64 + hcall_out_page = vm_alloc_pages(vm, 1); 65 65 memset(addr_gva2hva(vm, hcall_out_page), 0x0, vm->page_size); 66 66 67 67 vcpu_args_set(vcpu, 3, addr_gva2gpa(vm, hcall_in_page),
+13 -13
tools/testing/selftests/kvm/x86/hyperv_features.c
··· 22 22 KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EBX, 0) 23 23 24 24 struct msr_data { 25 - uint32_t idx; 25 + u32 idx; 26 26 bool fault_expected; 27 27 bool write; 28 28 u64 write_val; 29 29 }; 30 30 31 31 struct hcall_data { 32 - uint64_t control; 33 - uint64_t expect; 32 + u64 control; 33 + u64 expect; 34 34 bool ud_expected; 35 35 }; 36 36 37 - static bool is_write_only_msr(uint32_t msr) 37 + static bool is_write_only_msr(u32 msr) 38 38 { 39 39 return msr == HV_X64_MSR_EOI; 40 40 } 41 41 42 42 static void guest_msr(struct msr_data *msr) 43 43 { 44 - uint8_t vector = 0; 45 - uint64_t msr_val = 0; 44 + u8 vector = 0; 45 + u64 msr_val = 0; 46 46 47 47 GUEST_ASSERT(msr->idx); 48 48 ··· 82 82 GUEST_DONE(); 83 83 } 84 84 85 - static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall) 85 + static void guest_hcall(gpa_t pgs_gpa, struct hcall_data *hcall) 86 86 { 87 87 u64 res, input, output; 88 - uint8_t vector; 88 + u8 vector; 89 89 90 90 GUEST_ASSERT_NE(hcall->control, 0); 91 91 ··· 134 134 struct kvm_vm *vm; 135 135 struct ucall uc; 136 136 int stage = 0; 137 - vm_vaddr_t msr_gva; 137 + gva_t msr_gva; 138 138 struct msr_data *msr; 139 139 bool has_invtsc = kvm_cpu_has(X86_FEATURE_INVTSC); 140 140 141 141 while (true) { 142 142 vm = vm_create_with_one_vcpu(&vcpu, guest_msr); 143 143 144 - msr_gva = vm_vaddr_alloc_page(vm); 144 + msr_gva = vm_alloc_page(vm); 145 145 memset(addr_gva2hva(vm, msr_gva), 0x0, getpagesize()); 146 146 msr = addr_gva2hva(vm, msr_gva); 147 147 ··· 523 523 struct kvm_vm *vm; 524 524 struct ucall uc; 525 525 int stage = 0; 526 - vm_vaddr_t hcall_page, hcall_params; 526 + gva_t hcall_page, hcall_params; 527 527 struct hcall_data *hcall; 528 528 529 529 while (true) { 530 530 vm = vm_create_with_one_vcpu(&vcpu, guest_hcall); 531 531 532 532 /* Hypercall input/output */ 533 - hcall_page = vm_vaddr_alloc_pages(vm, 2); 533 + hcall_page = vm_alloc_pages(vm, 2); 534 534 memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize()); 535 535 536 - hcall_params = vm_vaddr_alloc_page(vm); 536 + hcall_params = vm_alloc_page(vm); 537 537 memset(addr_gva2hva(vm, hcall_params), 0x0, getpagesize()); 538 538 hcall = addr_gva2hva(vm, hcall_params); 539 539
+6 -6
tools/testing/selftests/kvm/x86/hyperv_ipi.c
··· 18 18 19 19 #define IPI_VECTOR 0xfe 20 20 21 - static volatile uint64_t ipis_rcvd[RECEIVER_VCPU_ID_2 + 1]; 21 + static volatile u64 ipis_rcvd[RECEIVER_VCPU_ID_2 + 1]; 22 22 23 23 struct hv_vpset { 24 24 u64 format; ··· 45 45 struct hv_vpset vp_set; 46 46 }; 47 47 48 - static inline void hv_init(vm_vaddr_t pgs_gpa) 48 + static inline void hv_init(gpa_t pgs_gpa) 49 49 { 50 50 wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID); 51 51 wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa); 52 52 } 53 53 54 - static void receiver_code(void *hcall_page, vm_vaddr_t pgs_gpa) 54 + static void receiver_code(void *hcall_page, gpa_t pgs_gpa) 55 55 { 56 56 u32 vcpu_id; 57 57 ··· 85 85 asm volatile("nop"); 86 86 } 87 87 88 - static void sender_guest_code(void *hcall_page, vm_vaddr_t pgs_gpa) 88 + static void sender_guest_code(void *hcall_page, gpa_t pgs_gpa) 89 89 { 90 90 struct hv_send_ipi *ipi = (struct hv_send_ipi *)hcall_page; 91 91 struct hv_send_ipi_ex *ipi_ex = (struct hv_send_ipi_ex *)hcall_page; ··· 243 243 { 244 244 struct kvm_vm *vm; 245 245 struct kvm_vcpu *vcpu[3]; 246 - vm_vaddr_t hcall_page; 246 + gva_t hcall_page; 247 247 pthread_t threads[2]; 248 248 int stage = 1, r; 249 249 struct ucall uc; ··· 253 253 vm = vm_create_with_one_vcpu(&vcpu[0], sender_guest_code); 254 254 255 255 /* Hypercall input/output */ 256 - hcall_page = vm_vaddr_alloc_pages(vm, 2); 256 + hcall_page = vm_alloc_pages(vm, 2); 257 257 memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize()); 258 258 259 259
+5 -5
tools/testing/selftests/kvm/x86/hyperv_svm_test.c
··· 21 21 #define L2_GUEST_STACK_SIZE 256 22 22 23 23 /* Exit to L1 from L2 with RDMSR instruction */ 24 - static inline void rdmsr_from_l2(uint32_t msr) 24 + static inline void rdmsr_from_l2(u32 msr) 25 25 { 26 26 /* Currently, L1 doesn't preserve GPRs during vmexits. */ 27 27 __asm__ __volatile__ ("rdmsr" : : "c"(msr) : ··· 67 67 68 68 static void __attribute__((__flatten__)) guest_code(struct svm_test_data *svm, 69 69 struct hyperv_test_pages *hv_pages, 70 - vm_vaddr_t pgs_gpa) 70 + gpa_t pgs_gpa) 71 71 { 72 72 unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; 73 73 struct vmcb *vmcb = svm->vmcb; ··· 149 149 150 150 int main(int argc, char *argv[]) 151 151 { 152 - vm_vaddr_t nested_gva = 0, hv_pages_gva = 0; 153 - vm_vaddr_t hcall_page; 152 + gva_t nested_gva = 0, hv_pages_gva = 0; 153 + gva_t hcall_page; 154 154 struct kvm_vcpu *vcpu; 155 155 struct kvm_vm *vm; 156 156 struct ucall uc; ··· 165 165 vcpu_alloc_svm(vm, &nested_gva); 166 166 vcpu_alloc_hyperv_test_pages(vm, &hv_pages_gva); 167 167 168 - hcall_page = vm_vaddr_alloc_pages(vm, 1); 168 + hcall_page = vm_alloc_pages(vm, 1); 169 169 memset(addr_gva2hva(vm, hcall_page), 0x0, getpagesize()); 170 170 171 171 vcpu_args_set(vcpu, 3, nested_gva, hv_pages_gva, addr_gva2gpa(vm, hcall_page));
+18 -18
tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c
··· 61 61 * - GVAs of the test pages' PTEs 62 62 */ 63 63 struct test_data { 64 - vm_vaddr_t hcall_gva; 65 - vm_paddr_t hcall_gpa; 66 - vm_vaddr_t test_pages; 67 - vm_vaddr_t test_pages_pte[NTEST_PAGES]; 64 + gva_t hcall_gva; 65 + gpa_t hcall_gpa; 66 + gva_t test_pages; 67 + gva_t test_pages_pte[NTEST_PAGES]; 68 68 }; 69 69 70 70 /* 'Worker' vCPU code checking the contents of the test page */ 71 - static void worker_guest_code(vm_vaddr_t test_data) 71 + static void worker_guest_code(gva_t test_data) 72 72 { 73 73 struct test_data *data = (struct test_data *)test_data; 74 74 u32 vcpu_id = rdmsr(HV_X64_MSR_VP_INDEX); ··· 133 133 * Update PTEs swapping two test pages. 134 134 * TODO: use swap()/xchg() when these are provided. 135 135 */ 136 - static void swap_two_test_pages(vm_paddr_t pte_gva1, vm_paddr_t pte_gva2) 136 + static void swap_two_test_pages(gpa_t pte_gva1, gpa_t pte_gva2) 137 137 { 138 - uint64_t tmp = *(uint64_t *)pte_gva1; 138 + u64 tmp = *(u64 *)pte_gva1; 139 139 140 - *(uint64_t *)pte_gva1 = *(uint64_t *)pte_gva2; 141 - *(uint64_t *)pte_gva2 = tmp; 140 + *(u64 *)pte_gva1 = *(u64 *)pte_gva2; 141 + *(u64 *)pte_gva2 = tmp; 142 142 } 143 143 144 144 /* ··· 196 196 #define TESTVAL2 0x0202020202020202 197 197 198 198 /* Main vCPU doing the test */ 199 - static void sender_guest_code(vm_vaddr_t test_data) 199 + static void sender_guest_code(gva_t test_data) 200 200 { 201 201 struct test_data *data = (struct test_data *)test_data; 202 202 struct hv_tlb_flush *flush = (struct hv_tlb_flush *)data->hcall_gva; 203 203 struct hv_tlb_flush_ex *flush_ex = (struct hv_tlb_flush_ex *)data->hcall_gva; 204 - vm_paddr_t hcall_gpa = data->hcall_gpa; 204 + gpa_t hcall_gpa = data->hcall_gpa; 205 205 int i, stage = 1; 206 206 207 207 wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID); ··· 581 581 struct kvm_vm *vm; 582 582 struct kvm_vcpu *vcpu[3]; 583 583 pthread_t threads[2]; 584 - vm_vaddr_t test_data_page, gva; 585 - vm_paddr_t gpa; 586 - uint64_t *pte; 584 + gva_t test_data_page, gva; 585 + gpa_t gpa; 586 + u64 *pte; 587 587 struct test_data *data; 588 588 struct ucall uc; 589 589 int stage = 1, r, i; ··· 593 593 vm = vm_create_with_one_vcpu(&vcpu[0], sender_guest_code); 594 594 595 595 /* Test data page */ 596 - test_data_page = vm_vaddr_alloc_page(vm); 596 + test_data_page = vm_alloc_page(vm); 597 597 data = (struct test_data *)addr_gva2hva(vm, test_data_page); 598 598 599 599 /* Hypercall input/output */ 600 - data->hcall_gva = vm_vaddr_alloc_pages(vm, 2); 600 + data->hcall_gva = vm_alloc_pages(vm, 2); 601 601 data->hcall_gpa = addr_gva2gpa(vm, data->hcall_gva); 602 602 memset(addr_gva2hva(vm, data->hcall_gva), 0x0, 2 * PAGE_SIZE); 603 603 ··· 606 606 * and the test will swap their mappings. The third page keeps the indication 607 607 * about the current state of mappings. 608 608 */ 609 - data->test_pages = vm_vaddr_alloc_pages(vm, NTEST_PAGES + 1); 609 + data->test_pages = vm_alloc_pages(vm, NTEST_PAGES + 1); 610 610 for (i = 0; i < NTEST_PAGES; i++) 611 611 memset(addr_gva2hva(vm, data->test_pages + PAGE_SIZE * i), 612 612 (u8)(i + 1), PAGE_SIZE); ··· 617 617 * Get PTE pointers for test pages and map them inside the guest. 618 618 * Use separate page for each PTE for simplicity. 619 619 */ 620 - gva = vm_vaddr_unused_gap(vm, NTEST_PAGES * PAGE_SIZE, KVM_UTIL_MIN_VADDR); 620 + gva = vm_unused_gva_gap(vm, NTEST_PAGES * PAGE_SIZE, KVM_UTIL_MIN_VADDR); 621 621 for (i = 0; i < NTEST_PAGES; i++) { 622 622 pte = vm_get_pte(vm, data->test_pages + i * PAGE_SIZE); 623 623 gpa = addr_hva2gpa(vm, pte);
+1 -1
tools/testing/selftests/kvm/x86/kvm_buslock_test.c
··· 73 73 int main(int argc, char *argv[]) 74 74 { 75 75 const bool has_nested = kvm_cpu_has(X86_FEATURE_SVM) || kvm_cpu_has(X86_FEATURE_VMX); 76 - vm_vaddr_t nested_test_data_gva; 76 + gva_t nested_test_data_gva; 77 77 struct kvm_vcpu *vcpu; 78 78 struct kvm_run *run; 79 79 struct kvm_vm *vm;
+7 -7
tools/testing/selftests/kvm/x86/kvm_clock_test.c
··· 17 17 #include "processor.h" 18 18 19 19 struct test_case { 20 - uint64_t kvmclock_base; 21 - int64_t realtime_offset; 20 + u64 kvmclock_base; 21 + s64 realtime_offset; 22 22 }; 23 23 24 24 static struct test_case test_cases[] = { ··· 31 31 #define GUEST_SYNC_CLOCK(__stage, __val) \ 32 32 GUEST_SYNC_ARGS(__stage, __val, 0, 0, 0) 33 33 34 - static void guest_main(vm_paddr_t pvti_pa, struct pvclock_vcpu_time_info *pvti) 34 + static void guest_main(gpa_t pvti_pa, struct pvclock_vcpu_time_info *pvti) 35 35 { 36 36 int i; 37 37 ··· 52 52 static void handle_sync(struct ucall *uc, struct kvm_clock_data *start, 53 53 struct kvm_clock_data *end) 54 54 { 55 - uint64_t obs, exp_lo, exp_hi; 55 + u64 obs, exp_lo, exp_hi; 56 56 57 57 obs = uc->args[2]; 58 58 exp_lo = start->clock; ··· 135 135 int main(void) 136 136 { 137 137 struct kvm_vcpu *vcpu; 138 - vm_vaddr_t pvti_gva; 139 - vm_paddr_t pvti_gpa; 138 + gva_t pvti_gva; 139 + gpa_t pvti_gpa; 140 140 struct kvm_vm *vm; 141 141 int flags; 142 142 ··· 147 147 148 148 vm = vm_create_with_one_vcpu(&vcpu, guest_main); 149 149 150 - pvti_gva = vm_vaddr_alloc(vm, getpagesize(), 0x10000); 150 + pvti_gva = vm_alloc(vm, getpagesize(), 0x10000); 151 151 pvti_gpa = addr_gva2gpa(vm, pvti_gva); 152 152 vcpu_args_set(vcpu, 2, pvti_gpa, pvti_gva); 153 153
+5 -5
tools/testing/selftests/kvm/x86/kvm_pv_test.c
··· 13 13 #include "processor.h" 14 14 15 15 struct msr_data { 16 - uint32_t idx; 16 + u32 idx; 17 17 const char *name; 18 18 }; 19 19 ··· 40 40 41 41 static void test_msr(struct msr_data *msr) 42 42 { 43 - uint64_t ignored; 44 - uint8_t vector; 43 + u64 ignored; 44 + u8 vector; 45 45 46 46 PR_MSR(msr); 47 47 ··· 53 53 } 54 54 55 55 struct hcall_data { 56 - uint64_t nr; 56 + u64 nr; 57 57 const char *name; 58 58 }; 59 59 ··· 73 73 74 74 static void test_hcall(struct hcall_data *hc) 75 75 { 76 - uint64_t r; 76 + u64 r; 77 77 78 78 PR_HCALL(hc); 79 79 r = kvm_hypercall(hc->nr, 0, 0, 0, 0);
+1 -1
tools/testing/selftests/kvm/x86/monitor_mwait_test.c
··· 67 67 68 68 int main(int argc, char *argv[]) 69 69 { 70 - uint64_t disabled_quirks; 70 + u64 disabled_quirks; 71 71 struct kvm_vcpu *vcpu; 72 72 struct kvm_vm *vm; 73 73 struct ucall uc;
+1 -1
tools/testing/selftests/kvm/x86/nested_close_kvm_test.c
··· 67 67 68 68 int main(int argc, char *argv[]) 69 69 { 70 - vm_vaddr_t guest_gva; 70 + gva_t guest_gva; 71 71 struct kvm_vcpu *vcpu; 72 72 struct kvm_vm *vm; 73 73
+5 -5
tools/testing/selftests/kvm/x86/nested_dirty_log_test.c
··· 47 47 #define TEST_SYNC_WRITE_FAULT BIT(1) 48 48 #define TEST_SYNC_NO_FAULT BIT(2) 49 49 50 - static void l2_guest_code(vm_vaddr_t base) 50 + static void l2_guest_code(gva_t base) 51 51 { 52 - vm_vaddr_t page0 = TEST_GUEST_ADDR(base, 0); 53 - vm_vaddr_t page1 = TEST_GUEST_ADDR(base, 1); 52 + gva_t page0 = TEST_GUEST_ADDR(base, 0); 53 + gva_t page1 = TEST_GUEST_ADDR(base, 1); 54 54 55 55 READ_ONCE(*(u64 *)page0); 56 56 GUEST_SYNC(page0 | TEST_SYNC_READ_FAULT); ··· 143 143 static void test_handle_ucall_sync(struct kvm_vm *vm, u64 arg, 144 144 unsigned long *bmap) 145 145 { 146 - vm_vaddr_t gva = arg & ~(PAGE_SIZE - 1); 146 + gva_t gva = arg & ~(PAGE_SIZE - 1); 147 147 int page_nr, i; 148 148 149 149 /* ··· 198 198 199 199 static void test_dirty_log(bool nested_tdp) 200 200 { 201 - vm_vaddr_t nested_gva = 0; 201 + gva_t nested_gva = 0; 202 202 unsigned long *bmap; 203 203 struct kvm_vcpu *vcpu; 204 204 struct kvm_vm *vm;
+10 -10
tools/testing/selftests/kvm/x86/nested_emulation_test.c
··· 13 13 14 14 struct emulated_instruction { 15 15 const char name[32]; 16 - uint8_t opcode[15]; 17 - uint32_t exit_reason[NR_VIRTUALIZATION_FLAVORS]; 16 + u8 opcode[15]; 17 + u32 exit_reason[NR_VIRTUALIZATION_FLAVORS]; 18 18 }; 19 19 20 20 static struct emulated_instruction instructions[] = { ··· 32 32 }, 33 33 }; 34 34 35 - static uint8_t kvm_fep[] = { 0x0f, 0x0b, 0x6b, 0x76, 0x6d }; /* ud2 ; .ascii "kvm" */ 36 - static uint8_t l2_guest_code[sizeof(kvm_fep) + 15]; 37 - static uint8_t *l2_instruction = &l2_guest_code[sizeof(kvm_fep)]; 35 + static u8 kvm_fep[] = { 0x0f, 0x0b, 0x6b, 0x76, 0x6d }; /* ud2 ; .ascii "kvm" */ 36 + static u8 l2_guest_code[sizeof(kvm_fep) + 15]; 37 + static u8 *l2_instruction = &l2_guest_code[sizeof(kvm_fep)]; 38 38 39 - static uint32_t get_instruction_length(struct emulated_instruction *insn) 39 + static u32 get_instruction_length(struct emulated_instruction *insn) 40 40 { 41 - uint32_t i; 41 + u32 i; 42 42 43 43 for (i = 0; i < ARRAY_SIZE(insn->opcode) && insn->opcode[i]; i++) 44 44 ; ··· 81 81 82 82 for (i = 0; i < ARRAY_SIZE(instructions); i++) { 83 83 struct emulated_instruction *insn = &instructions[i]; 84 - uint32_t insn_len = get_instruction_length(insn); 85 - uint32_t exit_insn_len; 84 + u32 insn_len = get_instruction_length(insn); 85 + u32 exit_insn_len; 86 86 u32 exit_reason; 87 87 88 88 /* ··· 122 122 123 123 int main(int argc, char *argv[]) 124 124 { 125 - vm_vaddr_t nested_test_data_gva; 125 + gva_t nested_test_data_gva; 126 126 struct kvm_vcpu *vcpu; 127 127 struct kvm_vm *vm; 128 128
+3 -3
tools/testing/selftests/kvm/x86/nested_exceptions_test.c
··· 72 72 } 73 73 74 74 static void svm_run_l2(struct svm_test_data *svm, void *l2_code, int vector, 75 - uint32_t error_code) 75 + u32 error_code) 76 76 { 77 77 struct vmcb *vmcb = svm->vmcb; 78 78 struct vmcb_control_area *ctrl = &vmcb->control; ··· 111 111 GUEST_DONE(); 112 112 } 113 113 114 - static void vmx_run_l2(void *l2_code, int vector, uint32_t error_code) 114 + static void vmx_run_l2(void *l2_code, int vector, u32 error_code) 115 115 { 116 116 GUEST_ASSERT(!vmwrite(GUEST_RIP, (u64)l2_code)); 117 117 ··· 216 216 */ 217 217 int main(int argc, char *argv[]) 218 218 { 219 - vm_vaddr_t nested_test_data_gva; 219 + gva_t nested_test_data_gva; 220 220 struct kvm_vcpu_events events; 221 221 struct kvm_vcpu *vcpu; 222 222 struct kvm_vm *vm;
+1 -1
tools/testing/selftests/kvm/x86/nested_invalid_cr3_test.c
··· 78 78 { 79 79 struct kvm_vcpu *vcpu; 80 80 struct kvm_vm *vm; 81 - vm_vaddr_t guest_gva = 0; 81 + gva_t guest_gva = 0; 82 82 83 83 TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX) || 84 84 kvm_cpu_has(X86_FEATURE_SVM));
+2 -2
tools/testing/selftests/kvm/x86/nested_set_state_test.c
··· 250 250 251 251 static void vcpu_efer_enable_svm(struct kvm_vcpu *vcpu) 252 252 { 253 - uint64_t old_efer = vcpu_get_msr(vcpu, MSR_EFER); 253 + u64 old_efer = vcpu_get_msr(vcpu, MSR_EFER); 254 254 255 255 vcpu_set_msr(vcpu, MSR_EFER, old_efer | EFER_SVME); 256 256 } 257 257 258 258 static void vcpu_efer_disable_svm(struct kvm_vcpu *vcpu) 259 259 { 260 - uint64_t old_efer = vcpu_get_msr(vcpu, MSR_EFER); 260 + u64 old_efer = vcpu_get_msr(vcpu, MSR_EFER); 261 261 262 262 vcpu_set_msr(vcpu, MSR_EFER, old_efer & ~EFER_SVME); 263 263 }
+6 -6
tools/testing/selftests/kvm/x86/nested_tsc_adjust_test.c
··· 53 53 /* The virtual machine object. */ 54 54 static struct kvm_vm *vm; 55 55 56 - static void check_ia32_tsc_adjust(int64_t max) 56 + static void check_ia32_tsc_adjust(s64 max) 57 57 { 58 - int64_t adjust; 58 + s64 adjust; 59 59 60 60 adjust = rdmsr(MSR_IA32_TSC_ADJUST); 61 61 GUEST_SYNC(adjust); ··· 64 64 65 65 static void l2_guest_code(void) 66 66 { 67 - uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE; 67 + u64 l1_tsc = rdtsc() - TSC_OFFSET_VALUE; 68 68 69 69 wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE); 70 70 check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE); ··· 88 88 */ 89 89 if (this_cpu_has(X86_FEATURE_VMX)) { 90 90 struct vmx_pages *vmx_pages = data; 91 - uint32_t control; 91 + u32 control; 92 92 93 93 GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages)); 94 94 GUEST_ASSERT(load_vmcs(vmx_pages)); ··· 117 117 GUEST_DONE(); 118 118 } 119 119 120 - static void report(int64_t val) 120 + static void report(s64 val) 121 121 { 122 122 pr_info("IA32_TSC_ADJUST is %ld (%lld * TSC_ADJUST_VALUE + %lld).\n", 123 123 val, val / TSC_ADJUST_VALUE, val % TSC_ADJUST_VALUE); ··· 125 125 126 126 int main(int argc, char *argv[]) 127 127 { 128 - vm_vaddr_t nested_gva; 128 + gva_t nested_gva; 129 129 struct kvm_vcpu *vcpu; 130 130 131 131 TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX) ||
+12 -12
tools/testing/selftests/kvm/x86/nested_tsc_scaling_test.c
··· 19 19 /* L2 is scaled up (from L1's perspective) by this factor */ 20 20 #define L2_SCALE_FACTOR 4ULL 21 21 22 - #define TSC_OFFSET_L2 ((uint64_t) -33125236320908) 22 + #define TSC_OFFSET_L2 ((u64)-33125236320908) 23 23 #define TSC_MULTIPLIER_L2 (L2_SCALE_FACTOR << 48) 24 24 25 25 #define L2_GUEST_STACK_SIZE 64 ··· 35 35 * measurements, a difference of 1% between the actual and the expected value 36 36 * is tolerated. 37 37 */ 38 - static void compare_tsc_freq(uint64_t actual, uint64_t expected) 38 + static void compare_tsc_freq(u64 actual, u64 expected) 39 39 { 40 - uint64_t tolerance, thresh_low, thresh_high; 40 + u64 tolerance, thresh_low, thresh_high; 41 41 42 42 tolerance = expected / 100; 43 43 thresh_low = expected - tolerance; ··· 55 55 56 56 static void check_tsc_freq(int level) 57 57 { 58 - uint64_t tsc_start, tsc_end, tsc_freq; 58 + u64 tsc_start, tsc_end, tsc_freq; 59 59 60 60 /* 61 61 * Reading the TSC twice with about a second's difference should give ··· 106 106 static void l1_vmx_code(struct vmx_pages *vmx_pages) 107 107 { 108 108 unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; 109 - uint32_t control; 109 + u32 control; 110 110 111 111 /* check that L1's frequency looks alright before launching L2 */ 112 112 check_tsc_freq(UCHECK_L1); ··· 152 152 { 153 153 struct kvm_vcpu *vcpu; 154 154 struct kvm_vm *vm; 155 - vm_vaddr_t guest_gva = 0; 155 + gva_t guest_gva = 0; 156 156 157 - uint64_t tsc_start, tsc_end; 158 - uint64_t tsc_khz; 159 - uint64_t l1_scale_factor; 160 - uint64_t l0_tsc_freq = 0; 161 - uint64_t l1_tsc_freq = 0; 162 - uint64_t l2_tsc_freq = 0; 157 + u64 tsc_start, tsc_end; 158 + u64 tsc_khz; 159 + u64 l1_scale_factor; 160 + u64 l0_tsc_freq = 0; 161 + u64 l1_tsc_freq = 0; 162 + u64 l2_tsc_freq = 0; 163 163 164 164 TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX) || 165 165 kvm_cpu_has(X86_FEATURE_SVM));
+1 -1
tools/testing/selftests/kvm/x86/nested_vmsave_vmload_test.c
··· 128 128 129 129 int main(int argc, char *argv[]) 130 130 { 131 - vm_vaddr_t nested_gva = 0; 131 + gva_t nested_gva = 0; 132 132 struct vmcb *test_vmcb[2]; 133 133 struct kvm_vcpu *vcpu; 134 134 struct kvm_vm *vm;
+9 -9
tools/testing/selftests/kvm/x86/nx_huge_pages_test.c
··· 32 32 #define RETURN_OPCODE 0xC3 33 33 34 34 /* Call the specified memory address. */ 35 - static void guest_do_CALL(uint64_t target) 35 + static void guest_do_CALL(u64 target) 36 36 { 37 37 ((void (*)(void)) target)(); 38 38 } ··· 46 46 */ 47 47 void guest_code(void) 48 48 { 49 - uint64_t hpage_1 = HPAGE_GVA; 50 - uint64_t hpage_2 = hpage_1 + (PAGE_SIZE * 512); 51 - uint64_t hpage_3 = hpage_2 + (PAGE_SIZE * 512); 49 + u64 hpage_1 = HPAGE_GVA; 50 + u64 hpage_2 = hpage_1 + (PAGE_SIZE * 512); 51 + u64 hpage_3 = hpage_2 + (PAGE_SIZE * 512); 52 52 53 - READ_ONCE(*(uint64_t *)hpage_1); 53 + READ_ONCE(*(u64 *)hpage_1); 54 54 GUEST_SYNC(1); 55 55 56 - READ_ONCE(*(uint64_t *)hpage_2); 56 + READ_ONCE(*(u64 *)hpage_2); 57 57 GUEST_SYNC(2); 58 58 59 59 guest_do_CALL(hpage_1); ··· 62 62 guest_do_CALL(hpage_3); 63 63 GUEST_SYNC(4); 64 64 65 - READ_ONCE(*(uint64_t *)hpage_1); 65 + READ_ONCE(*(u64 *)hpage_1); 66 66 GUEST_SYNC(5); 67 67 68 - READ_ONCE(*(uint64_t *)hpage_3); 68 + READ_ONCE(*(u64 *)hpage_3); 69 69 GUEST_SYNC(6); 70 70 } 71 71 ··· 107 107 { 108 108 struct kvm_vcpu *vcpu; 109 109 struct kvm_vm *vm; 110 - uint64_t nr_bytes; 110 + u64 nr_bytes; 111 111 void *hva; 112 112 int r; 113 113
+3 -3
tools/testing/selftests/kvm/x86/platform_info_test.c
··· 23 23 24 24 static void guest_code(void) 25 25 { 26 - uint64_t msr_platform_info; 27 - uint8_t vector; 26 + u64 msr_platform_info; 27 + u8 vector; 28 28 29 29 GUEST_SYNC(true); 30 30 msr_platform_info = rdmsr(MSR_PLATFORM_INFO); ··· 42 42 { 43 43 struct kvm_vcpu *vcpu; 44 44 struct kvm_vm *vm; 45 - uint64_t msr_platform_info; 45 + u64 msr_platform_info; 46 46 struct ucall uc; 47 47 48 48 TEST_REQUIRE(kvm_has_cap(KVM_CAP_MSR_PLATFORM_INFO));
+54 -55
tools/testing/selftests/kvm/x86/pmu_counters_test.c
··· 30 30 #define NUM_INSNS_RETIRED (NUM_LOOPS * NUM_INSNS_PER_LOOP + NUM_EXTRA_INSNS) 31 31 32 32 /* Track which architectural events are supported by hardware. */ 33 - static uint32_t hardware_pmu_arch_events; 33 + static u32 hardware_pmu_arch_events; 34 34 35 - static uint8_t kvm_pmu_version; 35 + static u8 kvm_pmu_version; 36 36 static bool kvm_has_perf_caps; 37 37 38 38 #define X86_PMU_FEATURE_NULL \ ··· 57 57 * kvm_x86_pmu_feature use syntax that's only valid in function scope, and the 58 58 * compiler often thinks the feature definitions aren't compile-time constants. 59 59 */ 60 - static struct kvm_intel_pmu_event intel_event_to_feature(uint8_t idx) 60 + static struct kvm_intel_pmu_event intel_event_to_feature(u8 idx) 61 61 { 62 62 const struct kvm_intel_pmu_event __intel_event_to_feature[] = { 63 63 [INTEL_ARCH_CPU_CYCLES_INDEX] = { X86_PMU_FEATURE_CPU_CYCLES, X86_PMU_FEATURE_CPU_CYCLES_FIXED }, ··· 89 89 90 90 static struct kvm_vm *pmu_vm_create_with_one_vcpu(struct kvm_vcpu **vcpu, 91 91 void *guest_code, 92 - uint8_t pmu_version, 93 - uint64_t perf_capabilities) 92 + u8 pmu_version, 93 + u64 perf_capabilities) 94 94 { 95 95 struct kvm_vm *vm; 96 96 ··· 132 132 } while (uc.cmd != UCALL_DONE); 133 133 } 134 134 135 - static uint8_t guest_get_pmu_version(void) 135 + static u8 guest_get_pmu_version(void) 136 136 { 137 137 /* 138 138 * Return the effective PMU version, i.e. the minimum between what KVM ··· 141 141 * supported by KVM to verify KVM doesn't freak out and do something 142 142 * bizarre with an architecturally valid, but unsupported, version. 143 143 */ 144 - return min_t(uint8_t, kvm_pmu_version, this_cpu_property(X86_PROPERTY_PMU_VERSION)); 144 + return min_t(u8, kvm_pmu_version, this_cpu_property(X86_PROPERTY_PMU_VERSION)); 145 145 } 146 146 147 147 /* ··· 153 153 * Sanity check that in all cases, the event doesn't count when it's disabled, 154 154 * and that KVM correctly emulates the write of an arbitrary value. 155 155 */ 156 - static void guest_assert_event_count(uint8_t idx, uint32_t pmc, uint32_t pmc_msr) 156 + static void guest_assert_event_count(u8 idx, u32 pmc, u32 pmc_msr) 157 157 { 158 - uint64_t count; 158 + u64 count; 159 159 160 160 count = _rdpmc(pmc); 161 161 if (!(hardware_pmu_arch_events & BIT(idx))) ··· 236 236 FEP "xor %%eax, %%eax\n\t" \ 237 237 FEP "xor %%edx, %%edx\n\t" \ 238 238 "wrmsr\n\t" \ 239 - :: "a"((uint32_t)_value), "d"(_value >> 32), \ 239 + :: "a"((u32)_value), "d"(_value >> 32), \ 240 240 "c"(_msr), "D"(_msr), [m]"m"(kvm_pmu_version) \ 241 241 ); \ 242 242 } while (0) ··· 255 255 guest_assert_event_count(_idx, _pmc, _pmc_msr); \ 256 256 } while (0) 257 257 258 - static void __guest_test_arch_event(uint8_t idx, uint32_t pmc, uint32_t pmc_msr, 259 - uint32_t ctrl_msr, uint64_t ctrl_msr_value) 258 + static void __guest_test_arch_event(u8 idx, u32 pmc, u32 pmc_msr, 259 + u32 ctrl_msr, u64 ctrl_msr_value) 260 260 { 261 261 GUEST_TEST_EVENT(idx, pmc, pmc_msr, ctrl_msr, ctrl_msr_value, ""); 262 262 ··· 264 264 GUEST_TEST_EVENT(idx, pmc, pmc_msr, ctrl_msr, ctrl_msr_value, KVM_FEP); 265 265 } 266 266 267 - static void guest_test_arch_event(uint8_t idx) 267 + static void guest_test_arch_event(u8 idx) 268 268 { 269 - uint32_t nr_gp_counters = this_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS); 270 - uint32_t pmu_version = guest_get_pmu_version(); 269 + u32 nr_gp_counters = this_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS); 270 + u32 pmu_version = guest_get_pmu_version(); 271 271 /* PERF_GLOBAL_CTRL exists only for Architectural PMU Version 2+. 
*/ 272 272 bool guest_has_perf_global_ctrl = pmu_version >= 2; 273 273 struct kvm_x86_pmu_feature gp_event, fixed_event; 274 - uint32_t base_pmc_msr; 274 + u32 base_pmc_msr; 275 275 unsigned int i; 276 276 277 277 /* The host side shouldn't invoke this without a guest PMU. */ ··· 289 289 GUEST_ASSERT(nr_gp_counters); 290 290 291 291 for (i = 0; i < nr_gp_counters; i++) { 292 - uint64_t eventsel = ARCH_PERFMON_EVENTSEL_OS | 292 + u64 eventsel = ARCH_PERFMON_EVENTSEL_OS | 293 293 ARCH_PERFMON_EVENTSEL_ENABLE | 294 294 intel_pmu_arch_events[idx]; 295 295 ··· 320 320 321 321 static void guest_test_arch_events(void) 322 322 { 323 - uint8_t i; 323 + u8 i; 324 324 325 325 for (i = 0; i < NR_INTEL_ARCH_EVENTS; i++) 326 326 guest_test_arch_event(i); ··· 328 328 GUEST_DONE(); 329 329 } 330 330 331 - static void test_arch_events(uint8_t pmu_version, uint64_t perf_capabilities, 332 - uint8_t length, uint32_t unavailable_mask) 331 + static void test_arch_events(u8 pmu_version, u64 perf_capabilities, 332 + u8 length, u32 unavailable_mask) 333 333 { 334 334 struct kvm_vcpu *vcpu; 335 335 struct kvm_vm *vm; ··· 373 373 "Expected " #insn "(0x%x) to yield 0x%lx, got 0x%lx", \ 374 374 msr, expected, val); 375 375 376 - static void guest_test_rdpmc(uint32_t rdpmc_idx, bool expect_success, 377 - uint64_t expected_val) 376 + static void guest_test_rdpmc(u32 rdpmc_idx, bool expect_success, 377 + u64 expected_val) 378 378 { 379 - uint8_t vector; 380 - uint64_t val; 379 + u8 vector; 380 + u64 val; 381 381 382 382 vector = rdpmc_safe(rdpmc_idx, &val); 383 383 GUEST_ASSERT_PMC_MSR_ACCESS(RDPMC, rdpmc_idx, !expect_success, vector); ··· 393 393 GUEST_ASSERT_PMC_VALUE(RDPMC, rdpmc_idx, val, expected_val); 394 394 } 395 395 396 - static void guest_rd_wr_counters(uint32_t base_msr, uint8_t nr_possible_counters, 397 - uint8_t nr_counters, uint32_t or_mask) 396 + static void guest_rd_wr_counters(u32 base_msr, u8 nr_possible_counters, 397 + u8 nr_counters, u32 or_mask) 398 398 { 399 399 const bool pmu_has_fast_mode = !guest_get_pmu_version(); 400 - uint8_t i; 400 + u8 i; 401 401 402 402 for (i = 0; i < nr_possible_counters; i++) { 403 403 /* 404 404 * TODO: Test a value that validates full-width writes and the 405 405 * width of the counters. 406 406 */ 407 - const uint64_t test_val = 0xffff; 408 - const uint32_t msr = base_msr + i; 407 + const u64 test_val = 0xffff; 408 + const u32 msr = base_msr + i; 409 409 410 410 /* 411 411 * Fixed counters are supported if the counter is less than the ··· 418 418 * KVM drops writes to MSR_P6_PERFCTR[0|1] if the counters are 419 419 * unsupported, i.e. doesn't #GP and reads back '0'. 420 420 */ 421 - const uint64_t expected_val = expect_success ? test_val : 0; 421 + const u64 expected_val = expect_success ? 
test_val : 0; 422 422 const bool expect_gp = !expect_success && msr != MSR_P6_PERFCTR0 && 423 423 msr != MSR_P6_PERFCTR1; 424 - uint32_t rdpmc_idx; 425 - uint8_t vector; 426 - uint64_t val; 424 + u32 rdpmc_idx; 425 + u8 vector; 426 + u64 val; 427 427 428 428 vector = wrmsr_safe(msr, test_val); 429 429 GUEST_ASSERT_PMC_MSR_ACCESS(WRMSR, msr, expect_gp, vector); ··· 461 461 462 462 static void guest_test_gp_counters(void) 463 463 { 464 - uint8_t pmu_version = guest_get_pmu_version(); 465 - uint8_t nr_gp_counters = 0; 466 - uint32_t base_msr; 464 + u8 pmu_version = guest_get_pmu_version(); 465 + u8 nr_gp_counters = 0; 466 + u32 base_msr; 467 467 468 468 if (pmu_version) 469 469 nr_gp_counters = this_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS); ··· 477 477 * counters, of which there are none. 478 478 */ 479 479 if (pmu_version > 1) { 480 - uint64_t global_ctrl = rdmsr(MSR_CORE_PERF_GLOBAL_CTRL); 480 + u64 global_ctrl = rdmsr(MSR_CORE_PERF_GLOBAL_CTRL); 481 481 482 482 if (nr_gp_counters) 483 483 GUEST_ASSERT_EQ(global_ctrl, GENMASK_ULL(nr_gp_counters - 1, 0)); ··· 495 495 GUEST_DONE(); 496 496 } 497 497 498 - static void test_gp_counters(uint8_t pmu_version, uint64_t perf_capabilities, 499 - uint8_t nr_gp_counters) 498 + static void test_gp_counters(u8 pmu_version, u64 perf_capabilities, 499 + u8 nr_gp_counters) 500 500 { 501 501 struct kvm_vcpu *vcpu; 502 502 struct kvm_vm *vm; ··· 514 514 515 515 static void guest_test_fixed_counters(void) 516 516 { 517 - uint64_t supported_bitmask = 0; 518 - uint8_t nr_fixed_counters = 0; 519 - uint8_t i; 517 + u64 supported_bitmask = 0; 518 + u8 nr_fixed_counters = 0; 519 + u8 i; 520 520 521 521 /* Fixed counters require Architectural vPMU Version 2+. */ 522 522 if (guest_get_pmu_version() >= 2) ··· 533 533 nr_fixed_counters, supported_bitmask); 534 534 535 535 for (i = 0; i < MAX_NR_FIXED_COUNTERS; i++) { 536 - uint8_t vector; 537 - uint64_t val; 536 + u8 vector; 537 + u64 val; 538 538 539 539 if (i >= nr_fixed_counters && !(supported_bitmask & BIT_ULL(i))) { 540 540 vector = wrmsr_safe(MSR_CORE_PERF_FIXED_CTR_CTRL, ··· 561 561 GUEST_DONE(); 562 562 } 563 563 564 - static void test_fixed_counters(uint8_t pmu_version, uint64_t perf_capabilities, 565 - uint8_t nr_fixed_counters, 566 - uint32_t supported_bitmask) 564 + static void test_fixed_counters(u8 pmu_version, u64 perf_capabilities, 565 + u8 nr_fixed_counters, u32 supported_bitmask) 567 566 { 568 567 struct kvm_vcpu *vcpu; 569 568 struct kvm_vm *vm; ··· 582 583 583 584 static void test_intel_counters(void) 584 585 { 585 - uint8_t nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS); 586 - uint8_t nr_gp_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS); 587 - uint8_t pmu_version = kvm_cpu_property(X86_PROPERTY_PMU_VERSION); 586 + u8 nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS); 587 + u8 nr_gp_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS); 588 + u8 pmu_version = kvm_cpu_property(X86_PROPERTY_PMU_VERSION); 588 589 unsigned int i; 589 - uint8_t v, j; 590 - uint32_t k; 590 + u8 v, j; 591 + u32 k; 591 592 592 - const uint64_t perf_caps[] = { 593 + const u64 perf_caps[] = { 593 594 0, 594 595 PMU_CAP_FW_WRITES, 595 596 }; ··· 601 602 * as alternating bit sequencues, e.g. to detect if KVM is checking the 602 603 * wrong bit(s). 603 604 */ 604 - const uint32_t unavailable_masks[] = { 605 + const u32 unavailable_masks[] = { 605 606 0x0, 606 607 0xffffffffu, 607 608 0xaaaaaaaau, ··· 619 620 * Intel, i.e. 
is the last version that is guaranteed to be backwards 620 621 * compatible with KVM's existing behavior. 621 622 */ 622 - uint8_t max_pmu_version = max_t(typeof(pmu_version), pmu_version, 5); 623 + u8 max_pmu_version = max_t(typeof(pmu_version), pmu_version, 5); 623 624 624 625 /* 625 626 * Detect the existence of events that aren't supported by selftests.
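Every hunk in this file is the same mechanical substitution of kernel-style integer shorthands for the stdint spellings. A minimal standalone C sketch of that mapping, with local typedefs standing in for the selftests' shared headers (which is where the real definitions live):

#include <stdint.h>
#include <stdio.h>

/* Stand-in typedefs for the kernel-style shorthands; in-tree the selftests
 * pick these up from their shared headers, not from local definitions. */
typedef uint8_t  u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;
typedef int8_t   s8;
typedef int16_t  s16;
typedef int32_t  s32;
typedef int64_t  s64;

int main(void)
{
	/* The substitution is purely cosmetic: widths and signedness match. */
	printf("u8=%zu u16=%zu u32=%zu u64=%zu\n",
	       sizeof(u8), sizeof(u16), sizeof(u32), sizeof(u64));
	printf("s8=%zu s16=%zu s32=%zu s64=%zu\n",
	       sizeof(s8), sizeof(s16), sizeof(s32), sizeof(s64));
	return 0;
}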
+51 -51
tools/testing/selftests/kvm/x86/pmu_event_filter_test.c
··· 53 53 }; 54 54 55 55 struct { 56 - uint64_t loads; 57 - uint64_t stores; 58 - uint64_t loads_stores; 59 - uint64_t branches_retired; 60 - uint64_t instructions_retired; 56 + u64 loads; 57 + u64 stores; 58 + u64 loads_stores; 59 + u64 branches_retired; 60 + u64 instructions_retired; 61 61 } pmc_results; 62 62 63 63 /* ··· 75 75 * 76 76 * Return on success. GUEST_SYNC(0) on error. 77 77 */ 78 - static void check_msr(uint32_t msr, uint64_t bits_to_flip) 78 + static void check_msr(u32 msr, u64 bits_to_flip) 79 79 { 80 - uint64_t v = rdmsr(msr) ^ bits_to_flip; 80 + u64 v = rdmsr(msr) ^ bits_to_flip; 81 81 82 82 wrmsr(msr, v); 83 83 if (rdmsr(msr) != v) ··· 89 89 GUEST_SYNC(-EIO); 90 90 } 91 91 92 - static void run_and_measure_loop(uint32_t msr_base) 92 + static void run_and_measure_loop(u32 msr_base) 93 93 { 94 - const uint64_t branches_retired = rdmsr(msr_base + 0); 95 - const uint64_t insn_retired = rdmsr(msr_base + 1); 94 + const u64 branches_retired = rdmsr(msr_base + 0); 95 + const u64 insn_retired = rdmsr(msr_base + 1); 96 96 97 97 __asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES})); 98 98 ··· 147 147 * Run the VM to the next GUEST_SYNC(value), and return the value passed 148 148 * to the sync. Any other exit from the guest is fatal. 149 149 */ 150 - static uint64_t run_vcpu_to_sync(struct kvm_vcpu *vcpu) 150 + static u64 run_vcpu_to_sync(struct kvm_vcpu *vcpu) 151 151 { 152 152 struct ucall uc; 153 153 ··· 161 161 162 162 static void run_vcpu_and_sync_pmc_results(struct kvm_vcpu *vcpu) 163 163 { 164 - uint64_t r; 164 + u64 r; 165 165 166 166 memset(&pmc_results, 0, sizeof(pmc_results)); 167 167 sync_global_to_guest(vcpu->vm, pmc_results); ··· 182 182 */ 183 183 static bool sanity_check_pmu(struct kvm_vcpu *vcpu) 184 184 { 185 - uint64_t r; 185 + u64 r; 186 186 187 187 vm_install_exception_handler(vcpu->vm, GP_VECTOR, guest_gp_handler); 188 188 r = run_vcpu_to_sync(vcpu); ··· 195 195 * Remove the first occurrence of 'event' (if any) from the filter's 196 196 * event list. 197 197 */ 198 - static void remove_event(struct __kvm_pmu_event_filter *f, uint64_t event) 198 + static void remove_event(struct __kvm_pmu_event_filter *f, u64 event) 199 199 { 200 200 bool found = false; 201 201 int i; ··· 212 212 213 213 #define ASSERT_PMC_COUNTING_INSTRUCTIONS() \ 214 214 do { \ 215 - uint64_t br = pmc_results.branches_retired; \ 216 - uint64_t ir = pmc_results.instructions_retired; \ 215 + u64 br = pmc_results.branches_retired; \ 216 + u64 ir = pmc_results.instructions_retired; \ 217 217 bool br_matched = this_pmu_has_errata(BRANCHES_RETIRED_OVERCOUNT) ? 
\ 218 218 br >= NUM_BRANCHES : br == NUM_BRANCHES; \ 219 219 \ ··· 228 228 229 229 #define ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS() \ 230 230 do { \ 231 - uint64_t br = pmc_results.branches_retired; \ 232 - uint64_t ir = pmc_results.instructions_retired; \ 231 + u64 br = pmc_results.branches_retired; \ 232 + u64 ir = pmc_results.instructions_retired; \ 233 233 \ 234 234 TEST_ASSERT(!br, "%s: Branch instructions retired = %lu (expected 0)", \ 235 235 __func__, br); \ ··· 378 378 379 379 static bool supports_event_mem_inst_retired(void) 380 380 { 381 - uint32_t eax, ebx, ecx, edx; 381 + u32 eax, ebx, ecx, edx; 382 382 383 383 cpuid(1, &eax, &ebx, &ecx, &edx); 384 384 if (x86_family(eax) == 0x6) { ··· 415 415 #define EXCLUDE_MASKED_ENTRY(event_select, mask, match) \ 416 416 KVM_PMU_ENCODE_MASKED_ENTRY(event_select, mask, match, true) 417 417 418 - static void masked_events_guest_test(uint32_t msr_base) 418 + static void masked_events_guest_test(u32 msr_base) 419 419 { 420 420 /* 421 421 * The actual value of the counters don't determine the outcome of 422 422 * the test. Only that they are zero or non-zero. 423 423 */ 424 - const uint64_t loads = rdmsr(msr_base + 0); 425 - const uint64_t stores = rdmsr(msr_base + 1); 426 - const uint64_t loads_stores = rdmsr(msr_base + 2); 424 + const u64 loads = rdmsr(msr_base + 0); 425 + const u64 stores = rdmsr(msr_base + 1); 426 + const u64 loads_stores = rdmsr(msr_base + 2); 427 427 int val; 428 428 429 429 ··· 476 476 } 477 477 478 478 static void run_masked_events_test(struct kvm_vcpu *vcpu, 479 - const uint64_t masked_events[], 479 + const u64 masked_events[], 480 480 const int nmasked_events) 481 481 { 482 482 struct __kvm_pmu_event_filter f = { ··· 485 485 .flags = KVM_PMU_EVENT_FLAG_MASKED_EVENTS, 486 486 }; 487 487 488 - memcpy(f.events, masked_events, sizeof(uint64_t) * nmasked_events); 488 + memcpy(f.events, masked_events, sizeof(u64) * nmasked_events); 489 489 test_with_filter(vcpu, &f); 490 490 } 491 491 ··· 494 494 #define ALLOW_LOADS_STORES BIT(2) 495 495 496 496 struct masked_events_test { 497 - uint64_t intel_events[MAX_TEST_EVENTS]; 498 - uint64_t intel_event_end; 499 - uint64_t amd_events[MAX_TEST_EVENTS]; 500 - uint64_t amd_event_end; 497 + u64 intel_events[MAX_TEST_EVENTS]; 498 + u64 intel_event_end; 499 + u64 amd_events[MAX_TEST_EVENTS]; 500 + u64 amd_event_end; 501 501 const char *msg; 502 - uint32_t flags; 502 + u32 flags; 503 503 }; 504 504 505 505 /* ··· 582 582 }; 583 583 584 584 static int append_test_events(const struct masked_events_test *test, 585 - uint64_t *events, int nevents) 585 + u64 *events, int nevents) 586 586 { 587 - const uint64_t *evts; 587 + const u64 *evts; 588 588 int i; 589 589 590 590 evts = use_intel_pmu() ? 
test->intel_events : test->amd_events; ··· 603 603 return a == b; 604 604 } 605 605 606 - static void run_masked_events_tests(struct kvm_vcpu *vcpu, uint64_t *events, 606 + static void run_masked_events_tests(struct kvm_vcpu *vcpu, u64 *events, 607 607 int nevents) 608 608 { 609 609 int ntests = ARRAY_SIZE(test_cases); ··· 630 630 } 631 631 } 632 632 633 - static void add_dummy_events(uint64_t *events, int nevents) 633 + static void add_dummy_events(u64 *events, int nevents) 634 634 { 635 635 int i; 636 636 ··· 650 650 static void test_masked_events(struct kvm_vcpu *vcpu) 651 651 { 652 652 int nevents = KVM_PMU_EVENT_FILTER_MAX_EVENTS - MAX_TEST_EVENTS; 653 - uint64_t events[KVM_PMU_EVENT_FILTER_MAX_EVENTS]; 653 + u64 events[KVM_PMU_EVENT_FILTER_MAX_EVENTS]; 654 654 655 655 /* Run the test cases against a sparse PMU event filter. */ 656 656 run_masked_events_tests(vcpu, events, 0); ··· 668 668 return __vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f); 669 669 } 670 670 671 - static int set_pmu_single_event_filter(struct kvm_vcpu *vcpu, uint64_t event, 672 - uint32_t flags, uint32_t action) 671 + static int set_pmu_single_event_filter(struct kvm_vcpu *vcpu, u64 event, 672 + u32 flags, u32 action) 673 673 { 674 674 struct __kvm_pmu_event_filter f = { 675 675 .nevents = 1, ··· 685 685 686 686 static void test_filter_ioctl(struct kvm_vcpu *vcpu) 687 687 { 688 - uint8_t nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS); 688 + u8 nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS); 689 689 struct __kvm_pmu_event_filter f; 690 - uint64_t e = ~0ul; 690 + u64 e = ~0ul; 691 691 int r; 692 692 693 693 /* ··· 729 729 TEST_ASSERT(!r, "Masking non-existent fixed counters should be allowed"); 730 730 } 731 731 732 - static void intel_run_fixed_counter_guest_code(uint8_t idx) 732 + static void intel_run_fixed_counter_guest_code(u8 idx) 733 733 { 734 734 for (;;) { 735 735 wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0); ··· 745 745 } 746 746 } 747 747 748 - static uint64_t test_with_fixed_counter_filter(struct kvm_vcpu *vcpu, 749 - uint32_t action, uint32_t bitmap) 748 + static u64 test_with_fixed_counter_filter(struct kvm_vcpu *vcpu, 749 + u32 action, u32 bitmap) 750 750 { 751 751 struct __kvm_pmu_event_filter f = { 752 752 .action = action, ··· 757 757 return run_vcpu_to_sync(vcpu); 758 758 } 759 759 760 - static uint64_t test_set_gp_and_fixed_event_filter(struct kvm_vcpu *vcpu, 761 - uint32_t action, 762 - uint32_t bitmap) 760 + static u64 test_set_gp_and_fixed_event_filter(struct kvm_vcpu *vcpu, 761 + u32 action, 762 + u32 bitmap) 763 763 { 764 764 struct __kvm_pmu_event_filter f = base_event_filter; 765 765 ··· 770 770 return run_vcpu_to_sync(vcpu); 771 771 } 772 772 773 - static void __test_fixed_counter_bitmap(struct kvm_vcpu *vcpu, uint8_t idx, 774 - uint8_t nr_fixed_counters) 773 + static void __test_fixed_counter_bitmap(struct kvm_vcpu *vcpu, u8 idx, 774 + u8 nr_fixed_counters) 775 775 { 776 776 unsigned int i; 777 - uint32_t bitmap; 778 - uint64_t count; 777 + u32 bitmap; 778 + u64 count; 779 779 780 780 TEST_ASSERT(nr_fixed_counters < sizeof(bitmap) * 8, 781 781 "Invalid nr_fixed_counters"); ··· 815 815 816 816 static void test_fixed_counter_bitmap(void) 817 817 { 818 - uint8_t nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS); 818 + u8 nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS); 819 819 struct kvm_vm *vm; 820 820 struct kvm_vcpu *vcpu; 821 - uint8_t idx; 821 + u8 idx; 822 822 823 823 /* 824 824 * Check that 
pmu_event_filter works as expected when it's applied to
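The fixed-counter filter paths above treat the u32 bitmap as one bit per fixed counter (note the nr_fixed_counters < sizeof(bitmap) * 8 assertion). A rough standalone sketch of that arithmetic, with a made-up counter count and none of the KVM ioctl plumbing:

#include <stdint.h>
#include <stdio.h>

typedef uint8_t  u8;
typedef uint32_t u32;

/* One bit per fixed counter, as in the filter test's bitmap handling. */
static int fixed_counter_allowed(u32 bitmap, u8 idx)
{
	return !!(bitmap & (1u << idx));
}

int main(void)
{
	u8 nr_fixed_counters = 3;			/* hypothetical CPU property */
	u32 allow_all = (1u << nr_fixed_counters) - 1;	/* 0b111 */
	u8 idx;

	/* Counters below the limit are allowed; the one past it is not. */
	for (idx = 0; idx <= nr_fixed_counters; idx++)
		printf("counter %u allowed: %d\n", idx,
		       fixed_counter_allowed(allow_all, idx));
	return 0;
}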
+39 -39
tools/testing/selftests/kvm/x86/private_mem_conversions_test.c
··· 23 23 #include <processor.h> 24 24 25 25 #define BASE_DATA_SLOT 10 26 - #define BASE_DATA_GPA ((uint64_t)(1ull << 32)) 27 - #define PER_CPU_DATA_SIZE ((uint64_t)(SZ_2M + PAGE_SIZE)) 26 + #define BASE_DATA_GPA ((u64)(1ull << 32)) 27 + #define PER_CPU_DATA_SIZE ((u64)(SZ_2M + PAGE_SIZE)) 28 28 29 29 /* Horrific macro so that the line info is captured accurately :-( */ 30 30 #define memcmp_g(gpa, pattern, size) \ 31 31 do { \ 32 - uint8_t *mem = (uint8_t *)gpa; \ 32 + u8 *mem = (u8 *)gpa; \ 33 33 size_t i; \ 34 34 \ 35 35 for (i = 0; i < size; i++) \ ··· 38 38 pattern, i, gpa + i, mem[i]); \ 39 39 } while (0) 40 40 41 - static void memcmp_h(uint8_t *mem, uint64_t gpa, uint8_t pattern, size_t size) 41 + static void memcmp_h(u8 *mem, gpa_t gpa, u8 pattern, size_t size) 42 42 { 43 43 size_t i; 44 44 ··· 70 70 SYNC_PRIVATE, 71 71 }; 72 72 73 - static void guest_sync_shared(uint64_t gpa, uint64_t size, 74 - uint8_t current_pattern, uint8_t new_pattern) 73 + static void guest_sync_shared(gpa_t gpa, u64 size, 74 + u8 current_pattern, u8 new_pattern) 75 75 { 76 76 GUEST_SYNC5(SYNC_SHARED, gpa, size, current_pattern, new_pattern); 77 77 } 78 78 79 - static void guest_sync_private(uint64_t gpa, uint64_t size, uint8_t pattern) 79 + static void guest_sync_private(gpa_t gpa, u64 size, u8 pattern) 80 80 { 81 81 GUEST_SYNC4(SYNC_PRIVATE, gpa, size, pattern); 82 82 } ··· 86 86 #define MAP_GPA_SHARED BIT(1) 87 87 #define MAP_GPA_DO_FALLOCATE BIT(2) 88 88 89 - static void guest_map_mem(uint64_t gpa, uint64_t size, bool map_shared, 89 + static void guest_map_mem(gpa_t gpa, u64 size, bool map_shared, 90 90 bool do_fallocate) 91 91 { 92 - uint64_t flags = MAP_GPA_SET_ATTRIBUTES; 92 + u64 flags = MAP_GPA_SET_ATTRIBUTES; 93 93 94 94 if (map_shared) 95 95 flags |= MAP_GPA_SHARED; ··· 98 98 kvm_hypercall_map_gpa_range(gpa, size, flags); 99 99 } 100 100 101 - static void guest_map_shared(uint64_t gpa, uint64_t size, bool do_fallocate) 101 + static void guest_map_shared(gpa_t gpa, u64 size, bool do_fallocate) 102 102 { 103 103 guest_map_mem(gpa, size, true, do_fallocate); 104 104 } 105 105 106 - static void guest_map_private(uint64_t gpa, uint64_t size, bool do_fallocate) 106 + static void guest_map_private(gpa_t gpa, u64 size, bool do_fallocate) 107 107 { 108 108 guest_map_mem(gpa, size, false, do_fallocate); 109 109 } 110 110 111 111 struct { 112 - uint64_t offset; 113 - uint64_t size; 112 + u64 offset; 113 + u64 size; 114 114 } static const test_ranges[] = { 115 115 GUEST_STAGE(0, PAGE_SIZE), 116 116 GUEST_STAGE(0, SZ_2M), ··· 119 119 GUEST_STAGE(SZ_2M, PAGE_SIZE), 120 120 }; 121 121 122 - static void guest_test_explicit_conversion(uint64_t base_gpa, bool do_fallocate) 122 + static void guest_test_explicit_conversion(u64 base_gpa, bool do_fallocate) 123 123 { 124 - const uint8_t def_p = 0xaa; 125 - const uint8_t init_p = 0xcc; 126 - uint64_t j; 124 + const u8 def_p = 0xaa; 125 + const u8 init_p = 0xcc; 126 + u64 j; 127 127 int i; 128 128 129 129 /* Memory should be shared by default. 
*/ ··· 134 134 memcmp_g(base_gpa, init_p, PER_CPU_DATA_SIZE); 135 135 136 136 for (i = 0; i < ARRAY_SIZE(test_ranges); i++) { 137 - uint64_t gpa = base_gpa + test_ranges[i].offset; 138 - uint64_t size = test_ranges[i].size; 139 - uint8_t p1 = 0x11; 140 - uint8_t p2 = 0x22; 141 - uint8_t p3 = 0x33; 142 - uint8_t p4 = 0x44; 137 + gpa_t gpa = base_gpa + test_ranges[i].offset; 138 + u64 size = test_ranges[i].size; 139 + u8 p1 = 0x11; 140 + u8 p2 = 0x22; 141 + u8 p3 = 0x33; 142 + u8 p4 = 0x44; 143 143 144 144 /* 145 145 * Set the test region to pattern one to differentiate it from ··· 214 214 } 215 215 } 216 216 217 - static void guest_punch_hole(uint64_t gpa, uint64_t size) 217 + static void guest_punch_hole(gpa_t gpa, u64 size) 218 218 { 219 219 /* "Mapping" memory shared via fallocate() is done via PUNCH_HOLE. */ 220 - uint64_t flags = MAP_GPA_SHARED | MAP_GPA_DO_FALLOCATE; 220 + u64 flags = MAP_GPA_SHARED | MAP_GPA_DO_FALLOCATE; 221 221 222 222 kvm_hypercall_map_gpa_range(gpa, size, flags); 223 223 } ··· 227 227 * proper conversion. Freeing (PUNCH_HOLE) should zap SPTEs, and reallocating 228 228 * (subsequent fault) should zero memory. 229 229 */ 230 - static void guest_test_punch_hole(uint64_t base_gpa, bool precise) 230 + static void guest_test_punch_hole(u64 base_gpa, bool precise) 231 231 { 232 - const uint8_t init_p = 0xcc; 232 + const u8 init_p = 0xcc; 233 233 int i; 234 234 235 235 /* ··· 239 239 guest_map_private(base_gpa, PER_CPU_DATA_SIZE, false); 240 240 241 241 for (i = 0; i < ARRAY_SIZE(test_ranges); i++) { 242 - uint64_t gpa = base_gpa + test_ranges[i].offset; 243 - uint64_t size = test_ranges[i].size; 242 + gpa_t gpa = base_gpa + test_ranges[i].offset; 243 + u64 size = test_ranges[i].size; 244 244 245 245 /* 246 246 * Free all memory before each iteration, even for the !precise ··· 268 268 } 269 269 } 270 270 271 - static void guest_code(uint64_t base_gpa) 271 + static void guest_code(u64 base_gpa) 272 272 { 273 273 /* 274 274 * Run the conversion test twice, with and without doing fallocate() on ··· 289 289 static void handle_exit_hypercall(struct kvm_vcpu *vcpu) 290 290 { 291 291 struct kvm_run *run = vcpu->run; 292 - uint64_t gpa = run->hypercall.args[0]; 293 - uint64_t size = run->hypercall.args[1] * PAGE_SIZE; 292 + gpa_t gpa = run->hypercall.args[0]; 293 + u64 size = run->hypercall.args[1] * PAGE_SIZE; 294 294 bool set_attributes = run->hypercall.args[2] & MAP_GPA_SET_ATTRIBUTES; 295 295 bool map_shared = run->hypercall.args[2] & MAP_GPA_SHARED; 296 296 bool do_fallocate = run->hypercall.args[2] & MAP_GPA_DO_FALLOCATE; ··· 337 337 case UCALL_ABORT: 338 338 REPORT_GUEST_ASSERT(uc); 339 339 case UCALL_SYNC: { 340 - uint64_t gpa = uc.args[1]; 340 + gpa_t gpa = uc.args[1]; 341 341 size_t size = uc.args[2]; 342 342 size_t i; 343 343 ··· 347 347 348 348 for (i = 0; i < size; i += vm->page_size) { 349 349 size_t nr_bytes = min_t(size_t, vm->page_size, size - i); 350 - uint8_t *hva = addr_gpa2hva(vm, gpa + i); 350 + u8 *hva = addr_gpa2hva(vm, gpa + i); 351 351 352 352 /* In all cases, the host should observe the shared data. 
*/ 353 353 memcmp_h(hva, gpa + i, uc.args[3], nr_bytes); ··· 366 366 } 367 367 } 368 368 369 - static void test_mem_conversions(enum vm_mem_backing_src_type src_type, uint32_t nr_vcpus, 370 - uint32_t nr_memslots) 369 + static void test_mem_conversions(enum vm_mem_backing_src_type src_type, u32 nr_vcpus, 370 + u32 nr_memslots) 371 371 { 372 372 /* 373 373 * Allocate enough memory so that each vCPU's chunk of memory can be ··· 402 402 KVM_MEM_GUEST_MEMFD, memfd, slot_size * i); 403 403 404 404 for (i = 0; i < nr_vcpus; i++) { 405 - uint64_t gpa = BASE_DATA_GPA + i * per_cpu_size; 405 + gpa_t gpa = BASE_DATA_GPA + i * per_cpu_size; 406 406 407 407 vcpu_args_set(vcpus[i], 1, gpa); 408 408 ··· 450 450 int main(int argc, char *argv[]) 451 451 { 452 452 enum vm_mem_backing_src_type src_type = DEFAULT_VM_MEM_SRC; 453 - uint32_t nr_memslots = 1; 454 - uint32_t nr_vcpus = 1; 453 + u32 nr_memslots = 1; 454 + u32 nr_vcpus = 1; 455 455 int opt; 456 456 457 457 TEST_REQUIRE(kvm_check_cap(KVM_CAP_VM_TYPES) & BIT(KVM_X86_SW_PROTECTED_VM));
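The guest_map_mem() flags above are ordinary OR-ed bit flags. A standalone sketch of how they combine, assuming MAP_GPA_SET_ATTRIBUTES is BIT(0) (only the BIT(1) and BIT(2) definitions are visible in the hunk); the hypercall itself is not modelled here:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef u64 gpa_t;

#define MAP_GPA_SET_ATTRIBUTES	(1ull << 0)	/* assumed bit position */
#define MAP_GPA_SHARED		(1ull << 1)
#define MAP_GPA_DO_FALLOCATE	(1ull << 2)

/* Mirror of the flag construction in guest_map_mem(); no hypercall here. */
static u64 map_gpa_flags(int map_shared, int do_fallocate)
{
	u64 flags = MAP_GPA_SET_ATTRIBUTES;

	if (map_shared)
		flags |= MAP_GPA_SHARED;
	if (do_fallocate)
		flags |= MAP_GPA_DO_FALLOCATE;
	return flags;
}

int main(void)
{
	gpa_t gpa = 1ull << 32;		/* BASE_DATA_GPA from the test */

	printf("gpa=0x%llx flags(shared, fallocate)=0x%llx\n",
	       (unsigned long long)gpa,
	       (unsigned long long)map_gpa_flags(1, 1));
	return 0;
}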
+7 -7
tools/testing/selftests/kvm/x86/private_mem_kvm_exits_test.c
··· 17 17 #define EXITS_TEST_SIZE (EXITS_TEST_NPAGES * PAGE_SIZE) 18 18 #define EXITS_TEST_SLOT 10 19 19 20 - static uint64_t guest_repeatedly_read(void) 20 + static u64 guest_repeatedly_read(void) 21 21 { 22 - volatile uint64_t value; 22 + volatile u64 value; 23 23 24 24 while (true) 25 - value = *((uint64_t *) EXITS_TEST_GVA); 25 + value = *((u64 *)EXITS_TEST_GVA); 26 26 27 27 return value; 28 28 } 29 29 30 - static uint32_t run_vcpu_get_exit_reason(struct kvm_vcpu *vcpu) 30 + static u32 run_vcpu_get_exit_reason(struct kvm_vcpu *vcpu) 31 31 { 32 32 int r; 33 33 ··· 50 50 struct kvm_vcpu *vcpu; 51 51 pthread_t vm_thread; 52 52 void *thread_return; 53 - uint32_t exit_reason; 53 + u32 exit_reason; 54 54 55 55 vm = vm_create_shape_with_one_vcpu(protected_vm_shape, &vcpu, 56 56 guest_repeatedly_read); ··· 72 72 vm_mem_region_delete(vm, EXITS_TEST_SLOT); 73 73 74 74 pthread_join(vm_thread, &thread_return); 75 - exit_reason = (uint32_t)(uint64_t)thread_return; 75 + exit_reason = (u32)(u64)thread_return; 76 76 77 77 TEST_ASSERT_EQ(exit_reason, KVM_EXIT_MEMORY_FAULT); 78 78 TEST_ASSERT_EQ(vcpu->run->memory_fault.flags, KVM_MEMORY_EXIT_FLAG_PRIVATE); ··· 86 86 { 87 87 struct kvm_vm *vm; 88 88 struct kvm_vcpu *vcpu; 89 - uint32_t exit_reason; 89 + u32 exit_reason; 90 90 91 91 vm = vm_create_shape_with_one_vcpu(protected_vm_shape, &vcpu, 92 92 guest_repeatedly_read);
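The (u32)(u64)thread_return cast above is the usual trick of carrying a small integer through pthread's void * return value; it assumes a 64-bit build, as the selftests do. A standalone sketch with a made-up exit-reason value (not the real KVM_EXIT_MEMORY_FAULT number); build with -pthread:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;
typedef uint64_t u64;

/* Hypothetical worker returning an exit-reason-like value via void *. */
static void *worker(void *arg)
{
	u32 exit_reason = 39;	/* placeholder value, purely illustrative */

	(void)arg;
	return (void *)(u64)exit_reason;
}

int main(void)
{
	pthread_t t;
	void *ret;

	pthread_create(&t, NULL, worker, NULL);
	pthread_join(t, &ret);

	/* Widen, then truncate, mirroring (u32)(u64)thread_return above. */
	printf("exit_reason = %u\n", (u32)(u64)ret);
	return 0;
}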
+3 -3
tools/testing/selftests/kvm/x86/set_boot_cpu_id.c
··· 86 86 } 87 87 } 88 88 89 - static struct kvm_vm *create_vm(uint32_t nr_vcpus, uint32_t bsp_vcpu_id, 89 + static struct kvm_vm *create_vm(u32 nr_vcpus, u32 bsp_vcpu_id, 90 90 struct kvm_vcpu *vcpus[]) 91 91 { 92 92 struct kvm_vm *vm; 93 - uint32_t i; 93 + u32 i; 94 94 95 95 vm = vm_create(nr_vcpus); 96 96 ··· 104 104 return vm; 105 105 } 106 106 107 - static void run_vm_bsp(uint32_t bsp_vcpu_id) 107 + static void run_vm_bsp(u32 bsp_vcpu_id) 108 108 { 109 109 struct kvm_vcpu *vcpus[2]; 110 110 struct kvm_vm *vm;
+3 -3
tools/testing/selftests/kvm/x86/set_sregs_test.c
··· 46 46 X86_CR4_MCE | X86_CR4_PGE | X86_CR4_PCE | \ 47 47 X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT) 48 48 49 - static uint64_t calc_supported_cr4_feature_bits(void) 49 + static u64 calc_supported_cr4_feature_bits(void) 50 50 { 51 - uint64_t cr4 = KVM_ALWAYS_ALLOWED_CR4; 51 + u64 cr4 = KVM_ALWAYS_ALLOWED_CR4; 52 52 53 53 if (kvm_cpu_has(X86_FEATURE_UMIP)) 54 54 cr4 |= X86_CR4_UMIP; ··· 74 74 return cr4; 75 75 } 76 76 77 - static void test_cr_bits(struct kvm_vcpu *vcpu, uint64_t cr4) 77 + static void test_cr_bits(struct kvm_vcpu *vcpu, u64 cr4) 78 78 { 79 79 struct kvm_sregs sregs; 80 80 int rc, i;
+3 -3
tools/testing/selftests/kvm/x86/sev_init2_tests.c
··· 34 34 { 35 35 struct kvm_sev_cmd cmd = { 36 36 .id = cmd_id, 37 - .data = (uint64_t)data, 37 + .data = (u64)data, 38 38 .sev_fd = open_sev_dev_path_or_exit(), 39 39 }; 40 40 int ret; ··· 94 94 "VM type is KVM_X86_SW_PROTECTED_VM"); 95 95 } 96 96 97 - void test_flags(uint32_t vm_type) 97 + void test_flags(u32 vm_type) 98 98 { 99 99 int i; 100 100 ··· 104 104 "invalid flag"); 105 105 } 106 106 107 - void test_features(uint32_t vm_type, uint64_t supported_features) 107 + void test_features(u32 vm_type, u64 supported_features) 108 108 { 109 109 int i; 110 110
+11 -11
tools/testing/selftests/kvm/x86/sev_smoke_test.c
··· 13 13 #include "linux/psp-sev.h" 14 14 #include "sev.h" 15 15 16 - static void guest_sev_test_msr(uint32_t msr) 16 + static void guest_sev_test_msr(u32 msr) 17 17 { 18 - uint64_t val = rdmsr(msr); 18 + u64 val = rdmsr(msr); 19 19 20 20 wrmsr(msr, val); 21 21 GUEST_ASSERT(val == rdmsr(msr)); ··· 23 23 24 24 #define guest_sev_test_reg(reg) \ 25 25 do { \ 26 - uint64_t val = get_##reg(); \ 26 + u64 val = get_##reg(); \ 27 27 \ 28 28 set_##reg(val); \ 29 29 GUEST_ASSERT(val == get_##reg()); \ ··· 42 42 43 43 static void guest_snp_code(void) 44 44 { 45 - uint64_t sev_msr = rdmsr(MSR_AMD64_SEV); 45 + u64 sev_msr = rdmsr(MSR_AMD64_SEV); 46 46 47 47 GUEST_ASSERT(sev_msr & MSR_AMD64_SEV_ENABLED); 48 48 GUEST_ASSERT(sev_msr & MSR_AMD64_SEV_ES_ENABLED); ··· 104 104 abort(); 105 105 } 106 106 107 - static void test_sync_vmsa(uint32_t type, uint64_t policy) 107 + static void test_sync_vmsa(u32 type, u64 policy) 108 108 { 109 109 struct kvm_vcpu *vcpu; 110 110 struct kvm_vm *vm; 111 - vm_vaddr_t gva; 111 + gva_t gva; 112 112 void *hva; 113 113 114 114 double x87val = M_PI; 115 115 struct kvm_xsave __attribute__((aligned(64))) xsave = { 0 }; 116 116 117 117 vm = vm_sev_create_with_one_vcpu(type, guest_code_xsave, &vcpu); 118 - gva = vm_vaddr_alloc_shared(vm, PAGE_SIZE, KVM_UTIL_MIN_VADDR, 119 - MEM_REGION_TEST_DATA); 118 + gva = vm_alloc_shared(vm, PAGE_SIZE, KVM_UTIL_MIN_VADDR, 119 + MEM_REGION_TEST_DATA); 120 120 hva = addr_gva2hva(vm, gva); 121 121 122 122 vcpu_args_set(vcpu, 1, gva); ··· 150 150 kvm_vm_free(vm); 151 151 } 152 152 153 - static void test_sev(void *guest_code, uint32_t type, uint64_t policy) 153 + static void test_sev(void *guest_code, u32 type, u64 policy) 154 154 { 155 155 struct kvm_vcpu *vcpu; 156 156 struct kvm_vm *vm; ··· 201 201 __asm__ __volatile__("ud2"); 202 202 } 203 203 204 - static void test_sev_shutdown(uint32_t type, uint64_t policy) 204 + static void test_sev_shutdown(u32 type, u64 policy) 205 205 { 206 206 struct kvm_vcpu *vcpu; 207 207 struct kvm_vm *vm; ··· 218 218 kvm_vm_free(vm); 219 219 } 220 220 221 - static void test_sev_smoke(void *guest, uint32_t type, uint64_t policy) 221 + static void test_sev_smoke(void *guest, u32 type, u64 policy) 222 222 { 223 223 const u64 xf_mask = XFEATURE_MASK_X87_AVX; 224 224
+4 -4
tools/testing/selftests/kvm/x86/smaller_maxphyaddr_emulation_test.c
··· 20 20 21 21 static void guest_code(bool tdp_enabled) 22 22 { 23 - uint64_t error_code; 24 - uint64_t vector; 23 + u64 error_code; 24 + u64 vector; 25 25 26 26 vector = kvm_asm_safe_ec(FLDS_MEM_EAX, error_code, "a"(MEM_REGION_GVA)); 27 27 ··· 47 47 struct kvm_vcpu *vcpu; 48 48 struct kvm_vm *vm; 49 49 struct ucall uc; 50 - uint64_t *hva; 51 - uint64_t gpa; 50 + u64 *hva; 51 + gpa_t gpa; 52 52 int rc; 53 53 54 54 TEST_REQUIRE(kvm_has_cap(KVM_CAP_SMALLER_MAXPHYADDR));
+4 -4
tools/testing/selftests/kvm/x86/smm_test.c
··· 34 34 * independent subset of asm here. 35 35 * SMI handler always report back fixed stage SMRAM_STAGE. 36 36 */ 37 - uint8_t smi_handler[] = { 37 + u8 smi_handler[] = { 38 38 0xb0, SMRAM_STAGE, /* mov $SMRAM_STAGE, %al */ 39 39 0xe4, SYNC_PORT, /* in $SYNC_PORT, %al */ 40 40 0x0f, 0xaa, /* rsm */ 41 41 }; 42 42 43 - static inline void sync_with_host(uint64_t phase) 43 + static inline void sync_with_host(u64 phase) 44 44 { 45 45 asm volatile("in $" XSTR(SYNC_PORT)", %%al \n" 46 46 : "+a" (phase)); ··· 65 65 { 66 66 #define L2_GUEST_STACK_SIZE 64 67 67 unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; 68 - uint64_t apicbase = rdmsr(MSR_IA32_APICBASE); 68 + u64 apicbase = rdmsr(MSR_IA32_APICBASE); 69 69 struct svm_test_data *svm = arg; 70 70 struct vmx_pages *vmx_pages = arg; 71 71 ··· 113 113 114 114 int main(int argc, char *argv[]) 115 115 { 116 - vm_vaddr_t nested_gva = 0; 116 + gva_t nested_gva = 0; 117 117 118 118 struct kvm_vcpu *vcpu; 119 119 struct kvm_regs regs;
+7 -7
tools/testing/selftests/kvm/x86/state_test.c
··· 144 144 GUEST_SYNC(1); 145 145 146 146 if (this_cpu_has(X86_FEATURE_XSAVE)) { 147 - uint64_t supported_xcr0 = this_cpu_supported_xcr0(); 148 - uint8_t buffer[PAGE_SIZE]; 147 + u64 supported_xcr0 = this_cpu_supported_xcr0(); 148 + u8 buffer[PAGE_SIZE]; 149 149 150 150 memset(buffer, 0xcc, sizeof(buffer)); 151 151 ··· 172 172 } 173 173 174 174 if (this_cpu_has(X86_FEATURE_MPX)) { 175 - uint64_t bounds[2] = { 10, 0xffffffffull }; 176 - uint64_t output[2] = { }; 175 + u64 bounds[2] = { 10, 0xffffffffull }; 176 + u64 output[2] = { }; 177 177 178 178 GUEST_ASSERT(supported_xcr0 & XFEATURE_MASK_BNDREGS); 179 179 GUEST_ASSERT(supported_xcr0 & XFEATURE_MASK_BNDCSR); ··· 257 257 258 258 int main(int argc, char *argv[]) 259 259 { 260 - uint64_t *xstate_bv, saved_xstate_bv; 261 - vm_vaddr_t nested_gva = 0; 260 + u64 *xstate_bv, saved_xstate_bv; 261 + gva_t nested_gva = 0; 262 262 struct kvm_cpuid2 empty_cpuid = {}; 263 263 struct kvm_regs regs1, regs2; 264 264 struct kvm_vcpu *vcpu, *vcpuN; ··· 331 331 * supported features, even if something goes awry in saving 332 332 * the original snapshot. 333 333 */ 334 - xstate_bv = (void *)&((uint8_t *)state->xsave->region)[512]; 334 + xstate_bv = (void *)&((u8 *)state->xsave->region)[512]; 335 335 saved_xstate_bv = *xstate_bv; 336 336 337 337 vcpuN = __vm_vcpu_add(vm, vcpu->id + 1);
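The region[512] indexing above works because the legacy area of an XSAVE image is 512 bytes, so XSTATE_BV is the first u64 of the XSAVE header that follows it. A standalone sketch of that layout; the 64-byte alignment mirrors the test's attribute and the 0x7 mask is purely illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint8_t u8;
typedef uint64_t u64;

int main(void)
{
	/* Stand-in for an XSAVE area: 512-byte legacy region, then the header. */
	static u8 region[4096] __attribute__((aligned(64)));
	u64 *xstate_bv = (u64 *)&region[512];

	memset(region, 0, sizeof(region));
	*xstate_bv = 0x7;	/* x87 | SSE | AVX, illustrative only */

	printf("XSTATE_BV at offset 512 = 0x%llx\n",
	       (unsigned long long)*xstate_bv);
	return 0;
}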
+1 -1
tools/testing/selftests/kvm/x86/svm_int_ctl_test.c
··· 82 82 int main(int argc, char *argv[]) 83 83 { 84 84 struct kvm_vcpu *vcpu; 85 - vm_vaddr_t svm_gva; 85 + gva_t svm_gva; 86 86 struct kvm_vm *vm; 87 87 struct ucall uc; 88 88
+1 -1
tools/testing/selftests/kvm/x86/svm_lbr_nested_state.c
··· 97 97 { 98 98 struct kvm_x86_state *state = NULL; 99 99 struct kvm_vcpu *vcpu; 100 - vm_vaddr_t svm_gva; 101 100 struct kvm_vm *vm; 102 101 struct ucall uc; 102 + gva_t svm_gva; 103 103 104 104 pr_info("Testing with nested LBRV %s\n", nested_lbrv ? "enabled" : "disabled"); 105 105
+1 -1
tools/testing/selftests/kvm/x86/svm_nested_clear_efer_svme.c
··· 38 38 { 39 39 struct kvm_vcpu *vcpu; 40 40 struct kvm_vm *vm; 41 - vm_vaddr_t nested_gva = 0; 41 + gva_t nested_gva = 0; 42 42 43 43 TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM)); 44 44
+1 -1
tools/testing/selftests/kvm/x86/svm_nested_shutdown_test.c
··· 42 42 int main(int argc, char *argv[]) 43 43 { 44 44 struct kvm_vcpu *vcpu; 45 - vm_vaddr_t svm_gva; 45 + gva_t svm_gva; 46 46 struct kvm_vm *vm; 47 47 48 48 TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));
+5 -5
tools/testing/selftests/kvm/x86/svm_nested_soft_inject_test.c
··· 76 76 ud2(); 77 77 } 78 78 79 - static void l1_guest_code(struct svm_test_data *svm, uint64_t is_nmi, uint64_t idt_alt) 79 + static void l1_guest_code(struct svm_test_data *svm, u64 is_nmi, u64 idt_alt) 80 80 { 81 81 #define L2_GUEST_STACK_SIZE 64 82 82 unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; ··· 144 144 { 145 145 struct kvm_vcpu *vcpu; 146 146 struct kvm_vm *vm; 147 - vm_vaddr_t svm_gva; 148 - vm_vaddr_t idt_alt_vm; 147 + gva_t svm_gva; 148 + gva_t idt_alt_vm; 149 149 struct kvm_guest_debug debug; 150 150 151 151 pr_info("Running %s test\n", is_nmi ? "NMI" : "soft int"); ··· 161 161 if (!is_nmi) { 162 162 void *idt, *idt_alt; 163 163 164 - idt_alt_vm = vm_vaddr_alloc_page(vm); 164 + idt_alt_vm = vm_alloc_page(vm); 165 165 idt_alt = addr_gva2hva(vm, idt_alt_vm); 166 166 idt = addr_gva2hva(vm, vm->arch.idt); 167 167 memcpy(idt_alt, idt, getpagesize()); 168 168 } else { 169 169 idt_alt_vm = 0; 170 170 } 171 - vcpu_args_set(vcpu, 3, svm_gva, (uint64_t)is_nmi, (uint64_t)idt_alt_vm); 171 + vcpu_args_set(vcpu, 3, svm_gva, (u64)is_nmi, (u64)idt_alt_vm); 172 172 173 173 memset(&debug, 0, sizeof(debug)); 174 174 vcpu_guest_debug_set(vcpu, &debug);
+7 -7
tools/testing/selftests/kvm/x86/svm_nested_vmcb12_gpa.c
··· 28 28 vmcall(); 29 29 } 30 30 31 - static void l1_vmrun(struct svm_test_data *svm, u64 gpa) 31 + static void l1_vmrun(struct svm_test_data *svm, gpa_t gpa) 32 32 { 33 33 generic_svm_setup(svm, l2_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]); 34 34 35 35 asm volatile ("vmrun %[gpa]" : : [gpa] "a" (gpa) : "memory"); 36 36 } 37 37 38 - static void l1_vmload(struct svm_test_data *svm, u64 gpa) 38 + static void l1_vmload(struct svm_test_data *svm, gpa_t gpa) 39 39 { 40 40 generic_svm_setup(svm, l2_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]); 41 41 42 42 asm volatile ("vmload %[gpa]" : : [gpa] "a" (gpa) : "memory"); 43 43 } 44 44 45 - static void l1_vmsave(struct svm_test_data *svm, u64 gpa) 45 + static void l1_vmsave(struct svm_test_data *svm, gpa_t gpa) 46 46 { 47 47 generic_svm_setup(svm, l2_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]); 48 48 49 49 asm volatile ("vmsave %[gpa]" : : [gpa] "a" (gpa) : "memory"); 50 50 } 51 51 52 - static void l1_vmexit(struct svm_test_data *svm, u64 gpa) 52 + static void l1_vmexit(struct svm_test_data *svm, gpa_t gpa) 53 53 { 54 54 generic_svm_setup(svm, l2_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]); 55 55 ··· 74 74 75 75 static void test_invalid_vmcb12(struct kvm_vcpu *vcpu) 76 76 { 77 - vm_vaddr_t nested_gva = 0; 77 + gva_t nested_gva = 0; 78 78 struct ucall uc; 79 79 80 80 ··· 90 90 91 91 static void test_unmappable_vmcb12(struct kvm_vcpu *vcpu) 92 92 { 93 - vm_vaddr_t nested_gva = 0; 93 + gva_t nested_gva = 0; 94 94 95 95 vcpu_alloc_svm(vcpu->vm, &nested_gva); 96 96 vcpu_args_set(vcpu, 2, nested_gva, unmappable_gpa(vcpu)); ··· 103 103 static void test_unmappable_vmcb12_vmexit(struct kvm_vcpu *vcpu) 104 104 { 105 105 struct kvm_x86_state *state; 106 - vm_vaddr_t nested_gva = 0; 106 + gva_t nested_gva = 0; 107 107 struct ucall uc; 108 108 109 109 /*
+1 -1
tools/testing/selftests/kvm/x86/svm_vmcall_test.c
··· 36 36 int main(int argc, char *argv[]) 37 37 { 38 38 struct kvm_vcpu *vcpu; 39 - vm_vaddr_t svm_gva; 39 + gva_t svm_gva; 40 40 struct kvm_vm *vm; 41 41 42 42 TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));
+1 -1
tools/testing/selftests/kvm/x86/sync_regs_test.c
··· 20 20 #include "kvm_util.h" 21 21 #include "processor.h" 22 22 23 - #define UCALL_PIO_PORT ((uint16_t)0x1000) 23 + #define UCALL_PIO_PORT ((u16)0x1000) 24 24 25 25 struct ucall uc_none = { 26 26 .cmd = UCALL_NONE,
+2 -2
tools/testing/selftests/kvm/x86/triple_fault_event_test.c
··· 72 72 73 73 74 74 if (has_vmx) { 75 - vm_vaddr_t vmx_pages_gva; 75 + gva_t vmx_pages_gva; 76 76 77 77 vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code_vmx); 78 78 vcpu_alloc_vmx(vm, &vmx_pages_gva); 79 79 vcpu_args_set(vcpu, 1, vmx_pages_gva); 80 80 } else { 81 - vm_vaddr_t svm_gva; 81 + gva_t svm_gva; 82 82 83 83 vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code_svm); 84 84 vcpu_alloc_svm(vm, &svm_gva);
+1 -1
tools/testing/selftests/kvm/x86/tsc_msrs_test.c
··· 95 95 { 96 96 struct kvm_vcpu *vcpu; 97 97 struct kvm_vm *vm; 98 - uint64_t val; 98 + u64 val; 99 99 100 100 ksft_print_header(); 101 101 ksft_set_plan(5);
+2 -2
tools/testing/selftests/kvm/x86/tsc_scaling_sync.c
··· 21 21 #define TEST_TSC_KHZ 2345678UL 22 22 #define TEST_TSC_OFFSET 200000000 23 23 24 - uint64_t tsc_sync; 24 + u64 tsc_sync; 25 25 static void guest_code(void) 26 26 { 27 - uint64_t start_tsc, local_tsc, tmp; 27 + u64 start_tsc, local_tsc, tmp; 28 28 29 29 start_tsc = rdtsc(); 30 30 do {
+23 -22
tools/testing/selftests/kvm/x86/ucna_injection_test.c
··· 45 45 46 46 #define MCI_CTL2_RESERVED_BIT BIT_ULL(29) 47 47 48 - static uint64_t supported_mcg_caps; 48 + static u64 supported_mcg_caps; 49 49 50 50 /* 51 51 * Record states about the injected UCNA. ··· 53 53 * handler. Variables without the 'i_' prefixes are recorded in guest main 54 54 * execution thread. 55 55 */ 56 - static volatile uint64_t i_ucna_rcvd; 57 - static volatile uint64_t i_ucna_addr; 58 - static volatile uint64_t ucna_addr; 59 - static volatile uint64_t ucna_addr2; 56 + static volatile u64 i_ucna_rcvd; 57 + static volatile u64 i_ucna_addr; 58 + static volatile u64 ucna_addr; 59 + static volatile u64 ucna_addr2; 60 60 61 61 struct thread_params { 62 62 struct kvm_vcpu *vcpu; 63 - uint64_t *p_i_ucna_rcvd; 64 - uint64_t *p_i_ucna_addr; 65 - uint64_t *p_ucna_addr; 66 - uint64_t *p_ucna_addr2; 63 + u64 *p_i_ucna_rcvd; 64 + u64 *p_i_ucna_addr; 65 + u64 *p_ucna_addr; 66 + u64 *p_ucna_addr2; 67 67 }; 68 68 69 69 static void verify_apic_base_addr(void) 70 70 { 71 - uint64_t msr = rdmsr(MSR_IA32_APICBASE); 72 - uint64_t base = GET_APIC_BASE(msr); 71 + u64 msr = rdmsr(MSR_IA32_APICBASE); 72 + u64 base = GET_APIC_BASE(msr); 73 73 74 74 GUEST_ASSERT(base == APIC_DEFAULT_GPA); 75 75 } 76 76 77 77 static void ucna_injection_guest_code(void) 78 78 { 79 - uint64_t ctl2; 79 + u64 ctl2; 80 80 verify_apic_base_addr(); 81 81 xapic_enable(); 82 82 ··· 106 106 107 107 static void cmci_disabled_guest_code(void) 108 108 { 109 - uint64_t ctl2 = rdmsr(MSR_IA32_MCx_CTL2(UCNA_BANK)); 109 + u64 ctl2 = rdmsr(MSR_IA32_MCx_CTL2(UCNA_BANK)); 110 110 wrmsr(MSR_IA32_MCx_CTL2(UCNA_BANK), ctl2 | MCI_CTL2_CMCI_EN); 111 111 112 112 GUEST_DONE(); ··· 114 114 115 115 static void cmci_enabled_guest_code(void) 116 116 { 117 - uint64_t ctl2 = rdmsr(MSR_IA32_MCx_CTL2(UCNA_BANK)); 117 + u64 ctl2 = rdmsr(MSR_IA32_MCx_CTL2(UCNA_BANK)); 118 118 wrmsr(MSR_IA32_MCx_CTL2(UCNA_BANK), ctl2 | MCI_CTL2_RESERVED_BIT); 119 119 120 120 GUEST_DONE(); ··· 145 145 printf("vCPU received GP in guest.\n"); 146 146 } 147 147 148 - static void inject_ucna(struct kvm_vcpu *vcpu, uint64_t addr) { 148 + static void inject_ucna(struct kvm_vcpu *vcpu, u64 addr) 149 + { 149 150 /* 150 151 * A UCNA error is indicated with VAL=1, UC=1, PCC=0, S=0 and AR=0 in 151 152 * the IA32_MCi_STATUS register. 152 153 * MSCOD=1 (BIT[16] - MscodDataRdErr). 
153 154 * MCACOD=0x0090 (Memory controller error format, channel 0) 154 155 */ 155 - uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN | 156 + u64 status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN | 156 157 MCI_STATUS_MISCV | MCI_STATUS_ADDRV | 0x10090; 157 158 struct kvm_x86_mce mce = {}; 158 159 mce.status = status; ··· 217 216 { 218 217 struct kvm_vm *vm = vcpu->vm; 219 218 params->vcpu = vcpu; 220 - params->p_i_ucna_rcvd = (uint64_t *)addr_gva2hva(vm, (uint64_t)&i_ucna_rcvd); 221 - params->p_i_ucna_addr = (uint64_t *)addr_gva2hva(vm, (uint64_t)&i_ucna_addr); 222 - params->p_ucna_addr = (uint64_t *)addr_gva2hva(vm, (uint64_t)&ucna_addr); 223 - params->p_ucna_addr2 = (uint64_t *)addr_gva2hva(vm, (uint64_t)&ucna_addr2); 219 + params->p_i_ucna_rcvd = (u64 *)addr_gva2hva(vm, (u64)&i_ucna_rcvd); 220 + params->p_i_ucna_addr = (u64 *)addr_gva2hva(vm, (u64)&i_ucna_addr); 221 + params->p_ucna_addr = (u64 *)addr_gva2hva(vm, (u64)&ucna_addr); 222 + params->p_ucna_addr2 = (u64 *)addr_gva2hva(vm, (u64)&ucna_addr2); 224 223 225 224 run_ucna_injection(params); 226 225 ··· 243 242 244 243 static void setup_mce_cap(struct kvm_vcpu *vcpu, bool enable_cmci_p) 245 244 { 246 - uint64_t mcg_caps = MCG_CTL_P | MCG_SER_P | MCG_LMCE_P | KVM_MAX_MCE_BANKS; 245 + u64 mcg_caps = MCG_CTL_P | MCG_SER_P | MCG_LMCE_P | KVM_MAX_MCE_BANKS; 247 246 if (enable_cmci_p) 248 247 mcg_caps |= MCG_CMCI_P; 249 248 ··· 251 250 vcpu_ioctl(vcpu, KVM_X86_SETUP_MCE, &mcg_caps); 252 251 } 253 252 254 - static struct kvm_vcpu *create_vcpu_with_mce_cap(struct kvm_vm *vm, uint32_t vcpuid, 253 + static struct kvm_vcpu *create_vcpu_with_mce_cap(struct kvm_vm *vm, u32 vcpuid, 255 254 bool enable_cmci_p, void *guest_code) 256 255 { 257 256 struct kvm_vcpu *vcpu = vm_vcpu_add(vm, vcpuid, guest_code);
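The 0x10090 status literal in inject_ucna() above is just the two fields from its comment packed together: MSCOD=1 in bit 16 plus MCACOD=0x0090 in the low bits. A two-line standalone check:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

int main(void)
{
	u64 mscod  = 1ull << 16;	/* MscodDataRdErr, per the comment */
	u64 mcacod = 0x0090;		/* memory controller error, channel 0 */

	printf("mscod | mcacod = 0x%llx (expect 0x10090)\n",
	       (unsigned long long)(mscod | mcacod));
	return 0;
}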
+2 -2
tools/testing/selftests/kvm/x86/userspace_io_test.c
··· 10 10 #include "kvm_util.h" 11 11 #include "processor.h" 12 12 13 - static void guest_ins_port80(uint8_t *buffer, unsigned int count) 13 + static void guest_ins_port80(u8 *buffer, unsigned int count) 14 14 { 15 15 unsigned long end; 16 16 ··· 26 26 27 27 static void guest_code(void) 28 28 { 29 - uint8_t buffer[8192]; 29 + u8 buffer[8192]; 30 30 int i; 31 31 32 32 /*
+29 -29
tools/testing/selftests/kvm/x86/userspace_msr_exit_test.c
··· 23 23 .nmsrs = 1, 24 24 /* Test an MSR the kernel knows about. */ 25 25 .base = MSR_IA32_XSS, 26 - .bitmap = (uint8_t*)&deny_bits, 26 + .bitmap = (u8 *)&deny_bits, 27 27 }, { 28 28 .flags = KVM_MSR_FILTER_READ | 29 29 KVM_MSR_FILTER_WRITE, 30 30 .nmsrs = 1, 31 31 /* Test an MSR the kernel doesn't know about. */ 32 32 .base = MSR_IA32_FLUSH_CMD, 33 - .bitmap = (uint8_t*)&deny_bits, 33 + .bitmap = (u8 *)&deny_bits, 34 34 }, { 35 35 .flags = KVM_MSR_FILTER_READ | 36 36 KVM_MSR_FILTER_WRITE, 37 37 .nmsrs = 1, 38 38 /* Test a fabricated MSR that no one knows about. */ 39 39 .base = MSR_NON_EXISTENT, 40 - .bitmap = (uint8_t*)&deny_bits, 40 + .bitmap = (u8 *)&deny_bits, 41 41 }, 42 42 }, 43 43 }; ··· 49 49 .flags = KVM_MSR_FILTER_READ, 50 50 .nmsrs = 1, 51 51 .base = MSR_FS_BASE, 52 - .bitmap = (uint8_t*)&deny_bits, 52 + .bitmap = (u8 *)&deny_bits, 53 53 }, 54 54 }, 55 55 }; ··· 61 61 .flags = KVM_MSR_FILTER_READ, 62 62 .nmsrs = 1, 63 63 .base = MSR_GS_BASE, 64 - .bitmap = (uint8_t*)&deny_bits, 64 + .bitmap = (u8 *)&deny_bits, 65 65 }, 66 66 }, 67 67 }; 68 68 69 - static uint64_t msr_non_existent_data; 69 + static u64 msr_non_existent_data; 70 70 static int guest_exception_count; 71 71 static u32 msr_reads, msr_writes; 72 72 ··· 77 77 static u8 bitmap_c0000000_read[KVM_MSR_FILTER_MAX_BITMAP_SIZE]; 78 78 static u8 bitmap_deadbeef[1] = { 0x1 }; 79 79 80 - static void deny_msr(uint8_t *bitmap, u32 msr) 80 + static void deny_msr(u8 *bitmap, u32 msr) 81 81 { 82 82 u32 idx = msr & (KVM_MSR_FILTER_MAX_BITMAP_SIZE - 1); 83 83 ··· 142 142 * Note: Force test_rdmsr() to not be inlined to prevent the labels, 143 143 * rdmsr_start and rdmsr_end, from being defined multiple times. 144 144 */ 145 - static noinline uint64_t test_rdmsr(uint32_t msr) 145 + static noinline u64 test_rdmsr(u32 msr) 146 146 { 147 - uint32_t a, d; 147 + u32 a, d; 148 148 149 149 guest_exception_count = 0; 150 150 151 151 __asm__ __volatile__("rdmsr_start: rdmsr; rdmsr_end:" : 152 152 "=a"(a), "=d"(d) : "c"(msr) : "memory"); 153 153 154 - return a | ((uint64_t) d << 32); 154 + return a | ((u64)d << 32); 155 155 } 156 156 157 157 /* 158 158 * Note: Force test_wrmsr() to not be inlined to prevent the labels, 159 159 * wrmsr_start and wrmsr_end, from being defined multiple times. 160 160 */ 161 - static noinline void test_wrmsr(uint32_t msr, uint64_t value) 161 + static noinline void test_wrmsr(u32 msr, u64 value) 162 162 { 163 - uint32_t a = value; 164 - uint32_t d = value >> 32; 163 + u32 a = value; 164 + u32 d = value >> 32; 165 165 166 166 guest_exception_count = 0; 167 167 ··· 176 176 * Note: Force test_em_rdmsr() to not be inlined to prevent the labels, 177 177 * rdmsr_start and rdmsr_end, from being defined multiple times. 178 178 */ 179 - static noinline uint64_t test_em_rdmsr(uint32_t msr) 179 + static noinline u64 test_em_rdmsr(u32 msr) 180 180 { 181 - uint32_t a, d; 181 + u32 a, d; 182 182 183 183 guest_exception_count = 0; 184 184 185 185 __asm__ __volatile__(KVM_FEP "em_rdmsr_start: rdmsr; em_rdmsr_end:" : 186 186 "=a"(a), "=d"(d) : "c"(msr) : "memory"); 187 187 188 - return a | ((uint64_t) d << 32); 188 + return a | ((u64)d << 32); 189 189 } 190 190 191 191 /* 192 192 * Note: Force test_em_wrmsr() to not be inlined to prevent the labels, 193 193 * wrmsr_start and wrmsr_end, from being defined multiple times. 
194 194 */ 195 - static noinline void test_em_wrmsr(uint32_t msr, uint64_t value) 195 + static noinline void test_em_wrmsr(u32 msr, u64 value) 196 196 { 197 - uint32_t a = value; 198 - uint32_t d = value >> 32; 197 + u32 a = value; 198 + u32 d = value >> 32; 199 199 200 200 guest_exception_count = 0; 201 201 ··· 208 208 209 209 static void guest_code_filter_allow(void) 210 210 { 211 - uint64_t data; 211 + u64 data; 212 212 213 213 /* 214 214 * Test userspace intercepting rdmsr / wrmsr for MSR_IA32_XSS. ··· 328 328 329 329 static void guest_code_permission_bitmap(void) 330 330 { 331 - uint64_t data; 331 + u64 data; 332 332 333 333 data = test_rdmsr(MSR_FS_BASE); 334 334 GUEST_ASSERT(data == MSR_FS_BASE); ··· 391 391 } 392 392 } 393 393 394 - static void process_rdmsr(struct kvm_vcpu *vcpu, uint32_t msr_index) 394 + static void process_rdmsr(struct kvm_vcpu *vcpu, u32 msr_index) 395 395 { 396 396 struct kvm_run *run = vcpu->run; 397 397 ··· 423 423 } 424 424 } 425 425 426 - static void process_wrmsr(struct kvm_vcpu *vcpu, uint32_t msr_index) 426 + static void process_wrmsr(struct kvm_vcpu *vcpu, u32 msr_index) 427 427 { 428 428 struct kvm_run *run = vcpu->run; 429 429 ··· 464 464 uc.cmd, UCALL_DONE); 465 465 } 466 466 467 - static uint64_t process_ucall(struct kvm_vcpu *vcpu) 467 + static u64 process_ucall(struct kvm_vcpu *vcpu) 468 468 { 469 469 struct ucall uc = {}; 470 470 ··· 489 489 } 490 490 491 491 static void run_guest_then_process_rdmsr(struct kvm_vcpu *vcpu, 492 - uint32_t msr_index) 492 + u32 msr_index) 493 493 { 494 494 vcpu_run(vcpu); 495 495 process_rdmsr(vcpu, msr_index); 496 496 } 497 497 498 498 static void run_guest_then_process_wrmsr(struct kvm_vcpu *vcpu, 499 - uint32_t msr_index) 499 + u32 msr_index) 500 500 { 501 501 vcpu_run(vcpu); 502 502 process_wrmsr(vcpu, msr_index); 503 503 } 504 504 505 - static uint64_t run_guest_then_process_ucall(struct kvm_vcpu *vcpu) 505 + static u64 run_guest_then_process_ucall(struct kvm_vcpu *vcpu) 506 506 { 507 507 vcpu_run(vcpu); 508 508 return process_ucall(vcpu); ··· 519 519 KVM_ONE_VCPU_TEST(user_msr, msr_filter_allow, guest_code_filter_allow) 520 520 { 521 521 struct kvm_vm *vm = vcpu->vm; 522 - uint64_t cmd; 522 + u64 cmd; 523 523 int rc; 524 524 525 525 rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR); ··· 732 732 .flags = KVM_MSR_FILTER_READ, 733 733 .nmsrs = 1, 734 734 .base = 0, 735 - .bitmap = (uint8_t *)&deny_bits, 735 + .bitmap = (u8 *)&deny_bits, 736 736 }, 737 737 }, 738 738 };
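The rdmsr/wrmsr helpers above split a 64-bit MSR value across EAX and EDX and rejoin the halves with a | ((u64)d << 32). A host-side sketch of the same arithmetic with an arbitrary value and no inline asm:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;
typedef uint64_t u64;

/* Rejoin the EAX/EDX halves the way the test's rdmsr helpers do. */
static u64 recombine(u32 a, u32 d)
{
	return a | ((u64)d << 32);
}

int main(void)
{
	u64 value = 0x1122334455667788ull;	/* arbitrary test value */
	u32 a = (u32)value;			/* low half, as for EAX */
	u32 d = value >> 32;			/* high half, as for EDX */

	printf("round trip ok: %d\n", recombine(a, d) == value);
	return 0;
}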
+2 -2
tools/testing/selftests/kvm/x86/vmx_apic_access_test.c
··· 38 38 { 39 39 #define L2_GUEST_STACK_SIZE 64 40 40 unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; 41 - uint32_t control; 41 + u32 control; 42 42 43 43 GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages)); 44 44 GUEST_ASSERT(load_vmcs(vmx_pages)); ··· 72 72 int main(int argc, char *argv[]) 73 73 { 74 74 unsigned long apic_access_addr = ~0ul; 75 - vm_vaddr_t vmx_pages_gva; 75 + gva_t vmx_pages_gva; 76 76 unsigned long high_gpa; 77 77 struct vmx_pages *vmx; 78 78 bool done = false;
+2 -2
tools/testing/selftests/kvm/x86/vmx_apicv_updates_test.c
··· 33 33 { 34 34 #define L2_GUEST_STACK_SIZE 64 35 35 unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; 36 - uint32_t control; 36 + u32 control; 37 37 38 38 GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages)); 39 39 GUEST_ASSERT(load_vmcs(vmx_pages)); ··· 110 110 111 111 int main(int argc, char *argv[]) 112 112 { 113 - vm_vaddr_t vmx_pages_gva; 113 + gva_t vmx_pages_gva; 114 114 struct vmx_pages *vmx; 115 115 struct kvm_vcpu *vcpu; 116 116 struct kvm_vm *vm;
+1 -1
tools/testing/selftests/kvm/x86/vmx_invalid_nested_guest_state.c
··· 52 52 53 53 int main(int argc, char *argv[]) 54 54 { 55 - vm_vaddr_t vmx_pages_gva; 55 + gva_t vmx_pages_gva; 56 56 struct kvm_sregs sregs; 57 57 struct kvm_vcpu *vcpu; 58 58 struct kvm_run *run;
+10 -12
tools/testing/selftests/kvm/x86/vmx_msrs_test.c
··· 12 12 #include "kvm_util.h" 13 13 #include "vmx.h" 14 14 15 - static void vmx_fixed1_msr_test(struct kvm_vcpu *vcpu, uint32_t msr_index, 16 - uint64_t mask) 15 + static void vmx_fixed1_msr_test(struct kvm_vcpu *vcpu, u32 msr_index, u64 mask) 17 16 { 18 - uint64_t val = vcpu_get_msr(vcpu, msr_index); 19 - uint64_t bit; 17 + u64 val = vcpu_get_msr(vcpu, msr_index); 18 + u64 bit; 20 19 21 20 mask &= val; 22 21 ··· 25 26 } 26 27 } 27 28 28 - static void vmx_fixed0_msr_test(struct kvm_vcpu *vcpu, uint32_t msr_index, 29 - uint64_t mask) 29 + static void vmx_fixed0_msr_test(struct kvm_vcpu *vcpu, u32 msr_index, u64 mask) 30 30 { 31 - uint64_t val = vcpu_get_msr(vcpu, msr_index); 32 - uint64_t bit; 31 + u64 val = vcpu_get_msr(vcpu, msr_index); 32 + u64 bit; 33 33 34 34 mask = ~mask | val; 35 35 ··· 38 40 } 39 41 } 40 42 41 - static void vmx_fixed0and1_msr_test(struct kvm_vcpu *vcpu, uint32_t msr_index) 43 + static void vmx_fixed0and1_msr_test(struct kvm_vcpu *vcpu, u32 msr_index) 42 44 { 43 45 vmx_fixed0_msr_test(vcpu, msr_index, GENMASK_ULL(31, 0)); 44 46 vmx_fixed1_msr_test(vcpu, msr_index, GENMASK_ULL(63, 32)); ··· 66 68 } 67 69 68 70 static void __ia32_feature_control_msr_test(struct kvm_vcpu *vcpu, 69 - uint64_t msr_bit, 71 + u64 msr_bit, 70 72 struct kvm_x86_cpu_feature feature) 71 73 { 72 - uint64_t val; 74 + u64 val; 73 75 74 76 vcpu_clear_cpuid_feature(vcpu, feature); 75 77 ··· 88 90 89 91 static void ia32_feature_control_msr_test(struct kvm_vcpu *vcpu) 90 92 { 91 - uint64_t supported_bits = FEAT_CTL_LOCKED | 93 + u64 supported_bits = FEAT_CTL_LOCKED | 92 94 FEAT_CTL_VMX_ENABLED_INSIDE_SMX | 93 95 FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX | 94 96 FEAT_CTL_SGX_LC_ENABLED |
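For context on the fixed0/fixed1 checks above: the VMX "fixed" MSR pairs describe which control-register bits must be 1 (set in FIXED0) and which are allowed to be 1 (set in FIXED1) while VMX is on. A rough standalone validity check with hypothetical mask values; the real masks come from the vCPU's MSRs, not from here:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

/* Bits set in fixed0 must be 1 in the value; bits clear in fixed1 must be 0. */
static int cr_value_allowed(u64 val, u64 fixed0, u64 fixed1)
{
	return ((val & fixed0) == fixed0) && ((val & ~fixed1) == 0);
}

int main(void)
{
	u64 fixed0 = 0x80000021;	/* hypothetical "must be 1" mask */
	u64 fixed1 = 0xffffffff;	/* hypothetical "may be 1" mask */

	printf("0x80000031 allowed: %d\n",
	       cr_value_allowed(0x80000031, fixed0, fixed1));
	printf("0x00000001 allowed: %d\n",
	       cr_value_allowed(0x00000001, fixed0, fixed1));
	return 0;
}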
+2 -2
tools/testing/selftests/kvm/x86/vmx_nested_la57_state_test.c
··· 30 30 #define L2_GUEST_STACK_SIZE 64 31 31 unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; 32 32 u64 guest_cr4; 33 - vm_paddr_t pml5_pa, pml4_pa; 33 + gpa_t pml5_pa, pml4_pa; 34 34 u64 *pml5; 35 35 u64 exit_reason; 36 36 ··· 73 73 74 74 int main(int argc, char *argv[]) 75 75 { 76 - vm_vaddr_t vmx_pages_gva = 0; 76 + gva_t vmx_pages_gva = 0; 77 77 struct kvm_vm *vm; 78 78 struct kvm_vcpu *vcpu; 79 79 struct kvm_x86_state *state;
+6 -6
tools/testing/selftests/kvm/x86/vmx_pmu_caps_test.c
··· 52 52 .pebs_format = -1, 53 53 }; 54 54 55 - static void guest_test_perf_capabilities_gp(uint64_t val) 55 + static void guest_test_perf_capabilities_gp(u64 val) 56 56 { 57 - uint8_t vector = wrmsr_safe(MSR_IA32_PERF_CAPABILITIES, val); 57 + u8 vector = wrmsr_safe(MSR_IA32_PERF_CAPABILITIES, val); 58 58 59 59 __GUEST_ASSERT(vector == GP_VECTOR, 60 60 "Expected #GP for value '0x%lx', got %s", 61 61 val, ex_str(vector)); 62 62 } 63 63 64 - static void guest_code(uint64_t current_val) 64 + static void guest_code(u64 current_val) 65 65 { 66 66 int i; 67 67 ··· 129 129 130 130 KVM_ONE_VCPU_TEST(vmx_pmu_caps, fungible_perf_capabilities, guest_code) 131 131 { 132 - const uint64_t fungible_caps = host_cap.capabilities & ~immutable_caps.capabilities; 132 + const u64 fungible_caps = host_cap.capabilities & ~immutable_caps.capabilities; 133 133 int bit; 134 134 135 135 for_each_set_bit(bit, &fungible_caps, 64) { ··· 148 148 */ 149 149 KVM_ONE_VCPU_TEST(vmx_pmu_caps, immutable_perf_capabilities, guest_code) 150 150 { 151 - const uint64_t reserved_caps = (~host_cap.capabilities | 151 + const u64 reserved_caps = (~host_cap.capabilities | 152 152 immutable_caps.capabilities) & 153 153 ~format_caps.capabilities; 154 154 union perf_capabilities val = host_cap; ··· 210 210 211 211 KVM_ONE_VCPU_TEST(vmx_pmu_caps, perf_capabilities_unsupported, guest_code) 212 212 { 213 - uint64_t val; 213 + u64 val; 214 214 int i, r; 215 215 216 216 vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities);
+1 -1
tools/testing/selftests/kvm/x86/vmx_preemption_timer_test.c
··· 152 152 153 153 int main(int argc, char *argv[]) 154 154 { 155 - vm_vaddr_t vmx_pages_gva = 0; 155 + gva_t vmx_pages_gva = 0; 156 156 157 157 struct kvm_regs regs1, regs2; 158 158 struct kvm_vm *vm;
+32 -32
tools/testing/selftests/kvm/x86/xapic_ipi_test.c
··· 48 48 * Incremented in the IPI handler. Provides evidence to the sender that the IPI 49 49 * arrived at the destination 50 50 */ 51 - static volatile uint64_t ipis_rcvd; 51 + static volatile u64 ipis_rcvd; 52 52 53 53 /* Data struct shared between host main thread and vCPUs */ 54 54 struct test_data_page { 55 - uint32_t halter_apic_id; 56 - volatile uint64_t hlt_count; 57 - volatile uint64_t wake_count; 58 - uint64_t ipis_sent; 59 - uint64_t migrations_attempted; 60 - uint64_t migrations_completed; 61 - uint32_t icr; 62 - uint32_t icr2; 63 - uint32_t halter_tpr; 64 - uint32_t halter_ppr; 55 + u32 halter_apic_id; 56 + volatile u64 hlt_count; 57 + volatile u64 wake_count; 58 + u64 ipis_sent; 59 + u64 migrations_attempted; 60 + u64 migrations_completed; 61 + u32 icr; 62 + u32 icr2; 63 + u32 halter_tpr; 64 + u32 halter_ppr; 65 65 66 66 /* 67 67 * Record local version register as a cross-check that APIC access ··· 69 69 * arch/x86/kvm/lapic.c). If test is failing, check that values match 70 70 * to determine whether APIC access exits are working. 71 71 */ 72 - uint32_t halter_lvr; 72 + u32 halter_lvr; 73 73 }; 74 74 75 75 struct thread_params { 76 76 struct test_data_page *data; 77 77 struct kvm_vcpu *vcpu; 78 - uint64_t *pipis_rcvd; /* host address of ipis_rcvd global */ 78 + u64 *pipis_rcvd; /* host address of ipis_rcvd global */ 79 79 }; 80 80 81 81 void verify_apic_base_addr(void) 82 82 { 83 - uint64_t msr = rdmsr(MSR_IA32_APICBASE); 84 - uint64_t base = GET_APIC_BASE(msr); 83 + u64 msr = rdmsr(MSR_IA32_APICBASE); 84 + u64 base = GET_APIC_BASE(msr); 85 85 86 86 GUEST_ASSERT(base == APIC_DEFAULT_GPA); 87 87 } ··· 125 125 126 126 static void sender_guest_code(struct test_data_page *data) 127 127 { 128 - uint64_t last_wake_count; 129 - uint64_t last_hlt_count; 130 - uint64_t last_ipis_rcvd_count; 131 - uint32_t icr_val; 132 - uint32_t icr2_val; 133 - uint64_t tsc_start; 128 + u64 last_wake_count; 129 + u64 last_hlt_count; 130 + u64 last_ipis_rcvd_count; 131 + u32 icr_val; 132 + u32 icr2_val; 133 + u64 tsc_start; 134 134 135 135 verify_apic_base_addr(); 136 136 xapic_enable(); ··· 248 248 } 249 249 250 250 void do_migrations(struct test_data_page *data, int run_secs, int delay_usecs, 251 - uint64_t *pipis_rcvd) 251 + u64 *pipis_rcvd) 252 252 { 253 253 long pages_not_moved; 254 254 unsigned long nodemask = 0; ··· 259 259 int i; 260 260 int from, to; 261 261 unsigned long bit; 262 - uint64_t hlt_count; 263 - uint64_t wake_count; 264 - uint64_t ipis_sent; 262 + u64 hlt_count; 263 + u64 wake_count; 264 + u64 ipis_sent; 265 265 266 266 fprintf(stderr, "Calling migrate_pages every %d microseconds\n", 267 267 delay_usecs); ··· 393 393 int run_secs = 0; 394 394 int delay_usecs = 0; 395 395 struct test_data_page *data; 396 - vm_vaddr_t test_data_page_vaddr; 396 + gva_t test_data_page_gva; 397 397 bool migrate = false; 398 398 pthread_t threads[2]; 399 399 struct thread_params params[2]; 400 400 struct kvm_vm *vm; 401 - uint64_t *pipis_rcvd; 401 + u64 *pipis_rcvd; 402 402 403 403 get_cmdline_args(argc, argv, &run_secs, &migrate, &delay_usecs); 404 404 if (run_secs <= 0) ··· 414 414 415 415 params[1].vcpu = vm_vcpu_add(vm, 1, sender_guest_code); 416 416 417 - test_data_page_vaddr = vm_vaddr_alloc_page(vm); 418 - data = addr_gva2hva(vm, test_data_page_vaddr); 417 + test_data_page_gva = vm_alloc_page(vm); 418 + data = addr_gva2hva(vm, test_data_page_gva); 419 419 memset(data, 0, sizeof(*data)); 420 420 params[0].data = data; 421 421 params[1].data = data; 422 422 423 - vcpu_args_set(params[0].vcpu, 1, 
test_data_page_vaddr); 424 - vcpu_args_set(params[1].vcpu, 1, test_data_page_vaddr); 423 + vcpu_args_set(params[0].vcpu, 1, test_data_page_gva); 424 + vcpu_args_set(params[1].vcpu, 1, test_data_page_gva); 425 425 426 - pipis_rcvd = (uint64_t *)addr_gva2hva(vm, (uint64_t)&ipis_rcvd); 426 + pipis_rcvd = (u64 *)addr_gva2hva(vm, (u64)&ipis_rcvd); 427 427 params[0].pipis_rcvd = pipis_rcvd; 428 428 params[1].pipis_rcvd = pipis_rcvd; 429 429
+10 -10
tools/testing/selftests/kvm/x86/xapic_state_test.c
··· 23 23 xapic_enable(); 24 24 25 25 while (1) { 26 - uint64_t val = (u64)xapic_read_reg(APIC_IRR) | 26 + u64 val = (u64)xapic_read_reg(APIC_IRR) | 27 27 (u64)xapic_read_reg(APIC_IRR + 0x10) << 32; 28 28 29 29 xapic_write_reg(APIC_ICR2, val >> 32); ··· 43 43 x2apic_enable(); 44 44 45 45 do { 46 - uint64_t val = x2apic_read_reg(APIC_IRR) | 46 + u64 val = x2apic_read_reg(APIC_IRR) | 47 47 x2apic_read_reg(APIC_IRR + 0x10) << 32; 48 48 49 49 if (val & X2APIC_RSVD_BITS_MASK) { ··· 56 56 } while (1); 57 57 } 58 58 59 - static void ____test_icr(struct xapic_vcpu *x, uint64_t val) 59 + static void ____test_icr(struct xapic_vcpu *x, u64 val) 60 60 { 61 61 struct kvm_vcpu *vcpu = x->vcpu; 62 62 struct kvm_lapic_state xapic; 63 63 struct ucall uc; 64 - uint64_t icr; 64 + u64 icr; 65 65 66 66 /* 67 67 * Tell the guest what ICR value to write. Use the IRR to pass info, ··· 93 93 TEST_ASSERT_EQ(icr, val & ~APIC_ICR_BUSY); 94 94 } 95 95 96 - static void __test_icr(struct xapic_vcpu *x, uint64_t val) 96 + static void __test_icr(struct xapic_vcpu *x, u64 val) 97 97 { 98 98 /* 99 99 * The BUSY bit is reserved on both AMD and Intel, but only AMD treats ··· 109 109 static void test_icr(struct xapic_vcpu *x) 110 110 { 111 111 struct kvm_vcpu *vcpu = x->vcpu; 112 - uint64_t icr, i, j; 112 + u64 icr, i, j; 113 113 114 114 icr = APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_FIXED; 115 115 for (i = 0; i <= 0xff; i++) ··· 142 142 __test_icr(x, -1ull & ~APIC_DM_FIXED_MASK); 143 143 } 144 144 145 - static void __test_apic_id(struct kvm_vcpu *vcpu, uint64_t apic_base) 145 + static void __test_apic_id(struct kvm_vcpu *vcpu, u64 apic_base) 146 146 { 147 - uint32_t apic_id, expected; 147 + u32 apic_id, expected; 148 148 struct kvm_lapic_state xapic; 149 149 150 150 vcpu_set_msr(vcpu, MSR_IA32_APICBASE, apic_base); ··· 170 170 */ 171 171 static void test_apic_id(void) 172 172 { 173 - const uint32_t NR_VCPUS = 3; 173 + const u32 NR_VCPUS = 3; 174 174 struct kvm_vcpu *vcpus[NR_VCPUS]; 175 - uint64_t apic_base; 175 + u64 apic_base; 176 176 struct kvm_vm *vm; 177 177 int i; 178 178
+12 -12
tools/testing/selftests/kvm/x86/xapic_tpr_test.c
··· 58 58 if (is_x2apic) { 59 59 x2apic_write_reg(APIC_SELF_IPI, IRQ_VECTOR); 60 60 } else { 61 - uint32_t icr, icr2; 61 + u32 icr, icr2; 62 62 63 63 icr = APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_FIXED | 64 64 IRQ_VECTOR; ··· 69 69 } 70 70 } 71 71 72 - static uint8_t tpr_guest_tpr_get(void) 72 + static u8 tpr_guest_tpr_get(void) 73 73 { 74 - uint32_t taskpri; 74 + u32 taskpri; 75 75 76 76 if (is_x2apic) 77 77 taskpri = x2apic_read_reg(APIC_TASKPRI); ··· 81 81 return GET_APIC_PRI(taskpri); 82 82 } 83 83 84 - static uint8_t tpr_guest_ppr_get(void) 84 + static u8 tpr_guest_ppr_get(void) 85 85 { 86 - uint32_t procpri; 86 + u32 procpri; 87 87 88 88 if (is_x2apic) 89 89 procpri = x2apic_read_reg(APIC_PROCPRI); ··· 93 93 return GET_APIC_PRI(procpri); 94 94 } 95 95 96 - static uint8_t tpr_guest_cr8_get(void) 96 + static u8 tpr_guest_cr8_get(void) 97 97 { 98 - uint64_t cr8; 98 + u64 cr8; 99 99 100 100 asm volatile ("mov %%cr8, %[cr8]\n\t" : [cr8] "=r"(cr8)); 101 101 ··· 104 104 105 105 static void tpr_guest_check_tpr_ppr_cr8_equal(void) 106 106 { 107 - uint8_t tpr; 107 + u8 tpr; 108 108 109 109 tpr = tpr_guest_tpr_get(); 110 110 ··· 157 157 GUEST_DONE(); 158 158 } 159 159 160 - static uint8_t lapic_tpr_get(struct kvm_lapic_state *xapic) 160 + static u8 lapic_tpr_get(struct kvm_lapic_state *xapic) 161 161 { 162 162 return GET_APIC_PRI(*((u32 *)&xapic->regs[APIC_TASKPRI])); 163 163 } 164 164 165 - static void lapic_tpr_set(struct kvm_lapic_state *xapic, uint8_t val) 165 + static void lapic_tpr_set(struct kvm_lapic_state *xapic, u8 val) 166 166 { 167 167 u32 *taskpri = (u32 *)&xapic->regs[APIC_TASKPRI]; 168 168 169 169 *taskpri = SET_APIC_PRI(*taskpri, val); 170 170 } 171 171 172 - static uint8_t sregs_tpr(struct kvm_sregs *sregs) 172 + static u8 sregs_tpr(struct kvm_sregs *sregs) 173 173 { 174 174 return sregs->cr8 & GENMASK(3, 0); 175 175 } ··· 197 197 static void test_tpr_set_tpr_for_irq(struct kvm_vcpu *vcpu, bool mask) 198 198 { 199 199 struct kvm_lapic_state xapic; 200 - uint8_t tpr; 200 + u8 tpr; 201 201 202 202 static_assert(IRQ_VECTOR >= 16, "invalid IRQ vector number"); 203 203 tpr = IRQ_VECTOR / 16;
+4 -4
tools/testing/selftests/kvm/x86/xcr0_cpuid_test.c
··· 21 21 */ 22 22 #define ASSERT_XFEATURE_DEPENDENCIES(supported_xcr0, xfeatures, dependencies) \ 23 23 do { \ 24 - uint64_t __supported = (supported_xcr0) & ((xfeatures) | (dependencies)); \ 24 + u64 __supported = (supported_xcr0) & ((xfeatures) | (dependencies)); \ 25 25 \ 26 26 __GUEST_ASSERT((__supported & (xfeatures)) != (xfeatures) || \ 27 27 __supported == ((xfeatures) | (dependencies)), \ ··· 39 39 */ 40 40 #define ASSERT_ALL_OR_NONE_XFEATURE(supported_xcr0, xfeatures) \ 41 41 do { \ 42 - uint64_t __supported = (supported_xcr0) & (xfeatures); \ 42 + u64 __supported = (supported_xcr0) & (xfeatures); \ 43 43 \ 44 44 __GUEST_ASSERT(!__supported || __supported == (xfeatures), \ 45 45 "supported = 0x%lx, xfeatures = 0x%llx", \ ··· 48 48 49 49 static void guest_code(void) 50 50 { 51 - uint64_t initial_xcr0; 52 - uint64_t supported_xcr0; 51 + u64 initial_xcr0; 52 + u64 supported_xcr0; 53 53 int i, vector; 54 54 55 55 set_cr4(get_cr4() | X86_CR4_OSXSAVE);
+11 -11
tools/testing/selftests/kvm/x86/xen_shinfo_test.c
··· 116 116 } __attribute__((__packed__)); 117 117 118 118 struct vcpu_runstate_info { 119 - uint32_t state; 120 - uint64_t state_entry_time; 121 - uint64_t time[5]; /* Extra field for overrun check */ 119 + u32 state; 120 + u64 state_entry_time; 121 + u64 time[5]; /* Extra field for overrun check */ 122 122 }; 123 123 124 124 struct compat_vcpu_runstate_info { 125 - uint32_t state; 126 - uint64_t state_entry_time; 127 - uint64_t time[5]; 125 + u32 state; 126 + u64 state_entry_time; 127 + u64 time[5]; 128 128 } __attribute__((__packed__)); 129 129 130 130 struct arch_vcpu_info { ··· 133 133 }; 134 134 135 135 struct vcpu_info { 136 - uint8_t evtchn_upcall_pending; 137 - uint8_t evtchn_upcall_mask; 136 + u8 evtchn_upcall_pending; 137 + u8 evtchn_upcall_mask; 138 138 unsigned long evtchn_pending_sel; 139 139 struct arch_vcpu_info arch; 140 140 struct pvclock_vcpu_time_info time; ··· 145 145 unsigned long evtchn_pending[64]; 146 146 unsigned long evtchn_mask[64]; 147 147 struct pvclock_wall_clock wc; 148 - uint32_t wc_sec_hi; 148 + u32 wc_sec_hi; 149 149 /* arch_shared_info here */ 150 150 }; 151 151 ··· 658 658 printf("Testing RUNSTATE_ADJUST\n"); 659 659 rst.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST; 660 660 memset(&rst.u, 0, sizeof(rst.u)); 661 - rst.u.runstate.state = (uint64_t)-1; 661 + rst.u.runstate.state = (u64)-1; 662 662 rst.u.runstate.time_blocked = 663 663 0x5a - rs->time[RUNSTATE_blocked]; 664 664 rst.u.runstate.time_offline = ··· 1113 1113 /* Don't change the address, just trigger a write */ 1114 1114 struct kvm_xen_vcpu_attr adj = { 1115 1115 .type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST, 1116 - .u.runstate.state = (uint64_t)-1 1116 + .u.runstate.state = (u64)-1 1117 1117 }; 1118 1118 vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &adj); 1119 1119
+1 -1
tools/testing/selftests/kvm/x86/xss_msr_test.c
··· 17 17 bool xss_in_msr_list; 18 18 struct kvm_vm *vm; 19 19 struct kvm_vcpu *vcpu; 20 - uint64_t xss_val; 20 + u64 xss_val; 21 21 int i, r; 22 22 23 23 /* Create VM */