Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

KVM: selftests: Use u32 instead of uint32_t

Use u32 instead of uint32_t to make the KVM selftests code more concise
and more similar to the kernel (since selftests are primarily developed
by kernel developers).

This commit was generated with the following command:

git ls-files tools/testing/selftests/kvm | xargs sed -i 's/uint32_t/u32/g'

followed by manually adjusting whitespace to make checkpatch.pl happy.

No functional change intended.

Signed-off-by: David Matlack <dmatlack@google.com>
Link: https://patch.msgid.link/20260420212004.3938325-7-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
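
For readers outside the kernel tree, u32 is not a new type here; it is the kernel's short alias for a 32-bit unsigned integer, so the rename is purely cosmetic. A minimal sketch of why the two spellings are interchangeable, assuming the typedef chain in the kernel's shared headers (include/uapi/asm-generic/int-ll64.h and tools/include/linux/types.h; exact paths and wording may vary by version):

#include <stdint.h>

/* Kernel-style aliases, paraphrased from the kernel's shared headers. */
typedef unsigned int __u32;
typedef __u32 u32;

/* The sed rewrite is a no-op: both names denote the same 32-bit type. */
_Static_assert(sizeof(u32) == sizeof(uint32_t), "u32 must match uint32_t");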

Authored by David Matlack, committed by Sean Christopherson
0c3a8774 286e8903
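
One cheap spot-check for a mechanical rename like this (a sketch, not part of the commit; assumes a kernel tree checkout with the usual selftests build prerequisites):

# Should print nothing once the conversion is complete.
git grep -n uint32_t -- tools/testing/selftests/kvm

# Confirm the renamed code still builds.
make -C tools/testing/selftests TARGETS=kvm

Note that the other fixed-width spellings visible in the diffs below (uint8_t, uint16_t, int32_t) are deliberately left alone; only uint32_t was converted.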

Overall diffstat: +642 -646
tools/testing/selftests/kvm/arch_timer.c (+3 -3)

···
     return NULL;
 }

-static uint32_t test_get_pcpu(void)
+static u32 test_get_pcpu(void)
 {
-    uint32_t pcpu;
+    u32 pcpu;
     unsigned int nproc_conf;
     cpu_set_t online_cpuset;
···
 static int test_migrate_vcpu(unsigned int vcpu_idx)
 {
     int ret;
-    uint32_t new_pcpu = test_get_pcpu();
+    u32 new_pcpu = test_get_pcpu();

     pr_debug("Migrating vCPU: %u to pCPU: %u\n", vcpu_idx, new_pcpu);
tools/testing/selftests/kvm/arm64/arch_timer.c (+3 -3)

···
 static void guest_irq_handler(struct ex_regs *regs)
 {
     unsigned int intid = gic_get_and_ack_irq();
-    uint32_t cpu = guest_get_vcpuid();
+    u32 cpu = guest_get_vcpuid();
     struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];

     guest_validate_irq(intid, shared_data);
···
 static void guest_run_stage(struct test_vcpu_shared_data *shared_data,
                             enum guest_stage stage)
 {
-    uint32_t irq_iter, config_iter;
+    u32 irq_iter, config_iter;

     shared_data->guest_stage = stage;
     shared_data->nr_iter = 0;
···

 static void guest_code(void)
 {
-    uint32_t cpu = guest_get_vcpuid();
+    u32 cpu = guest_get_vcpuid();
     struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];

     local_irq_disable();
tools/testing/selftests/kvm/arm64/arch_timer_edge_cases.c (+13 -13)

···
 static const int32_t TVAL_MIN = INT32_MIN;

 /* After how much time we say there is no IRQ. */
-static const uint32_t TIMEOUT_NO_IRQ_US = 50000;
+static const u32 TIMEOUT_NO_IRQ_US = 50000;

 /* Counter value to use as the starting one for most tests. Set to CVAL_MAX/2 */
 static u64 DEF_CNT;

 /* Number of runs. */
-static const uint32_t NR_TEST_ITERS_DEF = 5;
+static const u32 NR_TEST_ITERS_DEF = 5;

 /* Default wait test time in ms. */
-static const uint32_t WAIT_TEST_MS = 10;
+static const u32 WAIT_TEST_MS = 10;

 /* Default "long" wait test time in ms. */
-static const uint32_t LONG_WAIT_TEST_MS = 100;
+static const u32 LONG_WAIT_TEST_MS = 100;

 /* Shared with IRQ handler. */
 struct test_vcpu_shared_data {
···
     TIMER_TVAL,
 };

-static void assert_irqs_handled(uint32_t n)
+static void assert_irqs_handled(u32 n)
 {
     int h = atomic_read(&shared_data.handled);

···
     unsigned int intid = gic_get_and_ack_irq();
     enum arch_timer timer;
     u64 cnt, cval;
-    uint32_t ctl;
+    u32 ctl;
     bool timer_condition, istatus;

     if (intid == IAR_SPURIOUS) {
···
 }

 static void set_cval_irq(enum arch_timer timer, u64 cval_cycles,
-                         uint32_t ctl)
+                         u32 ctl)
 {
     atomic_set(&shared_data.handled, 0);
     atomic_set(&shared_data.spurious, 0);
···
 }

 static void set_tval_irq(enum arch_timer timer, u64 tval_cycles,
-                         uint32_t ctl)
+                         u32 ctl)
 {
     atomic_set(&shared_data.handled, 0);
     atomic_set(&shared_data.spurious, 0);
···
     timer_set_ctl(timer, ctl);
 }

-static void set_xval_irq(enum arch_timer timer, u64 xval, uint32_t ctl,
+static void set_xval_irq(enum arch_timer timer, u64 xval, u32 ctl,
                          enum timer_view tv)
 {
     switch (tv) {
···

 static cpu_set_t default_cpuset;

-static uint32_t next_pcpu(void)
+static u32 next_pcpu(void)
 {
-    uint32_t max = get_nprocs();
-    uint32_t cur = sched_getcpu();
-    uint32_t next = cur;
+    u32 max = get_nprocs();
+    u32 cur = sched_getcpu();
+    u32 next = cur;
     cpu_set_t cpuset = default_cpuset;

     TEST_ASSERT(max > 1, "Need at least two physical cpus");
tools/testing/selftests/kvm/arm64/debug-exceptions.c (+6 -6)

···

 static void install_wp(uint8_t wpn, u64 addr)
 {
-    uint32_t wcr;
+    u32 wcr;

     wcr = DBGWCR_LEN8 | DBGWCR_RD | DBGWCR_WR | DBGWCR_EL1 | DBGWCR_E;
     write_dbgwcr(wpn, wcr);
···

 static void install_hw_bp(uint8_t bpn, u64 addr)
 {
-    uint32_t bcr;
+    u32 bcr;

     bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E;
     write_dbgbcr(bpn, bcr);
···
 static void install_wp_ctx(uint8_t addr_wp, uint8_t ctx_bp, u64 addr,
                            u64 ctx)
 {
-    uint32_t wcr;
+    u32 wcr;
     u64 ctx_bcr;

     /* Setup a context-aware breakpoint for Linked Context ID Match */
···

     /* Setup a linked watchpoint (linked to the context-aware breakpoint) */
     wcr = DBGWCR_LEN8 | DBGWCR_RD | DBGWCR_WR | DBGWCR_EL1 | DBGWCR_E |
-          DBGWCR_WT_LINK | ((uint32_t)ctx_bp << DBGWCR_LBN_SHIFT);
+          DBGWCR_WT_LINK | ((u32)ctx_bp << DBGWCR_LBN_SHIFT);
     write_dbgwcr(addr_wp, wcr);
     write_dbgwvr(addr_wp, addr);
     isb();
···
 void install_hw_bp_ctx(uint8_t addr_bp, uint8_t ctx_bp, u64 addr,
                        u64 ctx)
 {
-    uint32_t addr_bcr, ctx_bcr;
+    u32 addr_bcr, ctx_bcr;

     /* Setup a context-aware breakpoint for Linked Context ID Match */
     ctx_bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E |
···
      */
     addr_bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E |
                DBGBCR_BT_ADDR_LINK_CTX |
-               ((uint32_t)ctx_bp << DBGBCR_LBN_SHIFT);
+               ((u32)ctx_bp << DBGBCR_LBN_SHIFT);
     write_dbgbcr(addr_bp, addr_bcr);
     write_dbgbvr(addr_bp, addr);
     isb();
tools/testing/selftests/kvm/arm64/hypercalls.c (+3 -3)

···
 static int stage = TEST_STAGE_REG_IFACE;

 struct test_hvc_info {
-    uint32_t func_id;
+    u32 func_id;
     u64 arg1;
 };

···
 }

 struct st_time {
-    uint32_t rev;
-    uint32_t attr;
+    u32 rev;
+    u32 attr;
     u64 st_time;
 };

tools/testing/selftests/kvm/arm64/page_fault_test.c (+3 -3)

···
     void (*iabt_handler)(struct ex_regs *regs);
     void (*mmio_handler)(struct kvm_vm *vm, struct kvm_run *run);
     void (*fail_vcpu_run_handler)(int ret);
-    uint32_t pt_memslot_flags;
-    uint32_t data_memslot_flags;
+    u32 pt_memslot_flags;
+    u32 data_memslot_flags;
     bool skip;
     struct event_cnt expected_events;
 };
···
     events.fail_vcpu_runs += 1;
 }

-typedef uint32_t aarch64_insn_t;
+typedef u32 aarch64_insn_t;
 extern aarch64_insn_t __exec_test[2];

 noinline void __return_0x77(void)
tools/testing/selftests/kvm/arm64/psci_test.c (+1 -1)

···
     return res.a0;
 }

-static u64 psci_features(uint32_t func_id)
+static u64 psci_features(u32 func_id)
 {
     struct arm_smccc_res res;

tools/testing/selftests/kvm/arm64/set_id_regs.c (+4 -4)

···
 };

 struct test_feature_reg {
-    uint32_t reg;
+    u32 reg;
     const struct reg_ftr_bits *ftr_bits;
 };

···

     for (int i = 0; i < ARRAY_SIZE(test_regs); i++) {
         const struct reg_ftr_bits *ftr_bits = test_regs[i].ftr_bits;
-        uint32_t reg_id = test_regs[i].reg;
+        u32 reg_id = test_regs[i].reg;
         u64 reg = KVM_ARM64_SYS_REG(reg_id);
         int idx;

···
         ksft_test_result_pass("ID_AA64PFR1_EL1.MTE_frac no longer 0xF\n");
 }

-static u64 reset_mutable_bits(uint32_t id, u64 val)
+static u64 reset_mutable_bits(u32 id, u64 val)
 {
     struct test_feature_reg *reg = NULL;

···
     ksft_test_result_pass("%s\n", __func__);
 }

-static void test_assert_id_reg_unchanged(struct kvm_vcpu *vcpu, uint32_t encoding)
+static void test_assert_id_reg_unchanged(struct kvm_vcpu *vcpu, u32 encoding)
 {
     size_t idx = encoding_to_range_idx(encoding);
     u64 observed;
tools/testing/selftests/kvm/arm64/smccc_filter.c (+5 -5)

···
     for (conduit = test_runs_at_el2() ? SMC_INSN : HVC_INSN;    \
          conduit <= SMC_INSN; conduit++)

-static void guest_main(uint32_t func_id, enum smccc_conduit conduit)
+static void guest_main(u32 func_id, enum smccc_conduit conduit)
 {
     struct arm_smccc_res res;

···
     GUEST_SYNC(res.a0);
 }

-static int __set_smccc_filter(struct kvm_vm *vm, uint32_t start, uint32_t nr_functions,
+static int __set_smccc_filter(struct kvm_vm *vm, u32 start, u32 nr_functions,
                               enum kvm_smccc_filter_action action)
 {
     struct kvm_smccc_filter filter = {
···
                            KVM_ARM_VM_SMCCC_FILTER, &filter);
 }

-static void set_smccc_filter(struct kvm_vm *vm, uint32_t start, uint32_t nr_functions,
+static void set_smccc_filter(struct kvm_vm *vm, u32 start, u32 nr_functions,
                              enum kvm_smccc_filter_action action)
 {
     int ret = __set_smccc_filter(vm, start, nr_functions, action);
···
 {
     struct kvm_vcpu *vcpu;
     struct kvm_vm *vm = setup_vm(&vcpu);
-    uint32_t smc64_fn;
+    u32 smc64_fn;
     int r;

     r = __set_smccc_filter(vm, ARM_SMCCC_ARCH_WORKAROUND_1,
···
     }
 }

-static void expect_call_fwd_to_user(struct kvm_vcpu *vcpu, uint32_t func_id,
+static void expect_call_fwd_to_user(struct kvm_vcpu *vcpu, u32 func_id,
                                     enum smccc_conduit conduit)
 {
     struct kvm_run *run = vcpu->run;
tools/testing/selftests/kvm/arm64/vgic_init.c (+15 -15)

···
 struct vm_gic {
     struct kvm_vm *vm;
     int gic_fd;
-    uint32_t gic_dev_type;
+    u32 gic_dev_type;
 };

 static u64 max_phys_size;
···
 static void v3_redist_reg_get_errno(int gicv3_fd, int vcpu, int offset,
                                     int want, const char *msg)
 {
-    uint32_t ignored_val;
+    u32 ignored_val;
     int ret = __kvm_device_attr_get(gicv3_fd, KVM_DEV_ARM_VGIC_GRP_REDIST_REGS,
                                     REG_OFFSET(vcpu, offset), &ignored_val);

     TEST_ASSERT(ret && errno == want, "%s; want errno = %d", msg, want);
 }

-static void v3_redist_reg_get(int gicv3_fd, int vcpu, int offset, uint32_t want,
+static void v3_redist_reg_get(int gicv3_fd, int vcpu, int offset, u32 want,
                               const char *msg)
 {
-    uint32_t val;
+    u32 val;

     kvm_device_attr_get(gicv3_fd, KVM_DEV_ARM_VGIC_GRP_REDIST_REGS,
                         REG_OFFSET(vcpu, offset), &val);
···
     return __vcpu_run(vcpu) ? -errno : 0;
 }

-static struct vm_gic vm_gic_create_with_vcpus(uint32_t gic_dev_type,
-                                              uint32_t nr_vcpus,
+static struct vm_gic vm_gic_create_with_vcpus(u32 gic_dev_type,
+                                              u32 nr_vcpus,
                                               struct kvm_vcpu *vcpus[])
 {
     struct vm_gic v;
···
     return v;
 }

-static struct vm_gic vm_gic_create_barebones(uint32_t gic_dev_type)
+static struct vm_gic vm_gic_create_barebones(u32 gic_dev_type)
 {
     struct vm_gic v;

···
  * VGIC KVM device is created and initialized before the secondary CPUs
  * get created
  */
-static void test_vgic_then_vcpus(uint32_t gic_dev_type)
+static void test_vgic_then_vcpus(u32 gic_dev_type)
 {
     struct kvm_vcpu *vcpus[NR_VCPUS];
     struct vm_gic v;
···
 }

 /* All the VCPUs are created before the VGIC KVM device gets initialized */
-static void test_vcpus_then_vgic(uint32_t gic_dev_type)
+static void test_vcpus_then_vgic(u32 gic_dev_type)
 {
     struct kvm_vcpu *vcpus[NR_VCPUS];
     struct vm_gic v;
···
 }

 static struct vm_gic vm_gic_v3_create_with_vcpuids(int nr_vcpus,
-                                                   uint32_t vcpuids[])
+                                                   u32 vcpuids[])
 {
     struct vm_gic v;
     int i;
···
  */
 static void test_v3_last_bit_redist_regions(void)
 {
-    uint32_t vcpuids[] = { 0, 3, 5, 4, 1, 2 };
+    u32 vcpuids[] = { 0, 3, 5, 4, 1, 2 };
     struct vm_gic v;
     u64 addr;

···
 /* Test last bit with legacy region */
 static void test_v3_last_bit_single_rdist(void)
 {
-    uint32_t vcpuids[] = { 0, 3, 5, 4, 1, 2 };
+    u32 vcpuids[] = { 0, 3, 5, 4, 1, 2 };
     struct vm_gic v;
     u64 addr;

···
 /*
  * Returns 0 if it's possible to create GIC device of a given type (V2 or V3).
  */
-int test_kvm_device(uint32_t gic_dev_type)
+int test_kvm_device(u32 gic_dev_type)
 {
     struct kvm_vcpu *vcpus[NR_VCPUS];
     struct vm_gic v;
-    uint32_t other;
+    u32 other;
     int ret;

     v.vm = vm_create_with_vcpus(NR_VCPUS, guest_code, vcpus);
···
     kvm_vm_free(vm);
 }

-void run_tests(uint32_t gic_dev_type)
+void run_tests(u32 gic_dev_type)
 {
     test_vcpus_then_vgic(gic_dev_type);
     test_vgic_then_vcpus(gic_dev_type);
tools/testing/selftests/kvm/arm64/vgic_irq.c (+46 -45)

···
  * function.
  */
 struct test_args {
-    uint32_t nr_irqs; /* number of KVM supported IRQs. */
+    u32 nr_irqs; /* number of KVM supported IRQs. */
     bool eoi_split; /* 1 is eoir+dir, 0 is eoir only */
     bool level_sensitive; /* 1 is level, 0 is edge */
     int kvm_max_routes; /* output of KVM_CAP_IRQ_ROUTING */
     bool kvm_supports_irqfd; /* output of KVM_CAP_IRQFD */
-    uint32_t shared_data;
+    u32 shared_data;
 };

 /*
···

 struct kvm_inject_args {
     kvm_inject_cmd cmd;
-    uint32_t first_intid;
-    uint32_t num;
+    u32 first_intid;
+    u32 num;
     int level;
     bool expect_failure;
 };

 /* Used on the guest side to perform the hypercall. */
-static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid,
-                            uint32_t num, int level, bool expect_failure);
+static void kvm_inject_call(kvm_inject_cmd cmd, u32 first_intid,
+                            u32 num, int level, bool expect_failure);

 /* Used on the host side to get the hypercall info. */
 static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc,
···

 /* Shared between the guest main thread and the IRQ handlers. */
 volatile u64 irq_handled;
-volatile uint32_t irqnr_received[MAX_SPI + 1];
+volatile u32 irqnr_received[MAX_SPI + 1];

 static void reset_stats(void)
 {
···
     isb();
 }

-static void guest_set_irq_line(uint32_t intid, uint32_t level);
+static void guest_set_irq_line(u32 intid, u32 level);

 static void guest_irq_generic_handler(bool eoi_split, bool level_sensitive)
 {
-    uint32_t intid = gic_get_and_ack_irq();
+    u32 intid = gic_get_and_ack_irq();

     if (intid == IAR_SPURIOUS)
         return;
···
     GUEST_ASSERT(!gic_irq_get_pending(intid));
 }

-static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid,
-                            uint32_t num, int level, bool expect_failure)
+static void kvm_inject_call(kvm_inject_cmd cmd, u32 first_intid,
+                            u32 num, int level, bool expect_failure)
 {
     struct kvm_inject_args args = {
         .cmd = cmd,
···

 #define GUEST_ASSERT_IAR_EMPTY()                \
 do {                                            \
-    uint32_t _intid;                            \
+    u32 _intid;                                 \
     _intid = gic_get_and_ack_irq();             \
     GUEST_ASSERT(_intid == IAR_SPURIOUS);       \
 } while (0)
···
         gic_set_priority(i, IRQ_DEFAULT_PRIO_REG);
 }

-static void guest_set_irq_line(uint32_t intid, uint32_t level)
+static void guest_set_irq_line(u32 intid, u32 level)
 {
     kvm_inject_call(KVM_SET_IRQ_LINE, intid, 1, level, false);
 }

 static void test_inject_fail(struct test_args *args,
-                             uint32_t intid, kvm_inject_cmd cmd)
+                             u32 intid, kvm_inject_cmd cmd)
 {
     reset_stats();

···
 }

 static void guest_inject(struct test_args *args,
-                         uint32_t first_intid, uint32_t num,
-                         kvm_inject_cmd cmd)
+                         u32 first_intid, u32 num,
+                         kvm_inject_cmd cmd)
 {
-    uint32_t i;
+    u32 i;

     reset_stats();

···
  * deactivated yet.
  */
 static void guest_restore_active(struct test_args *args,
-                                 uint32_t first_intid, uint32_t num,
-                                 kvm_inject_cmd cmd)
+                                 u32 first_intid, u32 num,
+                                 kvm_inject_cmd cmd)
 {
-    uint32_t prio, intid, ap1r;
+    u32 prio, intid, ap1r;
     int i;

     /*
···
  * This function should only be used in test_inject_preemption (with IRQs
  * masked).
  */
-static uint32_t wait_for_and_activate_irq(void)
+static u32 wait_for_and_activate_irq(void)
 {
-    uint32_t intid;
+    u32 intid;

     do {
         asm volatile("wfi" : : : "memory");
···
  * interrupts for the whole test.
  */
 static void test_inject_preemption(struct test_args *args,
-                                   uint32_t first_intid, int num,
+                                   u32 first_intid, int num,
                                    const unsigned long *exclude,
                                    kvm_inject_cmd cmd)
 {
-    uint32_t intid, prio, step = KVM_PRIO_STEPS;
+    u32 intid, prio, step = KVM_PRIO_STEPS;
     int i;

     /* Set the priorities of the first (KVM_NUM_PRIOS - 1) IRQs
···
     local_irq_disable();

     for (i = 0; i < num; i++) {
-        uint32_t tmp;
+        u32 tmp;
         intid = i + first_intid;

         if (exclude && test_bit(i, exclude))
···

 static void test_injection(struct test_args *args, struct kvm_inject_desc *f)
 {
-    uint32_t nr_irqs = args->nr_irqs;
+    u32 nr_irqs = args->nr_irqs;

     if (f->sgi) {
         guest_inject(args, MIN_SGI, 1, f->cmd);
···
 static void test_injection_failure(struct test_args *args,
                                    struct kvm_inject_desc *f)
 {
-    uint32_t bad_intid[] = { args->nr_irqs, 1020, 1024, 1120, 5120, ~0U, };
+    u32 bad_intid[] = { args->nr_irqs, 1020, 1024, 1120, 5120, ~0U, };
     int i;

     for (i = 0; i < ARRAY_SIZE(bad_intid); i++)
···

 static void guest_code(struct test_args *args)
 {
-    uint32_t i, nr_irqs = args->nr_irqs;
+    u32 i, nr_irqs = args->nr_irqs;
     bool level_sensitive = args->level_sensitive;
     struct kvm_inject_desc *f, *inject_fns;

···
     GUEST_DONE();
 }

-static void kvm_irq_line_check(struct kvm_vm *vm, uint32_t intid, int level,
-                               struct test_args *test_args, bool expect_failure)
+static void kvm_irq_line_check(struct kvm_vm *vm, u32 intid, int level,
+                               struct test_args *test_args, bool expect_failure)
 {
     int ret;

···
     }
 }

-void kvm_irq_set_level_info_check(int gic_fd, uint32_t intid, int level,
-                                  bool expect_failure)
+void kvm_irq_set_level_info_check(int gic_fd, u32 intid, int level,
+                                  bool expect_failure)
 {
     if (!expect_failure) {
         kvm_irq_set_level_info(gic_fd, intid, level);
···
 }

 static void kvm_set_gsi_routing_irqchip_check(struct kvm_vm *vm,
-                                              uint32_t intid, uint32_t num, uint32_t kvm_max_routes,
-                                              bool expect_failure)
+                                              u32 intid, u32 num,
+                                              u32 kvm_max_routes,
+                                              bool expect_failure)
 {
     struct kvm_irq_routing *routing;
     int ret;
···
     }
 }

-static void kvm_irq_write_ispendr_check(int gic_fd, uint32_t intid,
+static void kvm_irq_write_ispendr_check(int gic_fd, u32 intid,
                                         struct kvm_vcpu *vcpu,
                                         bool expect_failure)
 {
···
 }

 static void kvm_routing_and_irqfd_check(struct kvm_vm *vm,
-                                        uint32_t intid, uint32_t num,
-                                        uint32_t kvm_max_routes,
-                                        bool expect_failure)
+                                        u32 intid, u32 num, u32 kvm_max_routes,
+                                        bool expect_failure)
 {
     int fd[MAX_SPI];
     u64 val;
···
                               struct test_args *test_args)
 {
     kvm_inject_cmd cmd = inject_args->cmd;
-    uint32_t intid = inject_args->first_intid;
-    uint32_t num = inject_args->num;
+    u32 intid = inject_args->first_intid;
+    u32 num = inject_args->num;
     int level = inject_args->level;
     bool expect_failure = inject_args->expect_failure;
     struct kvm_vm *vm = vcpu->vm;
     u64 tmp;
-    uint32_t i;
+    u32 i;

     /* handles the valid case: intid=0xffffffff num=1 */
     assert(intid < UINT_MAX - num || num == 1);
···
             args->eoi_split);
 }

-static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split)
+static void test_vgic(u32 nr_irqs, bool level_sensitive, bool eoi_split)
 {
     struct ucall uc;
     int gic_fd;
···
     gic_set_priority_mask(CPU_PRIO_MASK);

     if (cpuid == 0) {
-        uint32_t intid;
+        u32 intid;

         local_irq_disable();

···

 static void guest_code_group_en(struct test_args *args, int cpuid)
 {
-    uint32_t intid;
+    u32 intid;

     gic_init(GIC_V3, 2);

···

 static void guest_code_timer_spi(struct test_args *args, int cpuid)
 {
-    uint32_t intid;
+    u32 intid;
     u64 val;

     gic_init(GIC_V3, 2);
···

 int main(int argc, char **argv)
 {
-    uint32_t nr_irqs = 64;
+    u32 nr_irqs = 64;
     bool default_args = true;
     bool level_sensitive = false;
     int opt;
tools/testing/selftests/kvm/arm64/vgic_v5.c (+4 -4)

···
 struct vm_gic {
     struct kvm_vm *vm;
     int gic_fd;
-    uint32_t gic_dev_type;
+    u32 gic_dev_type;
 };

 static u64 max_phys_size;
···
     kvm_vm_free(v->vm);
 }

-static void test_vgic_v5_ppis(uint32_t gic_dev_type)
+static void test_vgic_v5_ppis(u32 gic_dev_type)
 {
     struct kvm_vcpu *vcpus[NR_VCPUS];
     struct ucall uc;
···
 /*
  * Returns 0 if it's possible to create GIC device of a given type (V5).
  */
-int test_kvm_device(uint32_t gic_dev_type)
+int test_kvm_device(u32 gic_dev_type)
 {
     struct kvm_vcpu *vcpus[NR_VCPUS];
     struct vm_gic v;
···
     return 0;
 }

-void run_tests(uint32_t gic_dev_type)
+void run_tests(u32 gic_dev_type)
 {
     pr_info("Test VGICv5 PPIs\n");
     test_vgic_v5_ppis(gic_dev_type);
tools/testing/selftests/kvm/coalesced_io_test.c (+11 -11)

···

 struct kvm_coalesced_io {
     struct kvm_coalesced_mmio_ring *ring;
-    uint32_t ring_size;
+    u32 ring_size;
     u64 mmio_gpa;
     u64 *mmio;

···

 static void vcpu_run_and_verify_io_exit(struct kvm_vcpu *vcpu,
                                         struct kvm_coalesced_io *io,
-                                        uint32_t ring_start,
-                                        uint32_t expected_exit)
+                                        u32 ring_start,
+                                        u32 expected_exit)
 {
     const bool want_pio = expected_exit == KVM_EXIT_IO;
     struct kvm_coalesced_mmio_ring *ring = io->ring;
     struct kvm_run *run = vcpu->run;
-    uint32_t pio_value;
+    u32 pio_value;

     WRITE_ONCE(ring->first, ring_start);
     WRITE_ONCE(ring->last, ring_start);
···
      * data_offset is garbage, e.g. an MMIO gpa.
      */
     if (run->exit_reason == KVM_EXIT_IO)
-        pio_value = *(uint32_t *)((void *)run + run->io.data_offset);
+        pio_value = *(u32 *)((void *)run + run->io.data_offset);
     else
         pio_value = 0;

···

 static void vcpu_run_and_verify_coalesced_io(struct kvm_vcpu *vcpu,
                                              struct kvm_coalesced_io *io,
-                                             uint32_t ring_start,
-                                             uint32_t expected_exit)
+                                             u32 ring_start,
+                                             u32 expected_exit)
 {
     struct kvm_coalesced_mmio_ring *ring = io->ring;
     int i;
···
             ring->first, ring->last, io->ring_size, ring_start);

     for (i = 0; i < io->ring_size - 1; i++) {
-        uint32_t idx = (ring->first + i) % io->ring_size;
+        u32 idx = (ring->first + i) % io->ring_size;
         struct kvm_coalesced_mmio *entry = &ring->coalesced_mmio[idx];

 #ifdef __x86_64__
         if (i & 1)
             TEST_ASSERT(entry->phys_addr == io->pio_port &&
                         entry->len == 4 && entry->pio &&
-                        *(uint32_t *)entry->data == io->pio_port + i,
+                        *(u32 *)entry->data == io->pio_port + i,
                         "Wanted 4-byte port I/O 0x%x = 0x%x in entry %u, got %u-byte %s 0x%llx = 0x%x",
                         io->pio_port, io->pio_port + i, i,
                         entry->len, entry->pio ? "PIO" : "MMIO",
-                        entry->phys_addr, *(uint32_t *)entry->data);
+                        entry->phys_addr, *(u32 *)entry->data);
         else
 #endif
             TEST_ASSERT(entry->phys_addr == io->mmio_gpa &&
···
 }

 static void test_coalesced_io(struct kvm_vcpu *vcpu,
-                              struct kvm_coalesced_io *io, uint32_t ring_start)
+                              struct kvm_coalesced_io *io, u32 ring_start)
 {
     struct kvm_coalesced_mmio_ring *ring = io->ring;

tools/testing/selftests/kvm/dirty_log_perf_test.c (+1 -1)

···
     bool partition_vcpu_memory_access;
     enum vm_mem_backing_src_type backing_src;
     int slots;
-    uint32_t write_percent;
+    u32 write_percent;
     bool random_access;
 };

tools/testing/selftests/kvm/dirty_log_test.c (+18 -18)

···
 /* Logging mode for current run */
 static enum log_mode_t host_log_mode;
 static pthread_t vcpu_thread;
-static uint32_t test_dirty_ring_count = TEST_DIRTY_RING_COUNT;
+static u32 test_dirty_ring_count = TEST_DIRTY_RING_COUNT;

 static bool clear_log_supported(void)
 {
···
 }

 static void dirty_log_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
-                                          void *bitmap, uint32_t num_pages,
-                                          uint32_t *unused)
+                                          void *bitmap, u32 num_pages,
+                                          u32 *unused)
 {
     kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap);
 }

 static void clear_log_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
-                                          void *bitmap, uint32_t num_pages,
-                                          uint32_t *unused)
+                                          void *bitmap, u32 num_pages,
+                                          u32 *unused)
 {
     kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap);
     kvm_vm_clear_dirty_log(vcpu->vm, slot, bitmap, 0, num_pages);
···
 static void dirty_ring_create_vm_done(struct kvm_vm *vm)
 {
     u64 pages;
-    uint32_t limit;
+    u32 limit;

     /*
      * We rely on vcpu exit due to full dirty ring state. Adjust
···
     smp_store_release(&gfn->flags, KVM_DIRTY_GFN_F_RESET);
 }

-static uint32_t dirty_ring_collect_one(struct kvm_dirty_gfn *dirty_gfns,
-                                       int slot, void *bitmap,
-                                       uint32_t num_pages, uint32_t *fetch_index)
+static u32 dirty_ring_collect_one(struct kvm_dirty_gfn *dirty_gfns,
+                                  int slot, void *bitmap,
+                                  u32 num_pages, u32 *fetch_index)
 {
     struct kvm_dirty_gfn *cur;
-    uint32_t count = 0;
+    u32 count = 0;

     while (true) {
         cur = &dirty_gfns[*fetch_index % test_dirty_ring_count];
···
 }

 static void dirty_ring_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
-                                           void *bitmap, uint32_t num_pages,
-                                           uint32_t *ring_buf_idx)
+                                           void *bitmap, u32 num_pages,
+                                           u32 *ring_buf_idx)
 {
-    uint32_t count, cleared;
+    u32 count, cleared;

     /* Only have one vcpu */
     count = dirty_ring_collect_one(vcpu_map_dirty_ring(vcpu),
···
     void (*create_vm_done)(struct kvm_vm *vm);
     /* Hook to collect the dirty pages into the bitmap provided */
     void (*collect_dirty_pages) (struct kvm_vcpu *vcpu, int slot,
-                                 void *bitmap, uint32_t num_pages,
-                                 uint32_t *ring_buf_idx);
+                                 void *bitmap, u32 num_pages,
+                                 u32 *ring_buf_idx);
     /* Hook to call when after each vcpu run */
     void (*after_vcpu_run)(struct kvm_vcpu *vcpu);
 } log_modes[LOG_MODE_NUM] = {
···
 }

 static void log_mode_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
-                                         void *bitmap, uint32_t num_pages,
-                                         uint32_t *ring_buf_idx)
+                                         void *bitmap, u32 num_pages,
+                                         u32 *ring_buf_idx)
 {
     struct log_mode *mode = &log_modes[host_log_mode];

···
     struct kvm_vcpu *vcpu;
     struct kvm_vm *vm;
     unsigned long *bmap[2];
-    uint32_t ring_buf_idx = 0;
+    u32 ring_buf_idx = 0;
     int sem_val;

     if (!log_mode_supported()) {
tools/testing/selftests/kvm/guest_print_test.c (+3 -3)

···
     TYPE(test_type_u64, U64u, "%lu", u64)           \
     TYPE(test_type_x64, U64x, "0x%lx", u64)         \
     TYPE(test_type_X64, U64X, "0x%lX", u64)         \
-    TYPE(test_type_u32, U32u, "%u", uint32_t)       \
-    TYPE(test_type_x32, U32x, "0x%x", uint32_t)     \
-    TYPE(test_type_X32, U32X, "0x%X", uint32_t)     \
+    TYPE(test_type_u32, U32u, "%u", u32)            \
+    TYPE(test_type_x32, U32x, "0x%x", u32)          \
+    TYPE(test_type_X32, U32X, "0x%X", u32)          \
     TYPE(test_type_int, INT, "%d", int)             \
     TYPE(test_type_char, CHAR, "%c", char)          \
     TYPE(test_type_str, STR, "'%s'", const char *)  \
tools/testing/selftests/kvm/hardware_disable_test.c (+3 -3)

···
     TEST_ASSERT(r == 0, "%s: failed to join thread", __func__);
 }

-static void run_test(uint32_t run)
+static void run_test(u32 run)
 {
     struct kvm_vcpu *vcpu;
     struct kvm_vm *vm;
···
     pthread_t threads[VCPU_NUM];
     pthread_t throw_away;
     void *b;
-    uint32_t i, j;
+    u32 i, j;

     CPU_ZERO(&cpu_set);
     for (i = 0; i < VCPU_NUM; i++)
···

 int main(int argc, char **argv)
 {
-    uint32_t i;
+    u32 i;
     int s, r;
     pid_t pid;

tools/testing/selftests/kvm/include/arm64/arch_timer.h (+5 -5)

···
 #define cycles_to_usec(cycles) \
     ((u64)(cycles) * 1000000 / timer_get_cntfrq())

-static inline uint32_t timer_get_cntfrq(void)
+static inline u32 timer_get_cntfrq(void)
 {
     return read_sysreg(cntfrq_el0);
 }
···
     return 0;
 }

-static inline void timer_set_ctl(enum arch_timer timer, uint32_t ctl)
+static inline void timer_set_ctl(enum arch_timer timer, u32 ctl)
 {
     switch (timer) {
     case VIRTUAL:
···
     isb();
 }

-static inline uint32_t timer_get_ctl(enum arch_timer timer)
+static inline u32 timer_get_ctl(enum arch_timer timer)
 {
     switch (timer) {
     case VIRTUAL:
···
     return 0;
 }

-static inline void timer_set_next_cval_ms(enum arch_timer timer, uint32_t msec)
+static inline void timer_set_next_cval_ms(enum arch_timer timer, u32 msec)
 {
     u64 now_ct = timer_get_cntct(timer);
     u64 next_ct = now_ct + msec_to_cycles(msec);
···
     timer_set_cval(timer, next_ct);
 }

-static inline void timer_set_next_tval_ms(enum arch_timer timer, uint32_t msec)
+static inline void timer_set_next_tval_ms(enum arch_timer timer, u32 msec)
 {
     timer_set_tval(timer, msec_to_cycles(msec));
 }
tools/testing/selftests/kvm/include/arm64/gic.h (+1 -1)

···
  */
 void gic_set_eoi_split(bool split);
 void gic_set_priority_mask(u64 mask);
-void gic_set_priority(uint32_t intid, uint32_t prio);
+void gic_set_priority(u32 intid, u32 prio);
 void gic_irq_set_active(unsigned int intid);
 void gic_irq_clear_active(unsigned int intid);
 bool gic_irq_get_active(unsigned int intid);
tools/testing/selftests/kvm/include/arm64/processor.h (+5 -5)

···
 #define PTE_ADDR_51_50_LPA2_SHIFT 8

 void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init);
-struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
+struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, u32 vcpu_id,
                                   struct kvm_vcpu_init *init, void *guest_code);

 struct ex_regs {
···
     (v) == VECTOR_SYNC_LOWER_64 ||  \
     (v) == VECTOR_SYNC_LOWER_32)

-void aarch64_get_supported_page_sizes(uint32_t ipa, uint32_t *ipa4k,
-                                      uint32_t *ipa16k, uint32_t *ipa64k);
+void aarch64_get_supported_page_sizes(u32 ipa, u32 *ipa4k,
+                                      u32 *ipa16k, u32 *ipa64k);

 void vm_init_descriptor_tables(struct kvm_vm *vm);
 void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu);
···
  * @res: pointer to write the return values from registers x0-x3
  *
  */
-void smccc_hvc(uint32_t function_id, u64 arg0, u64 arg1,
+void smccc_hvc(u32 function_id, u64 arg0, u64 arg1,
                u64 arg2, u64 arg3, u64 arg4, u64 arg5,
                u64 arg6, struct arm_smccc_res *res);

···
  * @res: pointer to write the return values from registers x0-x3
  *
  */
-void smccc_smc(uint32_t function_id, u64 arg0, u64 arg1,
+void smccc_smc(u32 function_id, u64 arg0, u64 arg1,
                u64 arg2, u64 arg3, u64 arg4, u64 arg5,
                u64 arg6, struct arm_smccc_res *res);

tools/testing/selftests/kvm/include/arm64/vgic.h (+8 -8)

···
     index)

 bool kvm_supports_vgic_v3(void);
-int __vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs);
+int __vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, u32 nr_irqs);
 void __vgic_v3_init(int fd);
-int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs);
+int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, u32 nr_irqs);

 #define VGIC_MAX_RESERVED 1023

-void kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level);
-int _kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level);
+void kvm_irq_set_level_info(int gic_fd, u32 intid, int level);
+int _kvm_irq_set_level_info(int gic_fd, u32 intid, int level);

-void kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level);
-int _kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level);
+void kvm_arm_irq_line(struct kvm_vm *vm, u32 intid, int level);
+int _kvm_arm_irq_line(struct kvm_vm *vm, u32 intid, int level);

 /* The vcpu arg only applies to private interrupts. */
-void kvm_irq_write_ispendr(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu);
-void kvm_irq_write_isactiver(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu);
+void kvm_irq_write_ispendr(int gic_fd, u32 intid, struct kvm_vcpu *vcpu);
+void kvm_irq_write_isactiver(int gic_fd, u32 intid, struct kvm_vcpu *vcpu);

 #define KVM_IRQCHIP_NUM_PINS (1020 - 32)

tools/testing/selftests/kvm/include/kvm_util.h (+61 -63)

···

 struct kvm_vcpu {
     struct list_head list;
-    uint32_t id;
+    u32 id;
     int fd;
     struct kvm_vm *vm;
     struct kvm_run *run;
···
 #endif
     struct kvm_binary_stats stats;
     struct kvm_dirty_gfn *dirty_gfns;
-    uint32_t fetch_index;
-    uint32_t dirty_gfns_count;
+    u32 fetch_index;
+    u32 dirty_gfns_count;
 };

 struct userspace_mem_regions {
···
     bool has_irqchip;
     gpa_t ucall_mmio_addr;
     gva_t handlers;
-    uint32_t dirty_ring_size;
+    u32 dirty_ring_size;
     u64 gpa_tag_mask;

     /*
···
      * allocators, e.g., lib/elf uses the memslots[MEM_REGION_CODE]
      * memslot.
      */
-    uint32_t memslots[NR_MEM_REGIONS];
+    u32 memslots[NR_MEM_REGIONS];
 };

 struct vcpu_reg_sublist {
···
     else

 struct userspace_mem_region *
-memslot2region(struct kvm_vm *vm, uint32_t memslot);
+memslot2region(struct kvm_vm *vm, u32 memslot);

 static inline struct userspace_mem_region *vm_get_mem_region(struct kvm_vm *vm,
                                                              enum kvm_mem_region_type type)
···
 };

 struct vm_shape {
-    uint32_t type;
+    u32 type;
     uint8_t mode;
     uint8_t pad0;
     uint16_t pad1;
···
     return ret;
 }

-static inline int __vm_enable_cap(struct kvm_vm *vm, uint32_t cap, u64 arg0)
+static inline int __vm_enable_cap(struct kvm_vm *vm, u32 cap, u64 arg0)
 {
     struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

     return __vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
 }

-static inline void vm_enable_cap(struct kvm_vm *vm, uint32_t cap, u64 arg0)
+static inline void vm_enable_cap(struct kvm_vm *vm, u32 cap, u64 arg0)
 {
     struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

···
     vm_guest_mem_fallocate(vm, gpa, size, false);
 }

-void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
-const char *vm_guest_mode_string(uint32_t i);
+void vm_enable_dirty_ring(struct kvm_vm *vm, u32 ring_size);
+const char *vm_guest_mode_string(u32 i);

 void kvm_vm_free(struct kvm_vm *vmp);
 void kvm_vm_restart(struct kvm_vm *vmp);
···
 }

 static inline void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
-                                          u64 first_page, uint32_t num_pages)
+                                          u64 first_page, u32 num_pages)
 {
     struct kvm_clear_dirty_log args = {
         .dirty_bitmap = log,
···
     vm_ioctl(vm, KVM_CLEAR_DIRTY_LOG, &args);
 }

-static inline uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
+static inline u32 kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
 {
     return __vm_ioctl(vm, KVM_RESET_DIRTY_RINGS, NULL);
 }
···
     return fd;
 }

-static inline int __kvm_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd,
-                              uint32_t flags)
+static inline int __kvm_irqfd(struct kvm_vm *vm, u32 gsi, int eventfd,
+                              u32 flags)
 {
     struct kvm_irqfd irqfd = {
         .fd = eventfd,
···
     return __vm_ioctl(vm, KVM_IRQFD, &irqfd);
 }

-static inline void kvm_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd,
-                             uint32_t flags)
+static inline void kvm_irqfd(struct kvm_vm *vm, u32 gsi, int eventfd, u32 flags)
 {
     int ret = __kvm_irqfd(vm, gsi, eventfd, flags);

     TEST_ASSERT_VM_VCPU_IOCTL(!ret, KVM_IRQFD, ret, vm);
 }

-static inline void kvm_assign_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd)
+static inline void kvm_assign_irqfd(struct kvm_vm *vm, u32 gsi, int eventfd)
 {
     kvm_irqfd(vm, gsi, eventfd, 0);
 }

-static inline void kvm_deassign_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd)
+static inline void kvm_deassign_irqfd(struct kvm_vm *vm, u32 gsi, int eventfd)
 {
     kvm_irqfd(vm, gsi, eventfd, KVM_IRQFD_FLAG_DEASSIGN);
 }
···
     return fd;
 }

-void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
+void vm_set_user_memory_region(struct kvm_vm *vm, u32 slot, u32 flags,
                                u64 gpa, u64 size, void *hva);
-int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
+int __vm_set_user_memory_region(struct kvm_vm *vm, u32 slot, u32 flags,
                                 u64 gpa, u64 size, void *hva);
-void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
+void vm_set_user_memory_region2(struct kvm_vm *vm, u32 slot, u32 flags,
                                 u64 gpa, u64 size, void *hva,
-                                uint32_t guest_memfd, u64 guest_memfd_offset);
-int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
+                                u32 guest_memfd, u64 guest_memfd_offset);
+int __vm_set_user_memory_region2(struct kvm_vm *vm, u32 slot, u32 flags,
                                  u64 gpa, u64 size, void *hva,
-                                 uint32_t guest_memfd, u64 guest_memfd_offset);
+                                 u32 guest_memfd, u64 guest_memfd_offset);

 void vm_userspace_mem_region_add(struct kvm_vm *vm,
                                  enum vm_mem_backing_src_type src_type,
-                                 u64 gpa, uint32_t slot, u64 npages,
-                                 uint32_t flags);
+                                 u64 gpa, u32 slot, u64 npages, u32 flags);
 void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
-                u64 gpa, uint32_t slot, u64 npages, uint32_t flags,
+                u64 gpa, u32 slot, u64 npages, u32 flags,
                 int guest_memfd_fd, u64 guest_memfd_offset);

 #ifndef vm_arch_has_protected_memory
···
 }
 #endif

-void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
-void vm_mem_region_reload(struct kvm_vm *vm, uint32_t slot);
-void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, u64 new_gpa);
-void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
-struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
+void vm_mem_region_set_flags(struct kvm_vm *vm, u32 slot, u32 flags);
+void vm_mem_region_reload(struct kvm_vm *vm, u32 slot);
+void vm_mem_region_move(struct kvm_vm *vm, u32 slot, u64 new_gpa);
+void vm_mem_region_delete(struct kvm_vm *vm, u32 slot);
+struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, u32 vcpu_id);
 void vm_populate_vaddr_bitmap(struct kvm_vm *vm);
 gva_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, gva_t vaddr_min);
 gva_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, gva_t vaddr_min);
···
 void vcpu_run_complete_io(struct kvm_vcpu *vcpu);
 struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu);

-static inline void vcpu_enable_cap(struct kvm_vcpu *vcpu, uint32_t cap,
+static inline void vcpu_enable_cap(struct kvm_vcpu *vcpu, u32 cap,
                                    u64 arg0)
 {
     struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };
···
     return fd;
 }

-int __kvm_has_device_attr(int dev_fd, uint32_t group, u64 attr);
+int __kvm_has_device_attr(int dev_fd, u32 group, u64 attr);

-static inline void kvm_has_device_attr(int dev_fd, uint32_t group, u64 attr)
+static inline void kvm_has_device_attr(int dev_fd, u32 group, u64 attr)
 {
     int ret = __kvm_has_device_attr(dev_fd, group, attr);

     TEST_ASSERT(!ret, "KVM_HAS_DEVICE_ATTR failed, rc: %i errno: %i", ret, errno);
 }

-int __kvm_device_attr_get(int dev_fd, uint32_t group, u64 attr, void *val);
+int __kvm_device_attr_get(int dev_fd, u32 group, u64 attr, void *val);

-static inline void kvm_device_attr_get(int dev_fd, uint32_t group,
+static inline void kvm_device_attr_get(int dev_fd, u32 group,
                                        u64 attr, void *val)
 {
     int ret = __kvm_device_attr_get(dev_fd, group, attr, val);

     TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_GET_DEVICE_ATTR, ret));
 }

-int __kvm_device_attr_set(int dev_fd, uint32_t group, u64 attr, void *val);
+int __kvm_device_attr_set(int dev_fd, u32 group, u64 attr, void *val);

-static inline void kvm_device_attr_set(int dev_fd, uint32_t group,
+static inline void kvm_device_attr_set(int dev_fd, u32 group,
                                        u64 attr, void *val)
 {
     int ret = __kvm_device_attr_set(dev_fd, group, attr, val);

     TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_DEVICE_ATTR, ret));
 }

-static inline int __vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
+static inline int __vcpu_has_device_attr(struct kvm_vcpu *vcpu, u32 group,
                                          u64 attr)
 {
     return __kvm_has_device_attr(vcpu->fd, group, attr);
 }

-static inline void vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
+static inline void vcpu_has_device_attr(struct kvm_vcpu *vcpu, u32 group,
                                         u64 attr)
 {
     kvm_has_device_attr(vcpu->fd, group, attr);
 }

-static inline int __vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
+static inline int __vcpu_device_attr_get(struct kvm_vcpu *vcpu, u32 group,
                                          u64 attr, void *val)
 {
     return __kvm_device_attr_get(vcpu->fd, group, attr, val);
 }

-static inline void vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
+static inline void vcpu_device_attr_get(struct kvm_vcpu *vcpu, u32 group,
                                         u64 attr, void *val)
 {
     kvm_device_attr_get(vcpu->fd, group, attr, val);
 }

-static inline int __vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
+static inline int __vcpu_device_attr_set(struct kvm_vcpu *vcpu, u32 group,
                                          u64 attr, void *val)
 {
     return __kvm_device_attr_set(vcpu->fd, group, attr, val);
 }

-static inline void vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
+static inline void vcpu_device_attr_set(struct kvm_vcpu *vcpu, u32 group,
                                         u64 attr, void *val)
 {
     kvm_device_attr_set(vcpu->fd, group, attr, val);
···
  */
 void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...);

-void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
-int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
+void kvm_irq_line(struct kvm_vm *vm, u32 irq, int level);
+int _kvm_irq_line(struct kvm_vm *vm, u32 irq, int level);

 #define KVM_MAX_IRQ_ROUTES 4096

 struct kvm_irq_routing *kvm_gsi_routing_create(void);
 void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
-                                 uint32_t gsi, uint32_t pin);
+                                 u32 gsi, u32 pin);
 int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
 void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);

 const char *exit_reason_str(unsigned int exit_reason);

-gpa_t vm_phy_page_alloc(struct kvm_vm *vm, gpa_t paddr_min, uint32_t memslot);
+gpa_t vm_phy_page_alloc(struct kvm_vm *vm, gpa_t paddr_min, u32 memslot);
 gpa_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
-                           gpa_t paddr_min, uint32_t memslot,
+                           gpa_t paddr_min, u32 memslot,
                            bool protected);
 gpa_t vm_alloc_page_table(struct kvm_vm *vm);

 static inline gpa_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
-                                       gpa_t paddr_min, uint32_t memslot)
+                                       gpa_t paddr_min, u32 memslot)
 {
     /*
      * By default, allocate memory as protected for VMs that support
···
  * calculate the amount of memory needed for per-vCPU data, e.g. stacks.
  */
 struct kvm_vm *____vm_create(struct vm_shape shape);
-struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
+struct kvm_vm *__vm_create(struct vm_shape shape, u32 nr_runnable_vcpus,
                            u64 nr_extra_pages);

 static inline struct kvm_vm *vm_create_barebones(void)
···
     return ____vm_create(shape);
 }

-static inline struct kvm_vm *vm_create(uint32_t nr_runnable_vcpus)
+static inline struct kvm_vm *vm_create(u32 nr_runnable_vcpus)
 {
     return __vm_create(VM_SHAPE_DEFAULT, nr_runnable_vcpus, 0);
 }

-struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus,
+struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, u32 nr_vcpus,
                                       u64 extra_mem_pages,
                                       void *guest_code, struct kvm_vcpu *vcpus[]);

-static inline struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus,
+static inline struct kvm_vm *vm_create_with_vcpus(u32 nr_vcpus,
                                                   void *guest_code,
                                                   struct kvm_vcpu *vcpus[])
 {
···

 struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);

-void kvm_set_files_rlimit(uint32_t nr_vcpus);
+void kvm_set_files_rlimit(u32 nr_vcpus);

 int __pin_task_to_cpu(pthread_t task, int cpu);

···
 }

 void kvm_print_vcpu_pinning_help(void);
-void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
+void kvm_parse_vcpu_pinning(const char *pcpus_string, u32 vcpu_to_pcpu[],
                             int nr_vcpus);

 unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
···
  * vm - Virtual Machine
  * vcpu_id - The id of the VCPU to add to the VM.
  */
-struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id);
 void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code);

-static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
+static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, u32 vcpu_id,
                                            void *guest_code)
 {
     struct kvm_vcpu *vcpu = vm_arch_vcpu_add(vm, vcpu_id);
···
 }

 /* Re-create a vCPU after restarting a VM, e.g. for state save/restore tests. */
-struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id);
+struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, u32 vcpu_id);

 static inline struct kvm_vcpu *vm_vcpu_recreate(struct kvm_vm *vm,
-                                                uint32_t vcpu_id)
+                                                u32 vcpu_id)
 {
     return vm_arch_vcpu_recreate(vm, vcpu_id);
 }
···

 bool vm_is_gpa_protected(struct kvm_vm *vm, gpa_t paddr);

-uint32_t guest_get_vcpuid(void);
+u32 guest_get_vcpuid(void);

 bool kvm_arch_has_default_irqchip(void);

tools/testing/selftests/kvm/include/memstress.h (+5 -5)

···
     u64 gpa;
     u64 size;
     u64 guest_page_size;
-    uint32_t random_seed;
-    uint32_t write_percent;
+    u32 random_seed;
+    u32 write_percent;

     /* Run vCPUs in L2 instead of L1, if the architecture supports it. */
     bool nested;
···
     /* True if all vCPUs are pinned to pCPUs */
     bool pin_vcpus;
     /* The vCPU=>pCPU pinning map. Only valid if pin_vcpus is true. */
-    uint32_t vcpu_to_pcpu[KVM_MAX_VCPUS];
+    u32 vcpu_to_pcpu[KVM_MAX_VCPUS];

     /* Test is done, stop running vCPUs. */
     bool stop_vcpus;
···
                             bool partition_vcpu_memory_access);
 void memstress_destroy_vm(struct kvm_vm *vm);

-void memstress_set_write_percent(struct kvm_vm *vm, uint32_t write_percent);
+void memstress_set_write_percent(struct kvm_vm *vm, u32 write_percent);
 void memstress_set_random_access(struct kvm_vm *vm, bool random_access);

 void memstress_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct memstress_vcpu_args *));
 void memstress_join_vcpu_threads(int vcpus);
-void memstress_guest_code(uint32_t vcpu_id);
+void memstress_guest_code(u32 vcpu_id);

 u64 memstress_nested_pages(int nr_vcpus);
 void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[]);
tools/testing/selftests/kvm/include/riscv/arch_timer.h (+1 -1)

···
     csr_clear(CSR_SIE, IE_TIE);
 }

-static inline void timer_set_next_cmp_ms(uint32_t msec)
+static inline void timer_set_next_cmp_ms(u32 msec)
 {
     u64 now_ct = timer_get_cycles();
     u64 next_ct = now_ct + msec_to_cycles(msec);
tools/testing/selftests/kvm/include/test_util.h (+10 -10)

···
 struct timespec timespec_div(struct timespec ts, int divisor);

 struct guest_random_state {
-    uint32_t seed;
+    u32 seed;
 };

-extern uint32_t guest_random_seed;
+extern u32 guest_random_seed;
 extern struct guest_random_state guest_rng;

-struct guest_random_state new_guest_random_state(uint32_t seed);
-uint32_t guest_random_u32(struct guest_random_state *state);
+struct guest_random_state new_guest_random_state(u32 seed);
+u32 guest_random_u32(struct guest_random_state *state);

 static inline bool __guest_random_bool(struct guest_random_state *state,
                                        uint8_t percent)
···

 struct vm_mem_backing_src_alias {
     const char *name;
-    uint32_t flag;
+    u32 flag;
 };

 #define MIN_RUN_DELAY_NS 200000UL
···
 bool thp_configured(void);
 size_t get_trans_hugepagesz(void);
 size_t get_def_hugetlb_pagesz(void);
-const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i);
-size_t get_backing_src_pagesz(uint32_t i);
-bool is_backing_src_hugetlb(uint32_t i);
+const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(u32 i);
+size_t get_backing_src_pagesz(u32 i);
+bool is_backing_src_hugetlb(u32 i);
 void backing_src_help(const char *flag);
 enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name);
 long get_run_delay(void);
···

 int atoi_paranoid(const char *num_str);

-static inline uint32_t atoi_positive(const char *name, const char *num_str)
+static inline u32 atoi_positive(const char *name, const char *num_str)
 {
     int num = atoi_paranoid(num_str);

···
     return num;
 }

-static inline uint32_t atoi_non_negative(const char *name, const char *num_str)
+static inline u32 atoi_non_negative(const char *name, const char *num_str)
 {
     int num = atoi_paranoid(num_str);

tools/testing/selftests/kvm/include/timer_test.h (+6 -6)

···

 /* Timer test cmdline parameters */
 struct test_args {
-    uint32_t nr_vcpus;
-    uint32_t nr_iter;
-    uint32_t timer_period_ms;
-    uint32_t migration_freq_ms;
-    uint32_t timer_err_margin_us;
+    u32 nr_vcpus;
+    u32 nr_iter;
+    u32 timer_period_ms;
+    u32 migration_freq_ms;
+    u32 timer_err_margin_us;
     /* Members of struct kvm_arm_counter_offset */
     u64 counter_offset;
     u64 reserved;
···

 /* Shared variables between host and guest */
 struct test_vcpu_shared_data {
-    uint32_t nr_iter;
+    u32 nr_iter;
     int guest_stage;
     u64 xcnt;
 };
tools/testing/selftests/kvm/include/x86/apic.h (+5 -5)

···
 void xapic_enable(void);
 void x2apic_enable(void);

-static inline uint32_t get_bsp_flag(void)
+static inline u32 get_bsp_flag(void)
 {
     return rdmsr(MSR_IA32_APICBASE) & MSR_IA32_APICBASE_BSP;
 }

-static inline uint32_t xapic_read_reg(unsigned int reg)
+static inline u32 xapic_read_reg(unsigned int reg)
 {
-    return ((volatile uint32_t *)APIC_DEFAULT_GPA)[reg >> 2];
+    return ((volatile u32 *)APIC_DEFAULT_GPA)[reg >> 2];
 }

-static inline void xapic_write_reg(unsigned int reg, uint32_t val)
+static inline void xapic_write_reg(unsigned int reg, u32 val)
 {
-    ((volatile uint32_t *)APIC_DEFAULT_GPA)[reg >> 2] = val;
+    ((volatile u32 *)APIC_DEFAULT_GPA)[reg >> 2] = val;
 }

 static inline u64 x2apic_read_reg(unsigned int reg)
tools/testing/selftests/kvm/include/x86/evmcs.h (+1 -1)

···
 #include "vmx.h"

 #define u16 uint16_t
-#define u32 uint32_t
+#define u32 u32
 #define u64 u64

 #define EVMCS_VERSION 1
tools/testing/selftests/kvm/include/x86/processor.h (+50 -50)

···
     uint16_t base0;
     unsigned base1:8, type:4, s:1, dpl:2, p:1;
     unsigned limit1:4, avl:1, l:1, db:1, g:1, base2:8;
-    uint32_t base3;
-    uint32_t zero1;
+    u32 base3;
+    u32 zero1;
 } __attribute__((packed));

 struct desc_ptr {
···

 static inline u64 rdtsc(void)
 {
-    uint32_t eax, edx;
+    u32 eax, edx;
     u64 tsc_val;
     /*
      * The lfence is to wait (on Intel CPUs) until all previous
···
     return tsc_val;
 }

-static inline u64 rdtscp(uint32_t *aux)
+static inline u64 rdtscp(u32 *aux)
 {
-    uint32_t eax, edx;
+    u32 eax, edx;

     __asm__ __volatile__("rdtscp" : "=a"(eax), "=d"(edx), "=c"(*aux));
     return ((u64)edx) << 32 | eax;
 }

-static inline u64 rdmsr(uint32_t msr)
+static inline u64 rdmsr(u32 msr)
 {
-    uint32_t a, d;
+    u32 a, d;

     __asm__ __volatile__("rdmsr" : "=a"(a), "=d"(d) : "c"(msr) : "memory");

     return a | ((u64)d << 32);
 }

-static inline void wrmsr(uint32_t msr, u64 value)
+static inline void wrmsr(u32 msr, u64 value)
 {
-    uint32_t a = value;
-    uint32_t d = value >> 32;
+    u32 a = value;
+    u32 d = value >> 32;

     __asm__ __volatile__("wrmsr" :: "a"(a), "d"(d), "c"(msr) : "memory");
 }
···
     return idt;
 }

-static inline void outl(uint16_t port, uint32_t value)
+static inline void outl(uint16_t port, u32 value)
 {
     __asm__ __volatile__("outl %%eax, %%dx" : : "d"(port), "a"(value));
 }

-static inline void __cpuid(uint32_t function, uint32_t index,
-                           uint32_t *eax, uint32_t *ebx,
-                           uint32_t *ecx, uint32_t *edx)
+static inline void __cpuid(u32 function, u32 index,
+                           u32 *eax, u32 *ebx,
+                           u32 *ecx, u32 *edx)
 {
     *eax = function;
     *ecx = index;
···
         : "memory");
 }

-static inline void cpuid(uint32_t function,
-                         uint32_t *eax, uint32_t *ebx,
-                         uint32_t *ecx, uint32_t *edx)
+static inline void cpuid(u32 function,
+                         u32 *eax, u32 *ebx,
+                         u32 *ecx, u32 *edx)
 {
     return __cpuid(function, 0, eax, ebx, ecx, edx);
 }

-static inline uint32_t this_cpu_fms(void)
+static inline u32 this_cpu_fms(void)
 {
-    uint32_t eax, ebx, ecx, edx;
+    u32 eax, ebx, ecx, edx;

     cpuid(1, &eax, &ebx, &ecx, &edx);
     return eax;
 }

-static inline uint32_t this_cpu_family(void)
+static inline u32 this_cpu_family(void)
 {
     return x86_family(this_cpu_fms());
 }

-static inline uint32_t this_cpu_model(void)
+static inline u32 this_cpu_model(void)
 {
     return x86_model(this_cpu_fms());
 }

 static inline bool this_cpu_vendor_string_is(const char *vendor)
 {
-    const uint32_t *chunk = (const uint32_t *)vendor;
-    uint32_t eax, ebx, ecx, edx;
+    const u32 *chunk = (const u32 *)vendor;
+    u32 eax, ebx, ecx, edx;

     cpuid(0, &eax, &ebx, &ecx, &edx);
     return (ebx == chunk[0] && edx == chunk[1] && ecx == chunk[2]);
···
     return this_cpu_vendor_string_is("HygonGenuine");
 }

-static inline uint32_t __this_cpu_has(uint32_t function, uint32_t index,
-                                      uint8_t reg, uint8_t lo, uint8_t hi)
+static inline u32 __this_cpu_has(u32 function, u32 index,
+                                 uint8_t reg, uint8_t lo, uint8_t hi)
 {
-    uint32_t gprs[4];
+    u32 gprs[4];

     __cpuid(function, index,
             &gprs[KVM_CPUID_EAX], &gprs[KVM_CPUID_EBX],
···
                           feature.reg, feature.bit, feature.bit);
 }

-static inline uint32_t this_cpu_property(struct kvm_x86_cpu_property property)
+static inline u32 this_cpu_property(struct kvm_x86_cpu_property property)
 {
     return __this_cpu_has(property.function, property.index,
                           property.reg, property.lo_bit, property.hi_bit);
···

 static __always_inline bool this_cpu_has_p(struct kvm_x86_cpu_property property)
 {
-    uint32_t max_leaf;
+    u32 max_leaf;

     switch (property.function & 0xc0000000) {
     case 0:
···

 static inline bool this_pmu_has(struct kvm_x86_pmu_feature feature)
 {
-    uint32_t nr_bits;
+    u32 nr_bits;

     if (feature.f.reg == KVM_CPUID_EBX) {
         nr_bits = this_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
···

 const struct kvm_msr_list *kvm_get_msr_index_list(void);
 const struct kvm_msr_list *kvm_get_feature_msr_index_list(void);
-bool kvm_msr_is_in_save_restore_list(uint32_t msr_index);
+bool kvm_msr_is_in_save_restore_list(u32 msr_index);
 u64 kvm_get_feature_msr(u64 msr_index);

 static inline void vcpu_msrs_get(struct kvm_vcpu *vcpu,
···
 }

 const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid,
-                                               uint32_t function, uint32_t index);
+                                               u32 function, u32 index);
 const struct kvm_cpuid2 *kvm_get_supported_cpuid(void);

-static inline uint32_t kvm_cpu_fms(void)
+static inline u32 kvm_cpu_fms(void)
 {
     return get_cpuid_entry(kvm_get_supported_cpuid(), 0x1, 0)->eax;
 }

-static inline uint32_t kvm_cpu_family(void)
+static inline u32 kvm_cpu_family(void)
 {
     return x86_family(kvm_cpu_fms());
 }

-static inline uint32_t kvm_cpu_model(void)
+static inline u32 kvm_cpu_model(void)
 {
     return x86_model(kvm_cpu_fms());
 }
···
     return kvm_cpuid_has(kvm_get_supported_cpuid(), feature);
 }

-uint32_t kvm_cpuid_property(const struct kvm_cpuid2 *cpuid,
-                            struct kvm_x86_cpu_property property);
+u32 kvm_cpuid_property(const struct kvm_cpuid2 *cpuid,
+                       struct kvm_x86_cpu_property property);

-static inline uint32_t kvm_cpu_property(struct kvm_x86_cpu_property property)
+static inline u32 kvm_cpu_property(struct kvm_x86_cpu_property property)
 {
     return kvm_cpuid_property(kvm_get_supported_cpuid(), property);
 }

 static __always_inline bool kvm_cpu_has_p(struct kvm_x86_cpu_property property)
 {
-    uint32_t max_leaf;
+    u32 max_leaf;

     switch (property.function & 0xc0000000) {
     case 0:
···

 static inline bool kvm_pmu_has(struct kvm_x86_pmu_feature feature)
 {
-    uint32_t nr_bits;
+    u32 nr_bits;

     if (feature.f.reg == KVM_CPUID_EBX) {
         nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
···
 }

 static inline struct kvm_cpuid_entry2 *__vcpu_get_cpuid_entry(struct kvm_vcpu *vcpu,
-                                                              uint32_t function,
-                                                              uint32_t index)
+                                                              u32 function,
+                                                              u32 index)
 {
     TEST_ASSERT(vcpu->cpuid,
"Must do vcpu_init_cpuid() first (or equivalent)"); 1069 1069 ··· 1074 1074 } 1075 1075 1076 1076 static inline struct kvm_cpuid_entry2 *vcpu_get_cpuid_entry(struct kvm_vcpu *vcpu, 1077 - uint32_t function) 1077 + u32 function) 1078 1078 { 1079 1079 return __vcpu_get_cpuid_entry(vcpu, function, 0); 1080 1080 } ··· 1104 1104 1105 1105 void vcpu_set_cpuid_property(struct kvm_vcpu *vcpu, 1106 1106 struct kvm_x86_cpu_property property, 1107 - uint32_t value); 1107 + u32 value); 1108 1108 void vcpu_set_cpuid_maxphyaddr(struct kvm_vcpu *vcpu, uint8_t maxphyaddr); 1109 1109 1110 - void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, uint32_t function); 1110 + void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, u32 function); 1111 1111 1112 1112 static inline bool vcpu_cpuid_has(struct kvm_vcpu *vcpu, 1113 1113 struct kvm_x86_cpu_feature feature) ··· 1161 1161 * is changing, etc. This is NOT an exhaustive list! The intent is to filter 1162 1162 * out MSRs that are not durable _and_ that a selftest wants to write. 1163 1163 */ 1164 - static inline bool is_durable_msr(uint32_t msr) 1164 + static inline bool is_durable_msr(u32 msr) 1165 1165 { 1166 1166 return msr != MSR_IA32_TSC; 1167 1167 } ··· 1203 1203 uint16_t dpl : 2; 1204 1204 uint16_t p : 1; 1205 1205 uint16_t offset1; 1206 - uint32_t offset2; uint32_t reserved; 1206 + u32 offset2; u32 reserved; 1207 1207 }; 1208 1208 1209 1209 void vm_install_exception_handler(struct kvm_vm *vm, int vector, ··· 1307 1307 }) 1308 1308 1309 1309 #define BUILD_READ_U64_SAFE_HELPER(insn, _fep, _FEP) \ 1310 - static inline uint8_t insn##_safe ##_fep(uint32_t idx, u64 *val) \ 1310 + static inline uint8_t insn##_safe ##_fep(u32 idx, u64 *val) \ 1311 1311 { \ 1312 1312 u64 error_code; \ 1313 1313 uint8_t vector; \ 1314 - uint32_t a, d; \ 1314 + u32 a, d; \ 1315 1315 \ 1316 1316 asm volatile(KVM_ASM_SAFE##_FEP(#insn) \ 1317 1317 : "=a"(a), "=d"(d), \ ··· 1335 1335 BUILD_READ_U64_SAFE_HELPERS(rdpmc) 1336 1336 BUILD_READ_U64_SAFE_HELPERS(xgetbv) 1337 1337 1338 - static inline uint8_t wrmsr_safe(uint32_t msr, u64 val) 1338 + static inline uint8_t wrmsr_safe(u32 msr, u64 val) 1339 1339 { 1340 1340 return kvm_asm_safe("wrmsr", "a"(val & -1u), "d"(val >> 32), "c"(msr)); 1341 1341 } 1342 1342 1343 - static inline uint8_t xsetbv_safe(uint32_t index, u64 value) 1343 + static inline uint8_t xsetbv_safe(u32 index, u64 value) 1344 1344 { 1345 1345 u32 eax = value; 1346 1346 u32 edx = value >> 32;
+2 -2
tools/testing/selftests/kvm/include/x86/sev.h
··· 46 46 return is_sev_es_vm(vm) || vm->type == KVM_X86_SEV_VM; 47 47 } 48 48 49 - void sev_vm_launch(struct kvm_vm *vm, uint32_t policy); 49 + void sev_vm_launch(struct kvm_vm *vm, u32 policy); 50 50 void sev_vm_launch_measure(struct kvm_vm *vm, uint8_t *measurement); 51 51 void sev_vm_launch_finish(struct kvm_vm *vm); 52 52 void snp_vm_launch_start(struct kvm_vm *vm, u64 policy); 53 53 void snp_vm_launch_update(struct kvm_vm *vm); 54 54 void snp_vm_launch_finish(struct kvm_vm *vm); 55 55 56 - struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code, 56 + struct kvm_vm *vm_sev_create_with_one_vcpu(u32 type, void *guest_code, 57 57 struct kvm_vcpu **cpu); 58 58 void vm_sev_launch(struct kvm_vm *vm, u64 policy, uint8_t *measurement); 59 59
+3 -3
tools/testing/selftests/kvm/include/x86/vmx.h
··· 285 285 }; 286 286 287 287 struct vmx_msr_entry { 288 - uint32_t index; 289 - uint32_t reserved; 288 + u32 index; 289 + u32 reserved; 290 290 u64 value; 291 291 } __attribute__ ((aligned(16))); 292 292 ··· 490 490 return ret; 491 491 } 492 492 493 - static inline uint32_t vmcs_revision(void) 493 + static inline u32 vmcs_revision(void) 494 494 { 495 495 return rdmsr(MSR_IA32_VMX_BASIC); 496 496 }
+1 -1
tools/testing/selftests/kvm/kvm_page_table_test.c
··· 63 63 static enum test_stage guest_test_stage; 64 64 65 65 /* Host variables */ 66 - static uint32_t nr_vcpus = 1; 66 + static u32 nr_vcpus = 1; 67 67 static struct test_args test_args; 68 68 static enum test_stage *current_stage; 69 69 static bool host_quit;
+1 -1
tools/testing/selftests/kvm/lib/arm64/gic.c
··· 50 50 51 51 void gic_init(enum gic_type type, unsigned int nr_cpus) 52 52 { 53 - uint32_t cpu = guest_get_vcpuid(); 53 + u32 cpu = guest_get_vcpuid(); 54 54 55 55 GUEST_ASSERT(type < GIC_TYPE_MAX); 56 56 GUEST_ASSERT(nr_cpus);
+11 -11
tools/testing/selftests/kvm/lib/arm64/gic_private.h
··· 13 13 void (*gic_irq_enable)(unsigned int intid); 14 14 void (*gic_irq_disable)(unsigned int intid); 15 15 u64 (*gic_read_iar)(void); 16 - void (*gic_write_eoir)(uint32_t irq); 17 - void (*gic_write_dir)(uint32_t irq); 16 + void (*gic_write_eoir)(u32 irq); 17 + void (*gic_write_dir)(u32 irq); 18 18 void (*gic_set_eoi_split)(bool split); 19 19 void (*gic_set_priority_mask)(u64 mask); 20 - void (*gic_set_priority)(uint32_t intid, uint32_t prio); 21 - void (*gic_irq_set_active)(uint32_t intid); 22 - void (*gic_irq_clear_active)(uint32_t intid); 23 - bool (*gic_irq_get_active)(uint32_t intid); 24 - void (*gic_irq_set_pending)(uint32_t intid); 25 - void (*gic_irq_clear_pending)(uint32_t intid); 26 - bool (*gic_irq_get_pending)(uint32_t intid); 27 - void (*gic_irq_set_config)(uint32_t intid, bool is_edge); 28 - void (*gic_irq_set_group)(uint32_t intid, bool group); 20 + void (*gic_set_priority)(u32 intid, u32 prio); 21 + void (*gic_irq_set_active)(u32 intid); 22 + void (*gic_irq_clear_active)(u32 intid); 23 + bool (*gic_irq_get_active)(u32 intid); 24 + void (*gic_irq_set_pending)(u32 intid); 25 + void (*gic_irq_clear_pending)(u32 intid); 26 + bool (*gic_irq_get_pending)(u32 intid); 27 + void (*gic_irq_set_config)(u32 intid, bool is_edge); 28 + void (*gic_irq_set_group)(u32 intid, bool group); 29 29 }; 30 30 31 31 extern const struct gic_common_ops gicv3_ops;
+40 -40
tools/testing/selftests/kvm/lib/arm64/gic_v3.c
··· 50 50 } 51 51 } 52 52 53 - static inline volatile void *gicr_base_cpu(uint32_t cpu) 53 + static inline volatile void *gicr_base_cpu(u32 cpu) 54 54 { 55 55 /* Align all the redistributors sequentially */ 56 56 return GICR_BASE_GVA + cpu * SZ_64K * 2; 57 57 } 58 58 59 - static void gicv3_gicr_wait_for_rwp(uint32_t cpu) 59 + static void gicv3_gicr_wait_for_rwp(u32 cpu) 60 60 { 61 61 unsigned int count = 100000; /* 1s */ 62 62 ··· 66 66 } 67 67 } 68 68 69 - static void gicv3_wait_for_rwp(uint32_t cpu_or_dist) 69 + static void gicv3_wait_for_rwp(u32 cpu_or_dist) 70 70 { 71 71 if (cpu_or_dist & DIST_BIT) 72 72 gicv3_gicd_wait_for_rwp(); ··· 99 99 return irqstat; 100 100 } 101 101 102 - static void gicv3_write_eoir(uint32_t irq) 102 + static void gicv3_write_eoir(u32 irq) 103 103 { 104 104 write_sysreg_s(irq, SYS_ICC_EOIR1_EL1); 105 105 isb(); 106 106 } 107 107 108 - static void gicv3_write_dir(uint32_t irq) 108 + static void gicv3_write_dir(u32 irq) 109 109 { 110 110 write_sysreg_s(irq, SYS_ICC_DIR_EL1); 111 111 isb(); ··· 118 118 119 119 static void gicv3_set_eoi_split(bool split) 120 120 { 121 - uint32_t val; 121 + u32 val; 122 122 123 123 /* 124 124 * All other fields are read-only, so no need to read CTLR first. In ··· 129 129 isb(); 130 130 } 131 131 132 - uint32_t gicv3_reg_readl(uint32_t cpu_or_dist, u64 offset) 132 + u32 gicv3_reg_readl(u32 cpu_or_dist, u64 offset) 133 133 { 134 134 volatile void *base = cpu_or_dist & DIST_BIT ? GICD_BASE_GVA 135 135 : sgi_base_from_redist(gicr_base_cpu(cpu_or_dist)); 136 136 return readl(base + offset); 137 137 } 138 138 139 - void gicv3_reg_writel(uint32_t cpu_or_dist, u64 offset, uint32_t reg_val) 139 + void gicv3_reg_writel(u32 cpu_or_dist, u64 offset, u32 reg_val) 140 140 { 141 141 volatile void *base = cpu_or_dist & DIST_BIT ? GICD_BASE_GVA 142 142 : sgi_base_from_redist(gicr_base_cpu(cpu_or_dist)); 143 143 writel(reg_val, base + offset); 144 144 } 145 145 146 - uint32_t gicv3_getl_fields(uint32_t cpu_or_dist, u64 offset, uint32_t mask) 146 + u32 gicv3_getl_fields(u32 cpu_or_dist, u64 offset, u32 mask) 147 147 { 148 148 return gicv3_reg_readl(cpu_or_dist, offset) & mask; 149 149 } 150 150 151 - void gicv3_setl_fields(uint32_t cpu_or_dist, u64 offset, 152 - uint32_t mask, uint32_t reg_val) 151 + void gicv3_setl_fields(u32 cpu_or_dist, u64 offset, 152 + u32 mask, u32 reg_val) 153 153 { 154 - uint32_t tmp = gicv3_reg_readl(cpu_or_dist, offset) & ~mask; 154 + u32 tmp = gicv3_reg_readl(cpu_or_dist, offset) & ~mask; 155 155 156 156 tmp |= (reg_val & mask); 157 157 gicv3_reg_writel(cpu_or_dist, offset, tmp); ··· 165 165 * map that doesn't implement it; like GICR_WAKER's offset of 0x0014 being 166 166 * marked as "Reserved" in the Distributor map. 
167 167 */ 168 - static void gicv3_access_reg(uint32_t intid, u64 offset, 169 - uint32_t reg_bits, uint32_t bits_per_field, 170 - bool write, uint32_t *val) 168 + static void gicv3_access_reg(u32 intid, u64 offset, 169 + u32 reg_bits, u32 bits_per_field, 170 + bool write, u32 *val) 171 171 { 172 - uint32_t cpu = guest_get_vcpuid(); 172 + u32 cpu = guest_get_vcpuid(); 173 173 enum gicv3_intid_range intid_range = get_intid_range(intid); 174 - uint32_t fields_per_reg, index, mask, shift; 175 - uint32_t cpu_or_dist; 174 + u32 fields_per_reg, index, mask, shift; 175 + u32 cpu_or_dist; 176 176 177 177 GUEST_ASSERT(bits_per_field <= reg_bits); 178 178 GUEST_ASSERT(!write || *val < (1U << bits_per_field)); ··· 197 197 *val = gicv3_getl_fields(cpu_or_dist, offset, mask) >> shift; 198 198 } 199 199 200 - static void gicv3_write_reg(uint32_t intid, u64 offset, 201 - uint32_t reg_bits, uint32_t bits_per_field, uint32_t val) 200 + static void gicv3_write_reg(u32 intid, u64 offset, 201 + u32 reg_bits, u32 bits_per_field, u32 val) 202 202 { 203 203 gicv3_access_reg(intid, offset, reg_bits, 204 204 bits_per_field, true, &val); 205 205 } 206 206 207 - static uint32_t gicv3_read_reg(uint32_t intid, u64 offset, 208 - uint32_t reg_bits, uint32_t bits_per_field) 207 + static u32 gicv3_read_reg(u32 intid, u64 offset, 208 + u32 reg_bits, u32 bits_per_field) 209 209 { 210 - uint32_t val; 210 + u32 val; 211 211 212 212 gicv3_access_reg(intid, offset, reg_bits, 213 213 bits_per_field, false, &val); 214 214 return val; 215 215 } 216 216 217 - static void gicv3_set_priority(uint32_t intid, uint32_t prio) 217 + static void gicv3_set_priority(u32 intid, u32 prio) 218 218 { 219 219 gicv3_write_reg(intid, GICD_IPRIORITYR, 32, 8, prio); 220 220 } 221 221 222 222 /* Sets the intid to be level-sensitive or edge-triggered. */ 223 - static void gicv3_irq_set_config(uint32_t intid, bool is_edge) 223 + static void gicv3_irq_set_config(u32 intid, bool is_edge) 224 224 { 225 - uint32_t val; 225 + u32 val; 226 226 227 227 /* N/A for private interrupts. */ 228 228 GUEST_ASSERT(get_intid_range(intid) == SPI_RANGE); ··· 230 230 gicv3_write_reg(intid, GICD_ICFGR, 32, 2, val); 231 231 } 232 232 233 - static void gicv3_irq_enable(uint32_t intid) 233 + static void gicv3_irq_enable(u32 intid) 234 234 { 235 235 bool is_spi = get_intid_range(intid) == SPI_RANGE; 236 - uint32_t cpu = guest_get_vcpuid(); 236 + u32 cpu = guest_get_vcpuid(); 237 237 238 238 gicv3_write_reg(intid, GICD_ISENABLER, 32, 1, 1); 239 239 gicv3_wait_for_rwp(is_spi ? DIST_BIT : cpu); 240 240 } 241 241 242 - static void gicv3_irq_disable(uint32_t intid) 242 + static void gicv3_irq_disable(u32 intid) 243 243 { 244 244 bool is_spi = get_intid_range(intid) == SPI_RANGE; 245 - uint32_t cpu = guest_get_vcpuid(); 245 + u32 cpu = guest_get_vcpuid(); 246 246 247 247 gicv3_write_reg(intid, GICD_ICENABLER, 32, 1, 1); 248 248 gicv3_wait_for_rwp(is_spi ? 
DIST_BIT : cpu); 249 249 } 250 250 251 - static void gicv3_irq_set_active(uint32_t intid) 251 + static void gicv3_irq_set_active(u32 intid) 252 252 { 253 253 gicv3_write_reg(intid, GICD_ISACTIVER, 32, 1, 1); 254 254 } 255 255 256 - static void gicv3_irq_clear_active(uint32_t intid) 256 + static void gicv3_irq_clear_active(u32 intid) 257 257 { 258 258 gicv3_write_reg(intid, GICD_ICACTIVER, 32, 1, 1); 259 259 } 260 260 261 - static bool gicv3_irq_get_active(uint32_t intid) 261 + static bool gicv3_irq_get_active(u32 intid) 262 262 { 263 263 return gicv3_read_reg(intid, GICD_ISACTIVER, 32, 1); 264 264 } 265 265 266 - static void gicv3_irq_set_pending(uint32_t intid) 266 + static void gicv3_irq_set_pending(u32 intid) 267 267 { 268 268 gicv3_write_reg(intid, GICD_ISPENDR, 32, 1, 1); 269 269 } 270 270 271 - static void gicv3_irq_clear_pending(uint32_t intid) 271 + static void gicv3_irq_clear_pending(u32 intid) 272 272 { 273 273 gicv3_write_reg(intid, GICD_ICPENDR, 32, 1, 1); 274 274 } 275 275 276 - static bool gicv3_irq_get_pending(uint32_t intid) 276 + static bool gicv3_irq_get_pending(u32 intid) 277 277 { 278 278 return gicv3_read_reg(intid, GICD_ISPENDR, 32, 1); 279 279 } 280 280 281 281 static void gicv3_enable_redist(volatile void *redist_base) 282 282 { 283 - uint32_t val = readl(redist_base + GICR_WAKER); 283 + u32 val = readl(redist_base + GICR_WAKER); 284 284 unsigned int count = 100000; /* 1s */ 285 285 286 286 val &= ~GICR_WAKER_ProcessorSleep; ··· 293 293 } 294 294 } 295 295 296 - static void gicv3_set_group(uint32_t intid, bool grp) 296 + static void gicv3_set_group(u32 intid, bool grp) 297 297 { 298 - uint32_t cpu_or_dist; 299 - uint32_t val; 298 + u32 cpu_or_dist; 299 + u32 val; 300 300 301 301 cpu_or_dist = (get_intid_range(intid) == SPI_RANGE) ? DIST_BIT : guest_get_vcpuid(); 302 302 val = gicv3_reg_readl(cpu_or_dist, GICD_IGROUPR + (intid / 32) * 4);
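Note: gicv3_access_reg() above locates an interrupt's field inside a banked 32-bit GIC register array from reg_bits and bits_per_field. A worked standalone sketch of that indexing arithmetic, using GICD_IPRIORITYR (32-bit registers, 8-bit priority fields) as the example; the values are illustrative, and the real helper additionally selects the distributor or a redistributor as the base:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

int main(void)
{
        u32 intid = 35, reg_bits = 32, bits_per_field = 8;

        u32 fields_per_reg = reg_bits / bits_per_field;         /* 4 */
        u32 index = intid % fields_per_reg;                     /* 3 */
        u32 offset = (intid / fields_per_reg) * (reg_bits / 8); /* 32 bytes */
        u32 shift = index * bits_per_field;                     /* 24 */
        u32 mask = ((1u << bits_per_field) - 1) << shift;       /* 0xff000000 */

        printf("reg at +%u bytes, shift %u, mask 0x%x\n", offset, shift, mask);
        return 0;
}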
+11 -11
tools/testing/selftests/kvm/lib/arm64/processor.c
··· 413 413 vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (u64)guest_code); 414 414 } 415 415 416 - static struct kvm_vcpu *__aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, 416 + static struct kvm_vcpu *__aarch64_vcpu_add(struct kvm_vm *vm, u32 vcpu_id, 417 417 struct kvm_vcpu_init *init) 418 418 { 419 419 size_t stack_size; ··· 432 432 return vcpu; 433 433 } 434 434 435 - struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, 435 + struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, u32 vcpu_id, 436 436 struct kvm_vcpu_init *init, void *guest_code) 437 437 { 438 438 struct kvm_vcpu *vcpu = __aarch64_vcpu_add(vm, vcpu_id, init); ··· 442 442 return vcpu; 443 443 } 444 444 445 - struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) 445 + struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id) 446 446 { 447 447 return __aarch64_vcpu_add(vm, vcpu_id, NULL); 448 448 } ··· 563 563 handlers->exception_handlers[vector][0] = handler; 564 564 } 565 565 566 - uint32_t guest_get_vcpuid(void) 566 + u32 guest_get_vcpuid(void) 567 567 { 568 568 return read_sysreg(tpidr_el1); 569 569 } 570 570 571 - static uint32_t max_ipa_for_page_size(uint32_t vm_ipa, uint32_t gran, 572 - uint32_t not_sup_val, uint32_t ipa52_min_val) 571 + static u32 max_ipa_for_page_size(u32 vm_ipa, u32 gran, 572 + u32 not_sup_val, u32 ipa52_min_val) 573 573 { 574 574 if (gran == not_sup_val) 575 575 return 0; ··· 579 579 return min(vm_ipa, 48U); 580 580 } 581 581 582 - void aarch64_get_supported_page_sizes(uint32_t ipa, uint32_t *ipa4k, 583 - uint32_t *ipa16k, uint32_t *ipa64k) 582 + void aarch64_get_supported_page_sizes(u32 ipa, u32 *ipa4k, 583 + u32 *ipa16k, u32 *ipa64k) 584 584 { 585 585 struct kvm_vcpu_init preferred_init; 586 586 int kvm_fd, vm_fd, vcpu_fd, err; 587 587 u64 val; 588 - uint32_t gran; 588 + u32 gran; 589 589 struct kvm_one_reg reg = { 590 590 .id = KVM_ARM64_SYS_REG(SYS_ID_AA64MMFR0_EL1), 591 591 .addr = (u64)&val, ··· 646 646 : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7") 647 647 648 648 649 - void smccc_hvc(uint32_t function_id, u64 arg0, u64 arg1, 649 + void smccc_hvc(u32 function_id, u64 arg0, u64 arg1, 650 650 u64 arg2, u64 arg3, u64 arg4, u64 arg5, 651 651 u64 arg6, struct arm_smccc_res *res) 652 652 { ··· 654 654 arg6, res); 655 655 } 656 656 657 - void smccc_smc(uint32_t function_id, u64 arg0, u64 arg1, 657 + void smccc_smc(u32 function_id, u64 arg0, u64 arg1, 658 658 u64 arg2, u64 arg3, u64 arg4, u64 arg5, 659 659 u64 arg6, struct arm_smccc_res *res) 660 660 {
+11 -11
tools/testing/selftests/kvm/lib/arm64/vgic.c
··· 41 41 * redistributor regions of the guest. Since it depends on the number of 42 42 * vCPUs for the VM, it must be called after all the vCPUs have been created. 43 43 */ 44 - int __vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs) 44 + int __vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, u32 nr_irqs) 45 45 { 46 46 int gic_fd; 47 47 u64 attr; ··· 77 77 KVM_DEV_ARM_VGIC_CTRL_INIT, NULL); 78 78 } 79 79 80 - int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs) 80 + int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, u32 nr_irqs) 81 81 { 82 82 unsigned int nr_vcpus_created = 0; 83 83 struct list_head *iter; ··· 104 104 } 105 105 106 106 /* should only work for level sensitive interrupts */ 107 - int _kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level) 107 + int _kvm_irq_set_level_info(int gic_fd, u32 intid, int level) 108 108 { 109 109 u64 attr = 32 * (intid / 32); 110 110 u64 index = intid % 32; ··· 122 122 return ret; 123 123 } 124 124 125 - void kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level) 125 + void kvm_irq_set_level_info(int gic_fd, u32 intid, int level) 126 126 { 127 127 int ret = _kvm_irq_set_level_info(gic_fd, intid, level); 128 128 129 129 TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO, ret)); 130 130 } 131 131 132 - int _kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level) 132 + int _kvm_arm_irq_line(struct kvm_vm *vm, u32 intid, int level) 133 133 { 134 - uint32_t irq = intid & KVM_ARM_IRQ_NUM_MASK; 134 + u32 irq = intid & KVM_ARM_IRQ_NUM_MASK; 135 135 136 136 TEST_ASSERT(!INTID_IS_SGI(intid), "KVM_IRQ_LINE's interface itself " 137 137 "doesn't allow injecting SGIs. There's no mask for it."); ··· 144 144 return _kvm_irq_line(vm, irq, level); 145 145 } 146 146 147 - void kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level) 147 + void kvm_arm_irq_line(struct kvm_vm *vm, u32 intid, int level) 148 148 { 149 149 int ret = _kvm_arm_irq_line(vm, intid, level); 150 150 151 151 TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_IRQ_LINE, ret)); 152 152 } 153 153 154 - static void vgic_poke_irq(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu, 154 + static void vgic_poke_irq(int gic_fd, u32 intid, struct kvm_vcpu *vcpu, 155 155 u64 reg_off) 156 156 { 157 157 u64 reg = intid / 32; ··· 160 160 u64 val; 161 161 bool intid_is_private = INTID_IS_SGI(intid) || INTID_IS_PPI(intid); 162 162 163 - uint32_t group = intid_is_private ? KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 163 + u32 group = intid_is_private ? KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 164 164 : KVM_DEV_ARM_VGIC_GRP_DIST_REGS; 165 165 166 166 if (intid_is_private) { ··· 183 183 kvm_device_attr_set(gic_fd, group, attr, &val); 184 184 } 185 185 186 - void kvm_irq_write_ispendr(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu) 186 + void kvm_irq_write_ispendr(int gic_fd, u32 intid, struct kvm_vcpu *vcpu) 187 187 { 188 188 vgic_poke_irq(gic_fd, intid, vcpu, GICD_ISPENDR); 189 189 } 190 190 191 - void kvm_irq_write_isactiver(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu) 191 + void kvm_irq_write_isactiver(int gic_fd, u32 intid, struct kvm_vcpu *vcpu) 192 192 { 193 193 vgic_poke_irq(gic_fd, intid, vcpu, GICD_ISACTIVER); 194 194 }
+1 -1
tools/testing/selftests/kvm/lib/guest_modes.c
··· 20 20 #ifdef __aarch64__ 21 21 { 22 22 unsigned int limit = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE); 23 - uint32_t ipa4k, ipa16k, ipa64k; 23 + u32 ipa4k, ipa16k, ipa64k; 24 24 int i; 25 25 26 26 aarch64_get_supported_page_sizes(limit, &ipa4k, &ipa16k, &ipa64k);
+3 -3
tools/testing/selftests/kvm/lib/guest_sprintf.c
··· 35 35 ({ \ 36 36 int __res; \ 37 37 \ 38 - __res = ((u64)n) % (uint32_t) base; \ 39 - n = ((u64)n) / (uint32_t) base; \ 38 + __res = ((u64)n) % (u32)base; \ 39 + n = ((u64)n) / (u32)base; \ 40 40 __res; \ 41 41 }) 42 42 ··· 292 292 } else if (flags & SIGN) 293 293 num = va_arg(args, int); 294 294 else 295 - num = va_arg(args, uint32_t); 295 + num = va_arg(args, u32); 296 296 str = number(str, end, num, base, field_width, precision, flags); 297 297 } 298 298
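Note: the divide macro above performs a 64-by-32 division in place and evaluates to the remainder, which is exactly what the digit loop in number() relies on. A self-contained sketch of the idiom, with the macro reproduced locally under the kernel's usual name do_div (buffer and values are illustrative):

#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;
typedef uint64_t u64;

/* Same shape as the macro above: divides n in place, yields the remainder. */
#define do_div(n, base)                 \
({                                      \
        int __res;                      \
                                        \
        __res = ((u64)n) % (u32)base;   \
        n = ((u64)n) / (u32)base;       \
        __res;                          \
})

int main(void)
{
        u64 n = 1234;
        char buf[24];
        int i = 0;

        /* Emit decimal digits least-significant first: "4321". */
        do {
                buf[i++] = '0' + do_div(n, 10);
        } while (n);
        buf[i] = '\0';

        printf("%s\n", buf);
        return 0;
}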
+38 -39
tools/testing/selftests/kvm/lib/kvm_util.c
··· 20 20 21 21 #define KVM_UTIL_MIN_PFN 2 22 22 23 - uint32_t guest_random_seed; 23 + u32 guest_random_seed; 24 24 struct guest_random_state guest_rng; 25 - static uint32_t last_guest_seed; 25 + static u32 last_guest_seed; 26 26 27 27 static size_t vcpu_mmap_sz(void); 28 28 ··· 165 165 return (unsigned int)ret; 166 166 } 167 167 168 - void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size) 168 + void vm_enable_dirty_ring(struct kvm_vm *vm, u32 ring_size) 169 169 { 170 170 if (vm_check_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL)) 171 171 vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL, ring_size); ··· 189 189 vm->stats.fd = -1; 190 190 } 191 191 192 - const char *vm_guest_mode_string(uint32_t i) 192 + const char *vm_guest_mode_string(u32 i) 193 193 { 194 194 static const char * const strings[] = { 195 195 [VM_MODE_P52V48_4K] = "PA-bits:52, VA-bits:48, 4K pages", ··· 397 397 } 398 398 399 399 static u64 vm_nr_pages_required(enum vm_guest_mode mode, 400 - uint32_t nr_runnable_vcpus, 400 + u32 nr_runnable_vcpus, 401 401 u64 extra_mem_pages) 402 402 { 403 403 u64 page_size = vm_guest_mode_params[mode].page_size; ··· 435 435 return vm_adjust_num_guest_pages(mode, nr_pages); 436 436 } 437 437 438 - void kvm_set_files_rlimit(uint32_t nr_vcpus) 438 + void kvm_set_files_rlimit(u32 nr_vcpus) 439 439 { 440 440 /* 441 441 * Each vCPU will open two file descriptors: the vCPU itself and the ··· 476 476 #endif 477 477 } 478 478 479 - struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus, 479 + struct kvm_vm *__vm_create(struct vm_shape shape, u32 nr_runnable_vcpus, 480 480 u64 nr_extra_pages) 481 481 { 482 482 u64 nr_pages = vm_nr_pages_required(shape.mode, nr_runnable_vcpus, ··· 546 546 * extra_mem_pages is only used to calculate the maximum page table size, 547 547 * no real memory allocation for non-slot0 memory in this function. 
548 548 */ 549 - struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus, 549 + struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, u32 nr_vcpus, 550 550 u64 extra_mem_pages, 551 551 void *guest_code, struct kvm_vcpu *vcpus[]) 552 552 { ··· 614 614 } 615 615 616 616 __weak struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, 617 - uint32_t vcpu_id) 617 + u32 vcpu_id) 618 618 { 619 619 return __vm_vcpu_add(vm, vcpu_id); 620 620 } ··· 636 636 return pthread_setaffinity_np(task, sizeof(cpuset), &cpuset); 637 637 } 638 638 639 - static uint32_t parse_pcpu(const char *cpu_str, const cpu_set_t *allowed_mask) 639 + static u32 parse_pcpu(const char *cpu_str, const cpu_set_t *allowed_mask) 640 640 { 641 - uint32_t pcpu = atoi_non_negative("CPU number", cpu_str); 641 + u32 pcpu = atoi_non_negative("CPU number", cpu_str); 642 642 643 643 TEST_ASSERT(CPU_ISSET(pcpu, allowed_mask), 644 644 "Not allowed to run on pCPU '%d', check cgroups?", pcpu); ··· 662 662 " (default: no pinning)\n", name, name); 663 663 } 664 664 665 - void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[], 665 + void kvm_parse_vcpu_pinning(const char *pcpus_string, u32 vcpu_to_pcpu[], 666 666 int nr_vcpus) 667 667 { 668 668 cpu_set_t allowed_mask; ··· 918 918 } 919 919 920 920 921 - int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags, 921 + int __vm_set_user_memory_region(struct kvm_vm *vm, u32 slot, u32 flags, 922 922 u64 gpa, u64 size, void *hva) 923 923 { 924 924 struct kvm_userspace_memory_region region = { ··· 932 932 return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region); 933 933 } 934 934 935 - void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags, 935 + void vm_set_user_memory_region(struct kvm_vm *vm, u32 slot, u32 flags, 936 936 u64 gpa, u64 size, void *hva) 937 937 { 938 938 int ret = __vm_set_user_memory_region(vm, slot, flags, gpa, size, hva); ··· 945 945 __TEST_REQUIRE(kvm_has_cap(KVM_CAP_USER_MEMORY2), \ 946 946 "KVM selftests now require KVM_SET_USER_MEMORY_REGION2 (introduced in v6.8)") 947 947 948 - int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags, 948 + int __vm_set_user_memory_region2(struct kvm_vm *vm, u32 slot, u32 flags, 949 949 u64 gpa, u64 size, void *hva, 950 - uint32_t guest_memfd, u64 guest_memfd_offset) 950 + u32 guest_memfd, u64 guest_memfd_offset) 951 951 { 952 952 struct kvm_userspace_memory_region2 region = { 953 953 .slot = slot, ··· 964 964 return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION2, &region); 965 965 } 966 966 967 - void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags, 967 + void vm_set_user_memory_region2(struct kvm_vm *vm, u32 slot, u32 flags, 968 968 u64 gpa, u64 size, void *hva, 969 - uint32_t guest_memfd, u64 guest_memfd_offset) 969 + u32 guest_memfd, u64 guest_memfd_offset) 970 970 { 971 971 int ret = __vm_set_user_memory_region2(vm, slot, flags, gpa, size, hva, 972 972 guest_memfd, guest_memfd_offset); ··· 978 978 979 979 /* FIXME: This thing needs to be ripped apart and rewritten. 
*/ 980 980 void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type, 981 - u64 gpa, uint32_t slot, u64 npages, uint32_t flags, 981 + u64 gpa, u32 slot, u64 npages, u32 flags, 982 982 int guest_memfd, u64 guest_memfd_offset) 983 983 { 984 984 int ret; ··· 1085 1085 1086 1086 if (flags & KVM_MEM_GUEST_MEMFD) { 1087 1087 if (guest_memfd < 0) { 1088 - uint32_t guest_memfd_flags = 0; 1088 + u32 guest_memfd_flags = 0; 1089 1089 TEST_ASSERT(!guest_memfd_offset, 1090 1090 "Offset must be zero when creating new guest_memfd"); 1091 1091 guest_memfd = vm_create_guest_memfd(vm, mem_size, guest_memfd_flags); ··· 1141 1141 1142 1142 void vm_userspace_mem_region_add(struct kvm_vm *vm, 1143 1143 enum vm_mem_backing_src_type src_type, 1144 - u64 gpa, uint32_t slot, u64 npages, 1145 - uint32_t flags) 1144 + u64 gpa, u32 slot, u64 npages, u32 flags) 1146 1145 { 1147 1146 vm_mem_add(vm, src_type, gpa, slot, npages, flags, -1, 0); 1148 1147 } ··· 1162 1163 * memory slot ID). 1163 1164 */ 1164 1165 struct userspace_mem_region * 1165 - memslot2region(struct kvm_vm *vm, uint32_t memslot) 1166 + memslot2region(struct kvm_vm *vm, u32 memslot) 1166 1167 { 1167 1168 struct userspace_mem_region *region; 1168 1169 ··· 1193 1194 * Sets the flags of the memory region specified by the value of slot, 1194 1195 * to the values given by flags. 1195 1196 */ 1196 - void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags) 1197 + void vm_mem_region_set_flags(struct kvm_vm *vm, u32 slot, u32 flags) 1197 1198 { 1198 1199 int ret; 1199 1200 struct userspace_mem_region *region; ··· 1209 1210 ret, errno, slot, flags); 1210 1211 } 1211 1212 1212 - void vm_mem_region_reload(struct kvm_vm *vm, uint32_t slot) 1213 + void vm_mem_region_reload(struct kvm_vm *vm, u32 slot) 1213 1214 { 1214 1215 struct userspace_mem_region *region = memslot2region(vm, slot); 1215 1216 struct kvm_userspace_memory_region2 tmp = region->region; ··· 1233 1234 * 1234 1235 * Change the gpa of a memory region. 1235 1236 */ 1236 - void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, u64 new_gpa) 1237 + void vm_mem_region_move(struct kvm_vm *vm, u32 slot, u64 new_gpa) 1237 1238 { 1238 1239 struct userspace_mem_region *region; 1239 1240 int ret; ··· 1262 1263 * 1263 1264 * Delete a memory region. 1264 1265 */ 1265 - void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot) 1266 + void vm_mem_region_delete(struct kvm_vm *vm, u32 slot) 1266 1267 { 1267 1268 struct userspace_mem_region *region = memslot2region(vm, slot); 1268 1269 ··· 1316 1317 return ret; 1317 1318 } 1318 1319 1319 - static bool vcpu_exists(struct kvm_vm *vm, uint32_t vcpu_id) 1320 + static bool vcpu_exists(struct kvm_vm *vm, u32 vcpu_id) 1320 1321 { 1321 1322 struct kvm_vcpu *vcpu; 1322 1323 ··· 1332 1333 * Adds a virtual CPU to the VM specified by vm with the ID given by vcpu_id. 1333 1334 * No additional vCPU setup is done. Returns the vCPU. 
1334 1335 */ 1335 - struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) 1336 + struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, u32 vcpu_id) 1336 1337 { 1337 1338 struct kvm_vcpu *vcpu; 1338 1339 ··· 1776 1777 1777 1778 void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu) 1778 1779 { 1779 - uint32_t page_size = getpagesize(); 1780 - uint32_t size = vcpu->vm->dirty_ring_size; 1780 + u32 page_size = getpagesize(); 1781 + u32 size = vcpu->vm->dirty_ring_size; 1781 1782 1782 1783 TEST_ASSERT(size > 0, "Should enable dirty ring first"); 1783 1784 ··· 1806 1807 * Device Ioctl 1807 1808 */ 1808 1809 1809 - int __kvm_has_device_attr(int dev_fd, uint32_t group, u64 attr) 1810 + int __kvm_has_device_attr(int dev_fd, u32 group, u64 attr) 1810 1811 { 1811 1812 struct kvm_device_attr attribute = { 1812 1813 .group = group, ··· 1841 1842 return err ? : create_dev.fd; 1842 1843 } 1843 1844 1844 - int __kvm_device_attr_get(int dev_fd, uint32_t group, u64 attr, void *val) 1845 + int __kvm_device_attr_get(int dev_fd, u32 group, u64 attr, void *val) 1845 1846 { 1846 1847 struct kvm_device_attr kvmattr = { 1847 1848 .group = group, ··· 1853 1854 return __kvm_ioctl(dev_fd, KVM_GET_DEVICE_ATTR, &kvmattr); 1854 1855 } 1855 1856 1856 - int __kvm_device_attr_set(int dev_fd, uint32_t group, u64 attr, void *val) 1857 + int __kvm_device_attr_set(int dev_fd, u32 group, u64 attr, void *val) 1857 1858 { 1858 1859 struct kvm_device_attr kvmattr = { 1859 1860 .group = group, ··· 1869 1870 * IRQ related functions. 1870 1871 */ 1871 1872 1872 - int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level) 1873 + int _kvm_irq_line(struct kvm_vm *vm, u32 irq, int level) 1873 1874 { 1874 1875 struct kvm_irq_level irq_level = { 1875 1876 .irq = irq, ··· 1879 1880 return __vm_ioctl(vm, KVM_IRQ_LINE, &irq_level); 1880 1881 } 1881 1882 1882 - void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level) 1883 + void kvm_irq_line(struct kvm_vm *vm, u32 irq, int level) 1883 1884 { 1884 1885 int ret = _kvm_irq_line(vm, irq, level); 1885 1886 ··· 1901 1902 } 1902 1903 1903 1904 void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing, 1904 - uint32_t gsi, uint32_t pin) 1905 + u32 gsi, u32 pin) 1905 1906 { 1906 1907 int i; 1907 1908 ··· 2087 2088 * not enough pages are available at or above paddr_min. 2088 2089 */ 2089 2090 gpa_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, 2090 - gpa_t paddr_min, uint32_t memslot, 2091 + gpa_t paddr_min, u32 memslot, 2091 2092 bool protected) 2092 2093 { 2093 2094 struct userspace_mem_region *region; ··· 2132 2133 return base * vm->page_size; 2133 2134 } 2134 2135 2135 - gpa_t vm_phy_page_alloc(struct kvm_vm *vm, gpa_t paddr_min, uint32_t memslot) 2136 + gpa_t vm_phy_page_alloc(struct kvm_vm *vm, gpa_t paddr_min, u32 memslot) 2136 2137 { 2137 2138 return vm_phy_pages_alloc(vm, 1, paddr_min, memslot); 2138 2139 }
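Note: __vm_set_user_memory_region2() above returns the raw ioctl result, while vm_set_user_memory_region2() asserts success. A hedged usage sketch against the signatures shown in this hunk, creating a guest_memfd-backed slot; the slot number, GPA, and size are made up, and hva must be a valid host mapping:

#include "kvm_util.h"

static void add_guest_memfd_slot(struct kvm_vm *vm, void *hva)
{
        const u32 slot = 10, flags = KVM_MEM_GUEST_MEMFD;
        const u64 gpa = 0x10000000, size = 0x200000;
        int gmem_fd = vm_create_guest_memfd(vm, size, 0);

        /* Dies via TEST_ASSERT on failure instead of returning -errno. */
        vm_set_user_memory_region2(vm, slot, flags, gpa, size, hva,
                                   gmem_fd, 0);
}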
+3 -3
tools/testing/selftests/kvm/lib/loongarch/processor.c
··· 118 118 119 119 void virt_arch_pg_map(struct kvm_vm *vm, u64 vaddr, u64 paddr) 120 120 { 121 - uint32_t prot_bits; 121 + u32 prot_bits; 122 122 u64 *ptep; 123 123 124 124 TEST_ASSERT((vaddr % vm->page_size) == 0, ··· 223 223 handlers->exception_handlers[vector] = handler; 224 224 } 225 225 226 - uint32_t guest_get_vcpuid(void) 226 + u32 guest_get_vcpuid(void) 227 227 { 228 228 return csr_read(LOONGARCH_CSR_CPUID); 229 229 } ··· 369 369 loongarch_set_csr(vcpu, LOONGARCH_CSR_TMID, vcpu->id); 370 370 } 371 371 372 - struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) 372 + struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id) 373 373 { 374 374 size_t stack_size; 375 375 u64 stack_vaddr;
+2 -2
tools/testing/selftests/kvm/lib/memstress.c
··· 44 44 * Continuously write to the first 8 bytes of each page in the 45 45 * specified region. 46 46 */ 47 - void memstress_guest_code(uint32_t vcpu_idx) 47 + void memstress_guest_code(u32 vcpu_idx) 48 48 { 49 49 struct memstress_args *args = &memstress_args; 50 50 struct memstress_vcpu_args *vcpu_args = &args->vcpu_args[vcpu_idx]; ··· 232 232 kvm_vm_free(vm); 233 233 } 234 234 235 - void memstress_set_write_percent(struct kvm_vm *vm, uint32_t write_percent) 235 + void memstress_set_write_percent(struct kvm_vm *vm, u32 write_percent) 236 236 { 237 237 memstress_args.write_percent = write_percent; 238 238 sync_global_to_guest(vm, memstress_args.write_percent);
+3 -3
tools/testing/selftests/kvm/lib/riscv/processor.c
··· 45 45 PGTBL_L3_INDEX_MASK, 46 46 }; 47 47 48 - static uint32_t pte_index_shift[] = { 48 + static u32 pte_index_shift[] = { 49 49 PGTBL_L0_INDEX_SHIFT, 50 50 PGTBL_L1_INDEX_SHIFT, 51 51 PGTBL_L2_INDEX_SHIFT, ··· 311 311 vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.pc), (unsigned long)guest_code); 312 312 } 313 313 314 - struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) 314 + struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id) 315 315 { 316 316 int r; 317 317 size_t stack_size; ··· 470 470 handlers->exception_handlers[1][0] = handler; 471 471 } 472 472 473 - uint32_t guest_get_vcpuid(void) 473 + u32 guest_get_vcpuid(void) 474 474 { 475 475 return csr_read(CSR_SSCRATCH); 476 476 }
+1 -1
tools/testing/selftests/kvm/lib/s390/processor.c
··· 160 160 vcpu->run->psw_addr = (uintptr_t)guest_code; 161 161 } 162 162 163 - struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) 163 + struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id) 164 164 { 165 165 size_t stack_size = DEFAULT_STACK_PGS * getpagesize(); 166 166 u64 stack_vaddr;
+2 -2
tools/testing/selftests/kvm/lib/sparsebit.c
··· 80 80 * typedef u64 sparsebit_num_t; 81 81 * 82 82 * sparsebit_idx_t idx; 83 - * uint32_t mask; 83 + * u32 mask; 84 84 * sparsebit_num_t num_after; 85 85 * 86 86 * The idx member contains the bit index of the first bit described by this ··· 162 162 163 163 #define DUMP_LINE_MAX 100 /* Does not include indent amount */ 164 164 165 - typedef uint32_t mask_t; 165 + typedef u32 mask_t; 166 166 #define MASK_BITS (sizeof(mask_t) * CHAR_BIT) 167 167 168 168 struct node {
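Note: assuming the standard sparsebit node semantics sketched in the comment above (mask describes the MASK_BITS bits starting at idx; num_after counts additional consecutive set bits immediately after that window), a node compresses long runs of set bits into a constant-size record. A hedged decoding sketch with an illustrative node layout:

#include <stdio.h>
#include <stdint.h>
#include <limits.h>

typedef uint32_t u32;
typedef uint64_t u64;

typedef u32 mask_t;
#define MASK_BITS (sizeof(mask_t) * CHAR_BIT)

struct node_view {      /* illustrative, mirrors the comment above */
        u64 idx;
        mask_t mask;
        u64 num_after;
};

/* Count the set bits a single node represents. */
static u64 node_num_set(const struct node_view *n)
{
        return (u64)__builtin_popcount(n->mask) + n->num_after;
}

int main(void)
{
        /* Bits 64..95 all set, plus 100 consecutive set bits from 96. */
        struct node_view n = { .idx = 64, .mask = ~(mask_t)0, .num_after = 100 };

        printf("%llu bits set\n", (unsigned long long)node_num_set(&n)); /* 132 */
        return 0;
}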
+7 -7
tools/testing/selftests/kvm/lib/test_util.c
··· 30 30 * Park-Miller LCG using standard constants. 31 31 */ 32 32 33 - struct guest_random_state new_guest_random_state(uint32_t seed) 33 + struct guest_random_state new_guest_random_state(u32 seed) 34 34 { 35 35 struct guest_random_state s = {.seed = seed}; 36 36 return s; 37 37 } 38 38 39 - uint32_t guest_random_u32(struct guest_random_state *state) 39 + u32 guest_random_u32(struct guest_random_state *state) 40 40 { 41 - state->seed = (u64)state->seed * 48271 % ((uint32_t)(1 << 31) - 1); 41 + state->seed = (u64)state->seed * 48271 % ((u32)(1 << 31) - 1); 42 42 return state->seed; 43 43 } 44 44 ··· 225 225 #define ANON_FLAGS (MAP_PRIVATE | MAP_ANONYMOUS) 226 226 #define ANON_HUGE_FLAGS (ANON_FLAGS | MAP_HUGETLB) 227 227 228 - const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i) 228 + const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(u32 i) 229 229 { 230 230 static const struct vm_mem_backing_src_alias aliases[] = { 231 231 [VM_MEM_SRC_ANONYMOUS] = { ··· 317 317 318 318 #define MAP_HUGE_PAGE_SIZE(x) (1ULL << ((x >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK)) 319 319 320 - size_t get_backing_src_pagesz(uint32_t i) 320 + size_t get_backing_src_pagesz(u32 i) 321 321 { 322 - uint32_t flag = vm_mem_backing_src_alias(i)->flag; 322 + u32 flag = vm_mem_backing_src_alias(i)->flag; 323 323 324 324 switch (i) { 325 325 case VM_MEM_SRC_ANONYMOUS: ··· 335 335 } 336 336 } 337 337 338 - bool is_backing_src_hugetlb(uint32_t i) 338 + bool is_backing_src_hugetlb(u32 i) 339 339 { 340 340 return !!(vm_mem_backing_src_alias(i)->flag & MAP_HUGETLB); 341 341 }
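Note: guest_random_u32() above is the MINSTD Park-Miller generator, i.e. the recurrence seed' = seed * 48271 mod (2^31 - 1). A standalone sketch of the recurrence; the first output from seed 1 is 48271, and the 64-bit intermediate is what keeps the multiply from overflowing:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;
typedef uint64_t u64;

static u32 minstd_next(u32 seed)
{
        return (u64)seed * 48271 % (((u32)1 << 31) - 1);
}

int main(void)
{
        u32 seed = 1;
        int i;

        for (i = 0; i < 3; i++) {
                seed = minstd_next(seed);
                printf("%u\n", seed);   /* 48271 first, then two more values */
        }
        return 0;
}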
+12 -12
tools/testing/selftests/kvm/lib/x86/processor.c
··· 525 525 */ 526 526 void tdp_identity_map_default_memslots(struct kvm_vm *vm) 527 527 { 528 - uint32_t s, memslot = 0; 528 + u32 s, memslot = 0; 529 529 sparsebit_idx_t i, last; 530 530 struct userspace_mem_region *region = memslot2region(vm, memslot); 531 531 ··· 821 821 vcpu_regs_set(vcpu, &regs); 822 822 } 823 823 824 - struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) 824 + struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id) 825 825 { 826 826 struct kvm_mp_state mp_state; 827 827 struct kvm_regs regs; ··· 872 872 return vcpu; 873 873 } 874 874 875 - struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id) 875 + struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, u32 vcpu_id) 876 876 { 877 877 struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id); 878 878 ··· 907 907 return kvm_supported_cpuid; 908 908 } 909 909 910 - static uint32_t __kvm_cpu_has(const struct kvm_cpuid2 *cpuid, 911 - uint32_t function, uint32_t index, 912 - uint8_t reg, uint8_t lo, uint8_t hi) 910 + static u32 __kvm_cpu_has(const struct kvm_cpuid2 *cpuid, 911 + u32 function, u32 index, 912 + uint8_t reg, uint8_t lo, uint8_t hi) 913 913 { 914 914 const struct kvm_cpuid_entry2 *entry; 915 915 int i; ··· 936 936 feature.reg, feature.bit, feature.bit); 937 937 } 938 938 939 - uint32_t kvm_cpuid_property(const struct kvm_cpuid2 *cpuid, 940 - struct kvm_x86_cpu_property property) 939 + u32 kvm_cpuid_property(const struct kvm_cpuid2 *cpuid, 940 + struct kvm_x86_cpu_property property) 941 941 { 942 942 return __kvm_cpu_has(cpuid, property.function, property.index, 943 943 property.reg, property.lo_bit, property.hi_bit); ··· 1019 1019 1020 1020 void vcpu_set_cpuid_property(struct kvm_vcpu *vcpu, 1021 1021 struct kvm_x86_cpu_property property, 1022 - uint32_t value) 1022 + u32 value) 1023 1023 { 1024 1024 struct kvm_cpuid_entry2 *entry; 1025 1025 ··· 1034 1034 TEST_ASSERT_EQ(kvm_cpuid_property(vcpu->cpuid, property), value); 1035 1035 } 1036 1036 1037 - void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, uint32_t function) 1037 + void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, u32 function) 1038 1038 { 1039 1039 struct kvm_cpuid_entry2 *entry = vcpu_get_cpuid_entry(vcpu, function); 1040 1040 ··· 1196 1196 return list; 1197 1197 } 1198 1198 1199 - bool kvm_msr_is_in_save_restore_list(uint32_t msr_index) 1199 + bool kvm_msr_is_in_save_restore_list(u32 msr_index) 1200 1200 { 1201 1201 const struct kvm_msr_list *list = kvm_get_msr_index_list(); 1202 1202 int i; ··· 1327 1327 } 1328 1328 1329 1329 const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid, 1330 - uint32_t function, uint32_t index) 1330 + u32 function, u32 index) 1331 1331 { 1332 1332 int i; 1333 1333
+2 -2
tools/testing/selftests/kvm/lib/x86/sev.c
··· 79 79 vm_sev_ioctl(vm, KVM_SEV_INIT2, &init); 80 80 } 81 81 82 - void sev_vm_launch(struct kvm_vm *vm, uint32_t policy) 82 + void sev_vm_launch(struct kvm_vm *vm, u32 policy) 83 83 { 84 84 struct kvm_sev_launch_start launch_start = { 85 85 .policy = policy, ··· 158 158 vm_sev_ioctl(vm, KVM_SEV_SNP_LAUNCH_FINISH, &launch_finish); 159 159 } 160 160 161 - struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code, 161 + struct kvm_vm *vm_sev_create_with_one_vcpu(u32 type, void *guest_code, 162 162 struct kvm_vcpu **cpu) 163 163 { 164 164 struct vm_shape shape = {
+5 -5
tools/testing/selftests/kvm/lib/x86/vmx.c
··· 160 160 wrmsr(MSR_IA32_FEAT_CTL, feature_control | required); 161 161 162 162 /* Enter VMX root operation. */ 163 - *(uint32_t *)(vmx->vmxon) = vmcs_revision(); 163 + *(u32 *)(vmx->vmxon) = vmcs_revision(); 164 164 if (vmxon(vmx->vmxon_gpa)) 165 165 return false; 166 166 ··· 170 170 bool load_vmcs(struct vmx_pages *vmx) 171 171 { 172 172 /* Load a VMCS. */ 173 - *(uint32_t *)(vmx->vmcs) = vmcs_revision(); 173 + *(u32 *)(vmx->vmcs) = vmcs_revision(); 174 174 if (vmclear(vmx->vmcs_gpa)) 175 175 return false; 176 176 ··· 178 178 return false; 179 179 180 180 /* Setup shadow VMCS, do not load it yet. */ 181 - *(uint32_t *)(vmx->shadow_vmcs) = vmcs_revision() | 0x80000000ul; 181 + *(u32 *)(vmx->shadow_vmcs) = vmcs_revision() | 0x80000000ul; 182 182 if (vmclear(vmx->shadow_vmcs_gpa)) 183 183 return false; 184 184 ··· 200 200 */ 201 201 static inline void init_vmcs_control_fields(struct vmx_pages *vmx) 202 202 { 203 - uint32_t sec_exec_ctl = 0; 203 + u32 sec_exec_ctl = 0; 204 204 205 205 vmwrite(VIRTUAL_PROCESSOR_ID, 0); 206 206 vmwrite(POSTED_INTR_NV, 0); ··· 259 259 */ 260 260 static inline void init_vmcs_host_state(void) 261 261 { 262 - uint32_t exit_controls = vmreadz(VM_EXIT_CONTROLS); 262 + u32 exit_controls = vmreadz(VM_EXIT_CONTROLS); 263 263 264 264 vmwrite(HOST_ES_SELECTOR, get_es()); 265 265 vmwrite(HOST_CS_SELECTOR, get_cs());
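Note: the *(u32 *) stores above implement the SDM rule that the first doubleword of a VMXON region or VMCS holds the revision identifier from MSR_IA32_VMX_BASIC, with bit 31 set only to mark a shadow VMCS. A minimal sketch of just that tagging; the buffer, the helper name, the VMCS_SHADOW_INDICATOR define, and the revision value are all placeholders (the real code reads the MSR via vmcs_revision()):

#include <stdint.h>

typedef uint32_t u32;

#define VMCS_SHADOW_INDICATOR   0x80000000u     /* bit 31 of the first dword */

/* Stamp a VMCS-sized region the way enter_vmx_operation()/load_vmcs() do. */
static void stamp_revision(void *region, u32 revision, int shadow)
{
        *(u32 *)region = revision | (shadow ? VMCS_SHADOW_INDICATOR : 0);
}

int main(void)
{
        static uint8_t vmcs[4096] __attribute__((aligned(4096)));
        u32 revision = 0x4;     /* placeholder; really from the MSR */

        stamp_revision(vmcs, revision, 0);      /* ordinary VMCS / VMXON region */
        stamp_revision(vmcs, revision, 1);      /* shadow VMCS */
        return 0;
}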
+10 -10
tools/testing/selftests/kvm/loongarch/arch_timer.c
··· 27 27 static void guest_irq_handler(struct ex_regs *regs) 28 28 { 29 29 unsigned int intid; 30 - uint32_t cpu = guest_get_vcpuid(); 30 + u32 cpu = guest_get_vcpuid(); 31 31 u64 xcnt, val, cfg, xcnt_diff_us; 32 32 struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu]; 33 33 ··· 62 62 WRITE_ONCE(shared_data->nr_iter, shared_data->nr_iter + 1); 63 63 } 64 64 65 - static void guest_test_period_timer(uint32_t cpu) 65 + static void guest_test_period_timer(u32 cpu) 66 66 { 67 - uint32_t irq_iter, config_iter; 67 + u32 irq_iter, config_iter; 68 68 u64 us; 69 69 struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu]; 70 70 ··· 86 86 irq_iter); 87 87 } 88 88 89 - static void guest_test_oneshot_timer(uint32_t cpu) 89 + static void guest_test_oneshot_timer(u32 cpu) 90 90 { 91 - uint32_t irq_iter, config_iter; 91 + u32 irq_iter, config_iter; 92 92 u64 us; 93 93 struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu]; 94 94 ··· 112 112 } 113 113 } 114 114 115 - static void guest_test_emulate_timer(uint32_t cpu) 115 + static void guest_test_emulate_timer(u32 cpu) 116 116 { 117 - uint32_t config_iter; 117 + u32 config_iter; 118 118 u64 xcnt_diff_us, us; 119 119 struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu]; 120 120 ··· 136 136 local_irq_enable(); 137 137 } 138 138 139 - static void guest_time_count_test(uint32_t cpu) 139 + static void guest_time_count_test(u32 cpu) 140 140 { 141 - uint32_t config_iter; 141 + u32 config_iter; 142 142 unsigned long start, end, prev, us; 143 143 144 144 /* Assuming that test case starts to run in 1 second */ ··· 165 165 166 166 static void guest_code(void) 167 167 { 168 - uint32_t cpu = guest_get_vcpuid(); 168 + u32 cpu = guest_get_vcpuid(); 169 169 170 170 /* must run at first */ 171 171 guest_time_count_test(cpu);
+3 -3
tools/testing/selftests/kvm/loongarch/pmu_test.c
··· 15 15 /* Check PMU support */ 16 16 static bool has_pmu_support(void) 17 17 { 18 - uint32_t cfg6; 18 + u32 cfg6; 19 19 20 20 /* Read CPUCFG6 to check PMU */ 21 21 cfg6 = read_cpucfg(LOONGARCH_CPUCFG6); ··· 34 34 /* Dump PMU capabilities */ 35 35 static void dump_pmu_caps(void) 36 36 { 37 - uint32_t cfg6; 37 + u32 cfg6; 38 38 int nr_counters, counter_bits; 39 39 40 40 cfg6 = read_cpucfg(LOONGARCH_CPUCFG6); ··· 51 51 static void guest_pmu_base_test(void) 52 52 { 53 53 int i; 54 - uint32_t cfg6, pmnum; 54 + u32 cfg6, pmnum; 55 55 u64 cnt[4]; 56 56 57 57 cfg6 = read_cpucfg(LOONGARCH_CPUCFG6);
+25 -25
tools/testing/selftests/kvm/memslot_perf_test.c
··· 85 85 struct kvm_vm *vm; 86 86 struct kvm_vcpu *vcpu; 87 87 pthread_t vcpu_thread; 88 - uint32_t nslots; 88 + u32 nslots; 89 89 u64 npages; 90 90 u64 pages_per_slot; 91 91 void **hva_slots; ··· 95 95 }; 96 96 97 97 struct sync_area { 98 - uint32_t guest_page_size; 98 + u32 guest_page_size; 99 99 atomic_bool start_flag; 100 100 atomic_bool exit_flag; 101 101 atomic_bool sync_flag; ··· 189 189 static void *vm_gpa2hva(struct vm_data *data, u64 gpa, u64 *rempages) 190 190 { 191 191 u64 gpage, pgoffs; 192 - uint32_t slot, slotoffs; 192 + u32 slot, slotoffs; 193 193 void *base; 194 - uint32_t guest_page_size = data->vm->page_size; 194 + u32 guest_page_size = data->vm->page_size; 195 195 196 196 TEST_ASSERT(gpa >= MEM_GPA, "Too low gpa to translate"); 197 197 TEST_ASSERT(gpa < MEM_GPA + data->npages * guest_page_size, ··· 220 220 return (uint8_t *)base + slotoffs * guest_page_size + pgoffs; 221 221 } 222 222 223 - static u64 vm_slot2gpa(struct vm_data *data, uint32_t slot) 223 + static u64 vm_slot2gpa(struct vm_data *data, u32 slot) 224 224 { 225 - uint32_t guest_page_size = data->vm->page_size; 225 + u32 guest_page_size = data->vm->page_size; 226 226 227 227 TEST_ASSERT(slot < data->nslots, "Too high slot number"); 228 228 ··· 243 243 return data; 244 244 } 245 245 246 - static bool check_slot_pages(uint32_t host_page_size, uint32_t guest_page_size, 246 + static bool check_slot_pages(u32 host_page_size, u32 guest_page_size, 247 247 u64 pages_per_slot, u64 rempages) 248 248 { 249 249 if (!pages_per_slot) ··· 259 259 } 260 260 261 261 262 - static u64 get_max_slots(struct vm_data *data, uint32_t host_page_size) 262 + static u64 get_max_slots(struct vm_data *data, u32 host_page_size) 263 263 { 264 - uint32_t guest_page_size = data->vm->page_size; 264 + u32 guest_page_size = data->vm->page_size; 265 265 u64 mempages, pages_per_slot, rempages; 266 266 u64 slots; 267 267 ··· 287 287 { 288 288 u64 mempages, rempages; 289 289 u64 guest_addr; 290 - uint32_t slot, host_page_size, guest_page_size; 290 + u32 slot, host_page_size, guest_page_size; 291 291 struct timespec tstart; 292 292 struct sync_area *sync; 293 293 ··· 448 448 static void guest_code_test_memslot_move(void) 449 449 { 450 450 struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA; 451 - uint32_t page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size); 451 + u32 page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size); 452 452 uintptr_t base = (typeof(base))READ_ONCE(sync->move_area_ptr); 453 453 454 454 GUEST_SYNC(0); ··· 477 477 static void guest_code_test_memslot_map(void) 478 478 { 479 479 struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA; 480 - uint32_t page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size); 480 + u32 page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size); 481 481 482 482 GUEST_SYNC(0); 483 483 ··· 544 544 static void guest_code_test_memslot_rw(void) 545 545 { 546 546 struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA; 547 - uint32_t page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size); 547 + u32 page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size); 548 548 549 549 GUEST_SYNC(0); 550 550 ··· 579 579 struct sync_area *sync, 580 580 u64 *maxslots, bool isactive) 581 581 { 582 - uint32_t guest_page_size = data->vm->page_size; 582 + u32 guest_page_size = data->vm->page_size; 583 583 u64 movesrcgpa, movetestgpa; 584 584 585 585 #ifdef __x86_64__ ··· 639 639 u64 offsp, u64 count) 640 640 { 641 641 u64 gpa, ctr; 642 - uint32_t guest_page_size = data->vm->page_size; 642 
- uint32_t guest_page_size = data->vm->page_size; 642
+ u32 guest_page_size = data->vm->page_size; 643 643 644 644 for (gpa = MEM_TEST_GPA + offsp * guest_page_size, ctr = 0; ctr < count; ) { 645 645 u64 npages; ··· 665 665 { 666 666 u64 gpa; 667 667 u64 *val; 668 - uint32_t guest_page_size = data->vm->page_size; 668 + u32 guest_page_size = data->vm->page_size; 669 669 670 670 if (!map_unmap_verify) 671 671 return; ··· 680 680 681 681 static void test_memslot_map_loop(struct vm_data *data, struct sync_area *sync) 682 682 { 683 - uint32_t guest_page_size = data->vm->page_size; 683 + u32 guest_page_size = data->vm->page_size; 684 684 u64 guest_pages = MEM_TEST_MAP_SIZE / guest_page_size; 685 685 686 686 /* ··· 720 720 struct sync_area *sync, 721 721 u64 chunk) 722 722 { 723 - uint32_t guest_page_size = data->vm->page_size; 723 + u32 guest_page_size = data->vm->page_size; 724 724 u64 guest_pages = MEM_TEST_UNMAP_SIZE / guest_page_size; 725 725 u64 ctr; 726 726 ··· 746 746 static void test_memslot_unmap_loop(struct vm_data *data, 747 747 struct sync_area *sync) 748 748 { 749 - uint32_t host_page_size = getpagesize(); 750 - uint32_t guest_page_size = data->vm->page_size; 749 + u32 host_page_size = getpagesize(); 750 + u32 guest_page_size = data->vm->page_size; 751 751 u64 guest_chunk_pages = guest_page_size >= host_page_size ? 752 752 1 : host_page_size / guest_page_size; 753 753 ··· 757 757 static void test_memslot_unmap_loop_chunked(struct vm_data *data, 758 758 struct sync_area *sync) 759 759 { 760 - uint32_t guest_page_size = data->vm->page_size; 760 + u32 guest_page_size = data->vm->page_size; 761 761 u64 guest_chunk_pages = MEM_TEST_UNMAP_CHUNK_SIZE / guest_page_size; 762 762 763 763 test_memslot_unmap_loop_common(data, sync, guest_chunk_pages); ··· 766 766 static void test_memslot_rw_loop(struct vm_data *data, struct sync_area *sync) 767 767 { 768 768 u64 gptr; 769 - uint32_t guest_page_size = data->vm->page_size; 769 + u32 guest_page_size = data->vm->page_size; 770 770 771 771 for (gptr = MEM_TEST_GPA + guest_page_size / 2; 772 772 gptr < MEM_TEST_GPA + MEM_TEST_SIZE; gptr += guest_page_size) ··· 924 924 925 925 static bool check_memory_sizes(void) 926 926 { 927 - uint32_t host_page_size = getpagesize(); 928 - uint32_t guest_page_size = vm_guest_mode_params[VM_MODE_DEFAULT].page_size; 927 + u32 host_page_size = getpagesize(); 928 + u32 guest_page_size = vm_guest_mode_params[VM_MODE_DEFAULT].page_size; 929 929 930 930 if (host_page_size > SZ_64K || guest_page_size > SZ_64K) { 931 931 pr_info("Unsupported page size on host (0x%x) or guest (0x%x)\n", ··· 961 961 static bool parse_args(int argc, char *argv[], 962 962 struct test_args *targs) 963 963 { 964 - uint32_t max_mem_slots; 964 + u32 max_mem_slots; 965 965 int opt; 966 966 967 967 while ((opt = getopt(argc, argv, "hvdqs:f:e:l:r:")) != -1) {
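Note: vm_gpa2hva() above turns a guest physical address into a host pointer by splitting the linear page index into a slot number and an in-slot offset. A hedged sketch of that decomposition with made-up geometry; the real function also handles the remainder pages packed into the last slot:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;
typedef uint64_t u64;

int main(void)
{
        u64 mem_gpa = 0x10000000, page_size = 4096, pages_per_slot = 512;
        u64 gpa = 0x10300123;

        u64 gpage  = (gpa - mem_gpa) / page_size;  /* linear page index: 768 */
        u64 pgoffs = (gpa - mem_gpa) % page_size;  /* offset within page: 291 */
        u32 slot     = gpage / pages_per_slot;     /* 1 */
        u32 slotoffs = gpage % pages_per_slot;     /* 256 */

        printf("slot %u, page %u in slot, byte offset %llu\n",
               slot, slotoffs, (unsigned long long)pgoffs);
        return 0;
}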
+1 -1
tools/testing/selftests/kvm/pre_fault_memory_test.c
··· 34 34 struct slot_worker_data { 35 35 struct kvm_vm *vm; 36 36 u64 gpa; 37 - uint32_t flags; 37 + u32 flags; 38 38 bool worker_ready; 39 39 bool prefault_ready; 40 40 bool recreate_slot;
+3 -3
tools/testing/selftests/kvm/riscv/arch_timer.c
··· 19 19 { 20 20 u64 xcnt, xcnt_diff_us, cmp; 21 21 unsigned int intid = regs->cause & ~CAUSE_IRQ_FLAG; 22 - uint32_t cpu = guest_get_vcpuid(); 22 + u32 cpu = guest_get_vcpuid(); 23 23 struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu]; 24 24 25 25 timer_irq_disable(); ··· 40 40 41 41 static void guest_run(struct test_vcpu_shared_data *shared_data) 42 42 { 43 - uint32_t irq_iter, config_iter; 43 + u32 irq_iter, config_iter; 44 44 45 45 shared_data->nr_iter = 0; 46 46 shared_data->guest_stage = 0; ··· 66 66 67 67 static void guest_code(void) 68 68 { 69 - uint32_t cpu = guest_get_vcpuid(); 69 + u32 cpu = guest_get_vcpuid(); 70 70 struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu]; 71 71 72 72 timer_irq_disable();
+9 -9
tools/testing/selftests/kvm/s390/memop.c
··· 42 42 unsigned int _set_flags : 1; 43 43 unsigned int _sida_offset : 1; 44 44 unsigned int _ar : 1; 45 - uint32_t size; 45 + u32 size; 46 46 enum mop_target target; 47 47 enum mop_access_mode mode; 48 48 void *buf; 49 - uint32_t sida_offset; 49 + u32 sida_offset; 50 50 void *old; 51 51 uint8_t old_value[16]; 52 52 bool *cmpxchg_success; ··· 296 296 TEST_ASSERT(!memcmp(p1, p2, size), "Memory contents do not match!") 297 297 298 298 static void default_write_read(struct test_info copy_cpu, struct test_info mop_cpu, 299 - enum mop_target mop_target, uint32_t size, uint8_t key) 299 + enum mop_target mop_target, u32 size, uint8_t key) 300 300 { 301 301 prepare_mem12(); 302 302 CHECK_N_DO(MOP, mop_cpu, mop_target, WRITE, mem1, size, ··· 308 308 } 309 309 310 310 static void default_read(struct test_info copy_cpu, struct test_info mop_cpu, 311 - enum mop_target mop_target, uint32_t size, uint8_t key) 311 + enum mop_target mop_target, u32 size, uint8_t key) 312 312 { 313 313 prepare_mem12(); 314 314 CHECK_N_DO(MOP, mop_cpu, mop_target, WRITE, mem1, size, GADDR_V(mem1)); ··· 487 487 case 2: 488 488 return (uint16_t)val; 489 489 case 4: 490 - return (uint32_t)val; 490 + return (u32)val; 491 491 case 8: 492 492 return (u64)val; 493 493 case 16: ··· 585 585 586 586 switch (size) { 587 587 case 4: { 588 - uint32_t old = *old_addr; 588 + u32 old = *old_addr; 589 589 590 590 asm volatile ("cs %[old],%[new],%[address]" 591 591 : [old] "+d" (old), 592 - [address] "+Q" (*(uint32_t *)(target)) 593 - : [new] "d" ((uint32_t)new) 592 + [address] "+Q" (*(u32 *)(target)) 593 + : [new] "d" ((u32)new) 594 594 : "cc" 595 595 ); 596 - ret = old == (uint32_t)*old_addr; 596 + ret = old == (u32)*old_addr; 597 597 *old_addr = old; 598 598 return ret; 599 599 }
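Note: the 4-byte case above uses the s390 COMPARE AND SWAP ("cs") instruction; success means the memory word still held the expected old value, and on failure the observed value is written back through old_addr. A portable sketch of the same semantics using the GCC builtin (this is an illustration of the contract, not the selftest's code path):

#include <stdbool.h>
#include <stdint.h>
#include <assert.h>

typedef uint32_t u32;

/* On failure, *old is refreshed with the current value, as the asm above does. */
static bool cas32(u32 *target, u32 *old, u32 newval)
{
        return __atomic_compare_exchange_n(target, old, newval, false,
                                           __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

int main(void)
{
        u32 word = 5, expected = 5;

        assert(cas32(&word, &expected, 9) && word == 9);

        expected = 5;   /* stale: word is now 9 */
        assert(!cas32(&word, &expected, 7) && expected == 9);
        return 0;
}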
+4 -4
tools/testing/selftests/kvm/set_memory_region_test.c
··· 345 345 346 346 static void test_invalid_memory_region_flags(void) 347 347 { 348 - uint32_t supported_flags = KVM_MEM_LOG_DIRTY_PAGES; 349 - const uint32_t v2_only_flags = KVM_MEM_GUEST_MEMFD; 348 + u32 supported_flags = KVM_MEM_LOG_DIRTY_PAGES; 349 + const u32 v2_only_flags = KVM_MEM_GUEST_MEMFD; 350 350 struct kvm_vm *vm; 351 351 int r, i; 352 352 ··· 410 410 { 411 411 int ret; 412 412 struct kvm_vm *vm; 413 - uint32_t max_mem_slots; 414 - uint32_t slot; 413 + u32 max_mem_slots; 414 + u32 slot; 415 415 void *mem, *mem_aligned, *mem_extra; 416 416 size_t alignment = 1; 417 417
+16 -16
tools/testing/selftests/kvm/steal_time.c
··· 42 42 static void guest_code(int cpu) 43 43 { 44 44 struct kvm_steal_time *st = st_gva[cpu]; 45 - uint32_t version; 45 + u32 version; 46 46 47 47 GUEST_ASSERT_EQ(rdmsr(MSR_KVM_STEAL_TIME), ((u64)st_gva[cpu] | KVM_MSR_ENABLED)); 48 48 ··· 67 67 return kvm_cpu_has(X86_FEATURE_KVM_STEAL_TIME); 68 68 } 69 69 70 - static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i) 70 + static void steal_time_init(struct kvm_vcpu *vcpu, u32 i) 71 71 { 72 72 /* ST_GPA_BASE is identity mapped */ 73 73 st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE); ··· 76 76 vcpu_set_msr(vcpu, MSR_KVM_STEAL_TIME, (ulong)st_gva[i] | KVM_MSR_ENABLED); 77 77 } 78 78 79 - static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx) 79 + static void steal_time_dump(struct kvm_vm *vm, u32 vcpu_idx) 80 80 { 81 81 struct kvm_steal_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]); 82 82 ··· 118 118 #define PV_TIME_ST 0xc5000021 119 119 120 120 struct st_time { 121 - uint32_t rev; 122 - uint32_t attr; 121 + u32 rev; 122 + u32 attr; 123 123 u64 st_time; 124 124 }; 125 125 126 - static s64 smccc(uint32_t func, u64 arg) 126 + static s64 smccc(u32 func, u64 arg) 127 127 { 128 128 struct arm_smccc_res res; 129 129 ··· 175 175 return !__vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &dev); 176 176 } 177 177 178 - static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i) 178 + static void steal_time_init(struct kvm_vcpu *vcpu, u32 i) 179 179 { 180 180 struct kvm_vm *vm = vcpu->vm; 181 181 u64 st_ipa; ··· 194 194 vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev); 195 195 } 196 196 197 - static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx) 197 + static void steal_time_dump(struct kvm_vm *vm, u32 vcpu_idx) 198 198 { 199 199 struct st_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]); 200 200 ··· 242 242 static gpa_t st_gpa[NR_VCPUS]; 243 243 244 244 struct sta_struct { 245 - uint32_t sequence; 246 - uint32_t flags; 245 + u32 sequence; 246 + u32 flags; 247 247 u64 steal; 248 248 uint8_t preempted; 249 249 uint8_t pad[47]; ··· 272 272 static void guest_code(int cpu) 273 273 { 274 274 struct sta_struct *st = st_gva[cpu]; 275 - uint32_t sequence; 275 + u32 sequence; 276 276 long out_val = 0; 277 277 bool probe; 278 278 ··· 305 305 return enabled; 306 306 } 307 307 308 - static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i) 308 + static void steal_time_init(struct kvm_vcpu *vcpu, u32 i) 309 309 { 310 310 /* ST_GPA_BASE is identity mapped */ 311 311 st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE); ··· 314 314 sync_global_to_guest(vcpu->vm, st_gpa[i]); 315 315 } 316 316 317 - static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx) 317 + static void steal_time_dump(struct kvm_vm *vm, u32 vcpu_idx) 318 318 { 319 319 struct sta_struct *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]); 320 320 int i; ··· 388 388 389 389 static void guest_code(int cpu) 390 390 { 391 - uint32_t version; 391 + u32 version; 392 392 struct kvm_steal_time *st = st_gva[cpu]; 393 393 394 394 memset(st, 0, sizeof(*st)); ··· 428 428 return val & BIT(KVM_FEATURE_STEAL_TIME); 429 429 } 430 430 431 - static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i) 431 + static void steal_time_init(struct kvm_vcpu *vcpu, u32 i) 432 432 { 433 433 int err; 434 434 u64 st_gpa; ··· 451 451 TEST_ASSERT(err == 0, "Fail to set PV stealtime GPA"); 452 452 } 453 453 454 - static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx) 454 + static void steal_time_dump(struct kvm_vm *vm, u32 vcpu_idx) 455 455 { 456 456 struct kvm_steal_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]); 457 457
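Context for the rename in the hunks above (an illustrative sketch, not part of the commit): in kernel-style code, u32 is an alias for a 32-bit unsigned integer built on the UAPI __u32 type, so replacing uint32_t with u32 changes spelling only. The typedefs below are paraphrased from the kernel's int-ll64.h and tools-side types headers rather than quoted from this tree, and the check assumes a hosted C11 compiler:

#include <stdint.h>

typedef unsigned int __u32;	/* paraphrased from the kernel's int-ll64.h */
typedef __u32 u32;		/* paraphrased from tools/include/linux/types.h */

/* The substitution is purely textual: identical size and signedness. */
_Static_assert(sizeof(u32) == sizeof(uint32_t), "u32 is 32 bits");
_Static_assert((u32)-1 == (uint32_t)-1, "u32 is unsigned");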
+2 -2
tools/testing/selftests/kvm/x86/amx_test.c
··· 82 82 83 83 static inline void __xsavec(struct xstate *xstate, u64 rfbm) 84 84 { 85 - uint32_t rfbm_lo = rfbm; 86 - uint32_t rfbm_hi = rfbm >> 32; 85 + u32 rfbm_lo = rfbm; 86 + u32 rfbm_hi = rfbm >> 32; 87 87 88 88 asm volatile("xsavec (%%rdi)" 89 89 : : "D" (xstate), "a" (rfbm_lo), "d" (rfbm_hi)
+1 -1
tools/testing/selftests/kvm/x86/aperfmperf_test.c
··· 35 35 return open_path_or_exit(path, O_RDONLY); 36 36 } 37 37 38 - static u64 read_dev_msr(int msr_fd, uint32_t msr) 38 + static u64 read_dev_msr(int msr_fd, u32 msr) 39 39 { 40 40 u64 data; 41 41 ssize_t rc;
+6 -6
tools/testing/selftests/kvm/x86/apic_bus_clock_test.c
··· 19 19 * timer frequency. 20 20 */ 21 21 static const struct { 22 - const uint32_t tdcr; 23 - const uint32_t divide_count; 22 + const u32 tdcr; 23 + const u32 divide_count; 24 24 } tdcrs[] = { 25 25 {0x0, 2}, 26 26 {0x1, 4}, ··· 42 42 xapic_enable(); 43 43 } 44 44 45 - static uint32_t apic_read_reg(unsigned int reg) 45 + static u32 apic_read_reg(unsigned int reg) 46 46 { 47 47 return is_x2apic ? x2apic_read_reg(reg) : xapic_read_reg(reg); 48 48 } 49 49 50 - static void apic_write_reg(unsigned int reg, uint32_t val) 50 + static void apic_write_reg(unsigned int reg, u32 val) 51 51 { 52 52 if (is_x2apic) 53 53 x2apic_write_reg(reg, val); ··· 58 58 static void apic_guest_code(u64 apic_hz, u64 delay_ms) 59 59 { 60 60 u64 tsc_hz = guest_tsc_khz * 1000; 61 - const uint32_t tmict = ~0u; 61 + const u32 tmict = ~0u; 62 62 u64 tsc0, tsc1, freq; 63 - uint32_t tmcct; 63 + u32 tmcct; 64 64 int i; 65 65 66 66 apic_enable();
+1 -1
tools/testing/selftests/kvm/x86/debug_regs.c
··· 16 16 #define IRQ_VECTOR 0xAA 17 17 18 18 /* For testing data access debug BP */ 19 - uint32_t guest_value; 19 + u32 guest_value; 20 20 21 21 extern unsigned char sw_bp, hw_bp, write_data, ss_start, bd_start; 22 22
+2 -2
tools/testing/selftests/kvm/x86/fastops_test.c
··· 15 15 "pop %[flags]\n\t" 16 16 17 17 #define flags_constraint(flags_val) [flags]"=r"(flags_val) 18 - #define bt_constraint(__bt_val) [bt_val]"rm"((uint32_t)__bt_val) 18 + #define bt_constraint(__bt_val) [bt_val]"rm"((u32)__bt_val) 19 19 20 20 #define guest_execute_fastop_1(FEP, insn, __val, __flags) \ 21 21 ({ \ ··· 187 187 { 188 188 guest_test_fastops(uint8_t, "b"); 189 189 guest_test_fastops(uint16_t, "w"); 190 - guest_test_fastops(uint32_t, "l"); 190 + guest_test_fastops(u32, "l"); 191 191 guest_test_fastops(u64, "q"); 192 192 193 193 GUEST_DONE();
+4 -4
tools/testing/selftests/kvm/x86/feature_msrs_test.c
··· 12 12 #include "kvm_util.h" 13 13 #include "processor.h" 14 14 15 - static bool is_kvm_controlled_msr(uint32_t msr) 15 + static bool is_kvm_controlled_msr(u32 msr) 16 16 { 17 17 return msr == MSR_IA32_VMX_CR0_FIXED1 || msr == MSR_IA32_VMX_CR4_FIXED1; 18 18 } ··· 21 21 * For VMX MSRs with a "true" variant, KVM requires userspace to set the "true" 22 22 * MSR, and doesn't allow setting the hidden version. 23 23 */ 24 - static bool is_hidden_vmx_msr(uint32_t msr) 24 + static bool is_hidden_vmx_msr(u32 msr) 25 25 { 26 26 switch (msr) { 27 27 case MSR_IA32_VMX_PINBASED_CTLS: ··· 34 34 } 35 35 } 36 36 37 - static bool is_quirked_msr(uint32_t msr) 37 + static bool is_quirked_msr(u32 msr) 38 38 { 39 39 return msr != MSR_AMD64_DE_CFG; 40 40 } 41 41 42 - static void test_feature_msr(uint32_t msr) 42 + static void test_feature_msr(u32 msr) 43 43 { 44 44 const u64 supported_mask = kvm_get_feature_msr(msr); 45 45 u64 reset_value = is_quirked_msr(msr) ? supported_mask : 0;
+1 -1
tools/testing/selftests/kvm/x86/hyperv_evmcs.c
··· 30 30 { 31 31 } 32 32 33 - static inline void rdmsr_from_l2(uint32_t msr) 33 + static inline void rdmsr_from_l2(u32 msr) 34 34 { 35 35 /* Currently, L1 doesn't preserve GPRs during vmexits. */ 36 36 __asm__ __volatile__ ("rdmsr" : : "c"(msr) :
+2 -2
tools/testing/selftests/kvm/x86/hyperv_features.c
··· 22 22 KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EBX, 0) 23 23 24 24 struct msr_data { 25 - uint32_t idx; 25 + u32 idx; 26 26 bool fault_expected; 27 27 bool write; 28 28 u64 write_val; ··· 34 34 bool ud_expected; 35 35 }; 36 36 37 - static bool is_write_only_msr(uint32_t msr) 37 + static bool is_write_only_msr(u32 msr) 38 38 { 39 39 return msr == HV_X64_MSR_EOI; 40 40 }
+1 -1
tools/testing/selftests/kvm/x86/hyperv_svm_test.c
··· 21 21 #define L2_GUEST_STACK_SIZE 256 22 22 23 23 /* Exit to L1 from L2 with RDMSR instruction */ 24 - static inline void rdmsr_from_l2(uint32_t msr) 24 + static inline void rdmsr_from_l2(u32 msr) 25 25 { 26 26 /* Currently, L1 doesn't preserve GPRs during vmexits. */ 27 27 __asm__ __volatile__ ("rdmsr" : : "c"(msr) :
+1 -1
tools/testing/selftests/kvm/x86/kvm_pv_test.c
··· 13 13 #include "processor.h" 14 14 15 15 struct msr_data { 16 - uint32_t idx; 16 + u32 idx; 17 17 const char *name; 18 18 }; 19 19
+5 -5
tools/testing/selftests/kvm/x86/nested_emulation_test.c
··· 14 14 struct emulated_instruction { 15 15 const char name[32]; 16 16 uint8_t opcode[15]; 17 - uint32_t exit_reason[NR_VIRTUALIZATION_FLAVORS]; 17 + u32 exit_reason[NR_VIRTUALIZATION_FLAVORS]; 18 18 }; 19 19 20 20 static struct emulated_instruction instructions[] = { ··· 36 36 static uint8_t l2_guest_code[sizeof(kvm_fep) + 15]; 37 37 static uint8_t *l2_instruction = &l2_guest_code[sizeof(kvm_fep)]; 38 38 39 - static uint32_t get_instruction_length(struct emulated_instruction *insn) 39 + static u32 get_instruction_length(struct emulated_instruction *insn) 40 40 { 41 - uint32_t i; 41 + u32 i; 42 42 43 43 for (i = 0; i < ARRAY_SIZE(insn->opcode) && insn->opcode[i]; i++) 44 44 ; ··· 81 81 82 82 for (i = 0; i < ARRAY_SIZE(instructions); i++) { 83 83 struct emulated_instruction *insn = &instructions[i]; 84 - uint32_t insn_len = get_instruction_length(insn); 85 - uint32_t exit_insn_len; 84 + u32 insn_len = get_instruction_length(insn); 85 + u32 exit_insn_len; 86 86 u32 exit_reason; 87 87 88 88 /*
+2 -2
tools/testing/selftests/kvm/x86/nested_exceptions_test.c
··· 72 72 } 73 73 74 74 static void svm_run_l2(struct svm_test_data *svm, void *l2_code, int vector, 75 - uint32_t error_code) 75 + u32 error_code) 76 76 { 77 77 struct vmcb *vmcb = svm->vmcb; 78 78 struct vmcb_control_area *ctrl = &vmcb->control; ··· 111 111 GUEST_DONE(); 112 112 } 113 113 114 - static void vmx_run_l2(void *l2_code, int vector, uint32_t error_code) 114 + static void vmx_run_l2(void *l2_code, int vector, u32 error_code) 115 115 { 116 116 GUEST_ASSERT(!vmwrite(GUEST_RIP, (u64)l2_code)); 117 117
+1 -1
tools/testing/selftests/kvm/x86/nested_tsc_adjust_test.c
··· 88 88 */ 89 89 if (this_cpu_has(X86_FEATURE_VMX)) { 90 90 struct vmx_pages *vmx_pages = data; 91 - uint32_t control; 91 + u32 control; 92 92 93 93 GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages)); 94 94 GUEST_ASSERT(load_vmcs(vmx_pages));
+1 -1
tools/testing/selftests/kvm/x86/nested_tsc_scaling_test.c
··· 106 106 static void l1_vmx_code(struct vmx_pages *vmx_pages) 107 107 { 108 108 unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; 109 - uint32_t control; 109 + u32 control; 110 110 111 111 /* check that L1's frequency looks alright before launching L2 */ 112 112 check_tsc_freq(UCHECK_L1);
+18 -18
tools/testing/selftests/kvm/x86/pmu_counters_test.c
··· 30 30 #define NUM_INSNS_RETIRED (NUM_LOOPS * NUM_INSNS_PER_LOOP + NUM_EXTRA_INSNS) 31 31 32 32 /* Track which architectural events are supported by hardware. */ 33 - static uint32_t hardware_pmu_arch_events; 33 + static u32 hardware_pmu_arch_events; 34 34 35 35 static uint8_t kvm_pmu_version; 36 36 static bool kvm_has_perf_caps; ··· 153 153 * Sanity check that in all cases, the event doesn't count when it's disabled, 154 154 * and that KVM correctly emulates the write of an arbitrary value. 155 155 */ 156 - static void guest_assert_event_count(uint8_t idx, uint32_t pmc, uint32_t pmc_msr) 156 + static void guest_assert_event_count(uint8_t idx, u32 pmc, u32 pmc_msr) 157 157 { 158 158 u64 count; 159 159 ··· 236 236 FEP "xor %%eax, %%eax\n\t" \ 237 237 FEP "xor %%edx, %%edx\n\t" \ 238 238 "wrmsr\n\t" \ 239 - :: "a"((uint32_t)_value), "d"(_value >> 32), \ 239 + :: "a"((u32)_value), "d"(_value >> 32), \ 240 240 "c"(_msr), "D"(_msr), [m]"m"(kvm_pmu_version) \ 241 241 ); \ 242 242 } while (0) ··· 255 255 guest_assert_event_count(_idx, _pmc, _pmc_msr); \ 256 256 } while (0) 257 257 258 - static void __guest_test_arch_event(uint8_t idx, uint32_t pmc, uint32_t pmc_msr, 259 - uint32_t ctrl_msr, u64 ctrl_msr_value) 258 + static void __guest_test_arch_event(uint8_t idx, u32 pmc, u32 pmc_msr, 259 + u32 ctrl_msr, u64 ctrl_msr_value) 260 260 { 261 261 GUEST_TEST_EVENT(idx, pmc, pmc_msr, ctrl_msr, ctrl_msr_value, ""); 262 262 ··· 266 266 267 267 static void guest_test_arch_event(uint8_t idx) 268 268 { 269 - uint32_t nr_gp_counters = this_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS); 270 - uint32_t pmu_version = guest_get_pmu_version(); 269 + u32 nr_gp_counters = this_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS); 270 + u32 pmu_version = guest_get_pmu_version(); 271 271 /* PERF_GLOBAL_CTRL exists only for Architectural PMU Version 2+. */ 272 272 bool guest_has_perf_global_ctrl = pmu_version >= 2; 273 273 struct kvm_x86_pmu_feature gp_event, fixed_event; 274 - uint32_t base_pmc_msr; 274 + u32 base_pmc_msr; 275 275 unsigned int i; 276 276 277 277 /* The host side shouldn't invoke this without a guest PMU. */ ··· 329 329 } 330 330 331 331 static void test_arch_events(uint8_t pmu_version, u64 perf_capabilities, 332 - uint8_t length, uint32_t unavailable_mask) 332 + uint8_t length, u32 unavailable_mask) 333 333 { 334 334 struct kvm_vcpu *vcpu; 335 335 struct kvm_vm *vm; ··· 373 373 "Expected " #insn "(0x%x) to yield 0x%lx, got 0x%lx", \ 374 374 msr, expected, val); 375 375 376 - static void guest_test_rdpmc(uint32_t rdpmc_idx, bool expect_success, 376 + static void guest_test_rdpmc(u32 rdpmc_idx, bool expect_success, 377 377 u64 expected_val) 378 378 { 379 379 uint8_t vector; ··· 393 393 GUEST_ASSERT_PMC_VALUE(RDPMC, rdpmc_idx, val, expected_val); 394 394 } 395 395 396 - static void guest_rd_wr_counters(uint32_t base_msr, uint8_t nr_possible_counters, 397 - uint8_t nr_counters, uint32_t or_mask) 396 + static void guest_rd_wr_counters(u32 base_msr, uint8_t nr_possible_counters, 397 + uint8_t nr_counters, u32 or_mask) 398 398 { 399 399 const bool pmu_has_fast_mode = !guest_get_pmu_version(); 400 400 uint8_t i; ··· 405 405 * width of the counters. 406 406 */ 407 407 const u64 test_val = 0xffff; 408 - const uint32_t msr = base_msr + i; 408 + const u32 msr = base_msr + i; 409 409 410 410 /* 411 411 * Fixed counters are supported if the counter is less than the ··· 421 421 const u64 expected_val = expect_success ? test_val : 0; 422 422 const bool expect_gp = !expect_success && msr != MSR_P6_PERFCTR0 && 423 423 msr != MSR_P6_PERFCTR1; 424 - uint32_t rdpmc_idx; 424 + u32 rdpmc_idx; 425 425 uint8_t vector; 426 426 u64 val; 427 427 ··· 463 463 { 464 464 uint8_t pmu_version = guest_get_pmu_version(); 465 465 uint8_t nr_gp_counters = 0; 466 - uint32_t base_msr; 466 + u32 base_msr; 467 467 468 468 if (pmu_version) 469 469 nr_gp_counters = this_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS); ··· 563 563 564 564 static void test_fixed_counters(uint8_t pmu_version, u64 perf_capabilities, 565 565 uint8_t nr_fixed_counters, 566 - uint32_t supported_bitmask) 566 + u32 supported_bitmask) 567 567 { 568 568 struct kvm_vcpu *vcpu; 569 569 struct kvm_vm *vm; ··· 588 588 uint8_t pmu_version = kvm_cpu_property(X86_PROPERTY_PMU_VERSION); 589 589 unsigned int i; 590 590 uint8_t v, j; 591 - uint32_t k; 591 + u32 k; 592 592 593 593 const u64 perf_caps[] = { 594 594 0, ··· 602 602 * as alternating bit sequencues, e.g. to detect if KVM is checking the 603 603 * wrong bit(s). 604 604 */ 605 - const uint32_t unavailable_masks[] = { 605 + const u32 unavailable_masks[] = { 606 606 0x0, 607 607 0xffffffffu, 608 608 0xaaaaaaaau,
+10 -10
tools/testing/selftests/kvm/x86/pmu_event_filter_test.c
··· 75 75 * 76 76 * Return on success. GUEST_SYNC(0) on error. 77 77 */ 78 - static void check_msr(uint32_t msr, u64 bits_to_flip) 78 + static void check_msr(u32 msr, u64 bits_to_flip) 79 79 { 80 80 u64 v = rdmsr(msr) ^ bits_to_flip; 81 81 ··· 89 89 GUEST_SYNC(-EIO); 90 90 } 91 91 92 - static void run_and_measure_loop(uint32_t msr_base) 92 + static void run_and_measure_loop(u32 msr_base) 93 93 { 94 94 const u64 branches_retired = rdmsr(msr_base + 0); 95 95 const u64 insn_retired = rdmsr(msr_base + 1); ··· 378 378 379 379 static bool supports_event_mem_inst_retired(void) 380 380 { 381 - uint32_t eax, ebx, ecx, edx; 381 + u32 eax, ebx, ecx, edx; 382 382 383 383 cpuid(1, &eax, &ebx, &ecx, &edx); 384 384 if (x86_family(eax) == 0x6) { ··· 415 415 #define EXCLUDE_MASKED_ENTRY(event_select, mask, match) \ 416 416 KVM_PMU_ENCODE_MASKED_ENTRY(event_select, mask, match, true) 417 417 418 - static void masked_events_guest_test(uint32_t msr_base) 418 + static void masked_events_guest_test(u32 msr_base) 419 419 { 420 420 /* 421 421 * The actual value of the counters don't determine the outcome of ··· 499 499 u64 amd_events[MAX_TEST_EVENTS]; 500 500 u64 amd_event_end; 501 501 const char *msg; 502 - uint32_t flags; 502 + u32 flags; 503 503 }; 504 504 505 505 /* ··· 669 669 } 670 670 671 671 static int set_pmu_single_event_filter(struct kvm_vcpu *vcpu, u64 event, 672 - uint32_t flags, uint32_t action) 672 + u32 flags, u32 action) 673 673 { 674 674 struct __kvm_pmu_event_filter f = { 675 675 .nevents = 1, ··· 746 746 } 747 747 748 748 static u64 test_with_fixed_counter_filter(struct kvm_vcpu *vcpu, 749 - uint32_t action, uint32_t bitmap) 749 + u32 action, u32 bitmap) 750 750 { 751 751 struct __kvm_pmu_event_filter f = { 752 752 .action = action, ··· 758 758 } 759 759 760 760 static u64 test_set_gp_and_fixed_event_filter(struct kvm_vcpu *vcpu, 761 - uint32_t action, 762 - uint32_t bitmap) 761 + u32 action, 762 + u32 bitmap) 763 763 { 764 764 struct __kvm_pmu_event_filter f = base_event_filter; 765 765 ··· 774 774 uint8_t nr_fixed_counters) 775 775 { 776 776 unsigned int i; 777 - uint32_t bitmap; 777 + u32 bitmap; 778 778 u64 count; 779 779 780 780 TEST_ASSERT(nr_fixed_counters < sizeof(bitmap) * 8,
+4 -4
tools/testing/selftests/kvm/x86/private_mem_conversions_test.c
··· 366 366 } 367 367 } 368 368 369 - static void test_mem_conversions(enum vm_mem_backing_src_type src_type, uint32_t nr_vcpus, 370 - uint32_t nr_memslots) 369 + static void test_mem_conversions(enum vm_mem_backing_src_type src_type, u32 nr_vcpus, 370 + u32 nr_memslots) 371 371 { 372 372 /* 373 373 * Allocate enough memory so that each vCPU's chunk of memory can be ··· 450 450 int main(int argc, char *argv[]) 451 451 { 452 452 enum vm_mem_backing_src_type src_type = DEFAULT_VM_MEM_SRC; 453 - uint32_t nr_memslots = 1; 454 - uint32_t nr_vcpus = 1; 453 + u32 nr_memslots = 1; 454 + u32 nr_vcpus = 1; 455 455 int opt; 456 456 457 457 TEST_REQUIRE(kvm_check_cap(KVM_CAP_VM_TYPES) & BIT(KVM_X86_SW_PROTECTED_VM));
+4 -4
tools/testing/selftests/kvm/x86/private_mem_kvm_exits_test.c
··· 27 27 return value; 28 28 } 29 29 30 - static uint32_t run_vcpu_get_exit_reason(struct kvm_vcpu *vcpu) 30 + static u32 run_vcpu_get_exit_reason(struct kvm_vcpu *vcpu) 31 31 { 32 32 int r; 33 33 ··· 50 50 struct kvm_vcpu *vcpu; 51 51 pthread_t vm_thread; 52 52 void *thread_return; 53 - uint32_t exit_reason; 53 + u32 exit_reason; 54 54 55 55 vm = vm_create_shape_with_one_vcpu(protected_vm_shape, &vcpu, 56 56 guest_repeatedly_read); ··· 72 72 vm_mem_region_delete(vm, EXITS_TEST_SLOT); 73 73 74 74 pthread_join(vm_thread, &thread_return); 75 - exit_reason = (uint32_t)(u64)thread_return; 75 + exit_reason = (u32)(u64)thread_return; 76 76 77 77 TEST_ASSERT_EQ(exit_reason, KVM_EXIT_MEMORY_FAULT); 78 78 TEST_ASSERT_EQ(vcpu->run->memory_fault.flags, KVM_MEMORY_EXIT_FLAG_PRIVATE); ··· 86 86 { 87 87 struct kvm_vm *vm; 88 88 struct kvm_vcpu *vcpu; 89 - uint32_t exit_reason; 89 + u32 exit_reason; 90 90 91 91 vm = vm_create_shape_with_one_vcpu(protected_vm_shape, &vcpu, 92 92 guest_repeatedly_read);
+3 -3
tools/testing/selftests/kvm/x86/set_boot_cpu_id.c
··· 86 86 } 87 87 } 88 88 89 - static struct kvm_vm *create_vm(uint32_t nr_vcpus, uint32_t bsp_vcpu_id, 89 + static struct kvm_vm *create_vm(u32 nr_vcpus, u32 bsp_vcpu_id, 90 90 struct kvm_vcpu *vcpus[]) 91 91 { 92 92 struct kvm_vm *vm; 93 - uint32_t i; 93 + u32 i; 94 94 95 95 vm = vm_create(nr_vcpus); 96 96 ··· 104 104 return vm; 105 105 } 106 106 107 - static void run_vm_bsp(uint32_t bsp_vcpu_id) 107 + static void run_vm_bsp(u32 bsp_vcpu_id) 108 108 { 109 109 struct kvm_vcpu *vcpus[2]; 110 110 struct kvm_vm *vm;
+2 -2
tools/testing/selftests/kvm/x86/sev_init2_tests.c
··· 94 94 "VM type is KVM_X86_SW_PROTECTED_VM"); 95 95 } 96 96 97 - void test_flags(uint32_t vm_type) 97 + void test_flags(u32 vm_type) 98 98 { 99 99 int i; 100 100 ··· 104 104 "invalid flag"); 105 105 } 106 106 107 - void test_features(uint32_t vm_type, u64 supported_features) 107 + void test_features(u32 vm_type, u64 supported_features) 108 108 { 109 109 int i; 110 110
+5 -5
tools/testing/selftests/kvm/x86/sev_smoke_test.c
··· 13 13 #include "linux/psp-sev.h" 14 14 #include "sev.h" 15 15 16 - static void guest_sev_test_msr(uint32_t msr) 16 + static void guest_sev_test_msr(u32 msr) 17 17 { 18 18 u64 val = rdmsr(msr); 19 19 ··· 104 104 abort(); 105 105 } 106 106 107 - static void test_sync_vmsa(uint32_t type, u64 policy) 107 + static void test_sync_vmsa(u32 type, u64 policy) 108 108 { 109 109 struct kvm_vcpu *vcpu; 110 110 struct kvm_vm *vm; ··· 150 150 kvm_vm_free(vm); 151 151 } 152 152 153 - static void test_sev(void *guest_code, uint32_t type, u64 policy) 153 + static void test_sev(void *guest_code, u32 type, u64 policy) 154 154 { 155 155 struct kvm_vcpu *vcpu; 156 156 struct kvm_vm *vm; ··· 201 201 __asm__ __volatile__("ud2"); 202 202 } 203 203 204 - static void test_sev_shutdown(uint32_t type, u64 policy) 204 + static void test_sev_shutdown(u32 type, u64 policy) 205 205 { 206 206 struct kvm_vcpu *vcpu; 207 207 struct kvm_vm *vm; ··· 218 218 kvm_vm_free(vm); 219 219 } 220 220 221 - static void test_sev_smoke(void *guest, uint32_t type, u64 policy) 221 + static void test_sev_smoke(void *guest, u32 type, u64 policy) 222 222 { 223 223 const u64 xf_mask = XFEATURE_MASK_X87_AVX; 224 224
+1 -1
tools/testing/selftests/kvm/x86/ucna_injection_test.c
··· 251 251 vcpu_ioctl(vcpu, KVM_X86_SETUP_MCE, &mcg_caps); 252 252 } 253 253 254 - static struct kvm_vcpu *create_vcpu_with_mce_cap(struct kvm_vm *vm, uint32_t vcpuid, 254 + static struct kvm_vcpu *create_vcpu_with_mce_cap(struct kvm_vm *vm, u32 vcpuid, 255 255 bool enable_cmci_p, void *guest_code) 256 256 { 257 257 struct kvm_vcpu *vcpu = vm_vcpu_add(vm, vcpuid, guest_code);
+14 -14
tools/testing/selftests/kvm/x86/userspace_msr_exit_test.c
··· 142 142 * Note: Force test_rdmsr() to not be inlined to prevent the labels, 143 143 * rdmsr_start and rdmsr_end, from being defined multiple times. 144 144 */ 145 - static noinline u64 test_rdmsr(uint32_t msr) 145 + static noinline u64 test_rdmsr(u32 msr) 146 146 { 147 - uint32_t a, d; 147 + u32 a, d; 148 148 149 149 guest_exception_count = 0; 150 150 ··· 158 158 * Note: Force test_wrmsr() to not be inlined to prevent the labels, 159 159 * wrmsr_start and wrmsr_end, from being defined multiple times. 160 160 */ 161 - static noinline void test_wrmsr(uint32_t msr, u64 value) 161 + static noinline void test_wrmsr(u32 msr, u64 value) 162 162 { 163 - uint32_t a = value; 164 - uint32_t d = value >> 32; 163 + u32 a = value; 164 + u32 d = value >> 32; 165 165 166 166 guest_exception_count = 0; 167 167 ··· 176 176 * Note: Force test_em_rdmsr() to not be inlined to prevent the labels, 177 177 * rdmsr_start and rdmsr_end, from being defined multiple times. 178 178 */ 179 - static noinline u64 test_em_rdmsr(uint32_t msr) 179 + static noinline u64 test_em_rdmsr(u32 msr) 180 180 { 181 - uint32_t a, d; 181 + u32 a, d; 182 182 183 183 guest_exception_count = 0; 184 184 ··· 192 192 * Note: Force test_em_wrmsr() to not be inlined to prevent the labels, 193 193 * wrmsr_start and wrmsr_end, from being defined multiple times. 194 194 */ 195 - static noinline void test_em_wrmsr(uint32_t msr, u64 value) 195 + static noinline void test_em_wrmsr(u32 msr, u64 value) 196 196 { 197 - uint32_t a = value; 198 - uint32_t d = value >> 32; 197 + u32 a = value; 198 + u32 d = value >> 32; 199 199 200 200 guest_exception_count = 0; 201 201 ··· 391 391 } 392 392 } 393 393 394 - static void process_rdmsr(struct kvm_vcpu *vcpu, uint32_t msr_index) 394 + static void process_rdmsr(struct kvm_vcpu *vcpu, u32 msr_index) 395 395 { 396 396 struct kvm_run *run = vcpu->run; 397 397 ··· 423 423 } 424 424 } 425 425 426 - static void process_wrmsr(struct kvm_vcpu *vcpu, uint32_t msr_index) 426 + static void process_wrmsr(struct kvm_vcpu *vcpu, u32 msr_index) 427 427 { 428 428 struct kvm_run *run = vcpu->run; 429 429 ··· 489 489 } 490 490 491 491 static void run_guest_then_process_rdmsr(struct kvm_vcpu *vcpu, 492 - uint32_t msr_index) 492 + u32 msr_index) 493 493 { 494 494 vcpu_run(vcpu); 495 495 process_rdmsr(vcpu, msr_index); 496 496 } 497 497 498 498 static void run_guest_then_process_wrmsr(struct kvm_vcpu *vcpu, 499 - uint32_t msr_index) 499 + u32 msr_index) 500 500 { 501 501 vcpu_run(vcpu); 502 502 process_wrmsr(vcpu, msr_index);
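Aside on the a/d pairs in the hunks above (illustration only, not taken from the commit): RDMSR and WRMSR move the 64-bit MSR value through the EDX:EAX register pair, which is why each wrapper splits the value into two 32-bit halves and each read rejoins them. A minimal sketch of that split and rejoin in plain C, with hypothetical helper names (the selftests spell these types u32/u64):

#include <stdint.h>

/* Hypothetical helpers mirroring the split the test wrappers perform. */
static inline void msr_split(uint64_t value, uint32_t *a, uint32_t *d)
{
	*a = (uint32_t)value;	/* low 32 bits, destined for EAX */
	*d = value >> 32;	/* high 32 bits, destined for EDX */
}

static inline uint64_t msr_join(uint32_t a, uint32_t d)
{
	return ((uint64_t)d << 32) | a;
}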
+1 -1
tools/testing/selftests/kvm/x86/vmx_apic_access_test.c
··· 38 38 { 39 39 #define L2_GUEST_STACK_SIZE 64 40 40 unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; 41 - uint32_t control; 41 + u32 control; 42 42 43 43 GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages)); 44 44 GUEST_ASSERT(load_vmcs(vmx_pages));
+1 -1
tools/testing/selftests/kvm/x86/vmx_apicv_updates_test.c
··· 33 33 { 34 34 #define L2_GUEST_STACK_SIZE 64 35 35 unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; 36 - uint32_t control; 36 + u32 control; 37 37 38 38 GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages)); 39 39 GUEST_ASSERT(load_vmcs(vmx_pages));
+3 -5
tools/testing/selftests/kvm/x86/vmx_msrs_test.c
··· 12 12 #include "kvm_util.h" 13 13 #include "vmx.h" 14 14 15 - static void vmx_fixed1_msr_test(struct kvm_vcpu *vcpu, uint32_t msr_index, 16 - u64 mask) 15 + static void vmx_fixed1_msr_test(struct kvm_vcpu *vcpu, u32 msr_index, u64 mask) 17 16 { 18 17 u64 val = vcpu_get_msr(vcpu, msr_index); 19 18 u64 bit; ··· 25 26 } 26 27 } 27 28 28 - static void vmx_fixed0_msr_test(struct kvm_vcpu *vcpu, uint32_t msr_index, 29 - u64 mask) 29 + static void vmx_fixed0_msr_test(struct kvm_vcpu *vcpu, u32 msr_index, u64 mask) 30 30 { 31 31 u64 val = vcpu_get_msr(vcpu, msr_index); 32 32 u64 bit; ··· 38 40 } 39 41 } 40 42 41 - static void vmx_fixed0and1_msr_test(struct kvm_vcpu *vcpu, uint32_t msr_index) 43 + static void vmx_fixed0and1_msr_test(struct kvm_vcpu *vcpu, u32 msr_index) 42 44 { 43 45 vmx_fixed0_msr_test(vcpu, msr_index, GENMASK_ULL(31, 0)); 44 46 vmx_fixed1_msr_test(vcpu, msr_index, GENMASK_ULL(63, 32));
+8 -8
tools/testing/selftests/kvm/x86/xapic_ipi_test.c
··· 52 52 53 53 /* Data struct shared between host main thread and vCPUs */ 54 54 struct test_data_page { 55 - uint32_t halter_apic_id; 55 + u32 halter_apic_id; 56 56 volatile u64 hlt_count; 57 57 volatile u64 wake_count; 58 58 u64 ipis_sent; 59 59 u64 migrations_attempted; 60 60 u64 migrations_completed; 61 - uint32_t icr; 62 - uint32_t icr2; 63 - uint32_t halter_tpr; 64 - uint32_t halter_ppr; 61 + u32 icr; 62 + u32 icr2; 63 + u32 halter_tpr; 64 + u32 halter_ppr; 65 65 66 66 /* 67 67 * Record local version register as a cross-check that APIC access ··· 69 69 * arch/x86/kvm/lapic.c). If test is failing, check that values match 70 70 * to determine whether APIC access exits are working. 71 71 */ 72 - uint32_t halter_lvr; 72 + u32 halter_lvr; 73 73 }; 74 74 75 75 struct thread_params { ··· 128 128 u64 last_wake_count; 129 129 u64 last_hlt_count; 130 130 u64 last_ipis_rcvd_count; 131 - uint32_t icr_val; 132 - uint32_t icr2_val; 131 + u32 icr_val; 132 + u32 icr2_val; 133 133 u64 tsc_start; 134 134 135 135 verify_apic_base_addr();
+2 -2
tools/testing/selftests/kvm/x86/xapic_state_test.c
··· 144 144 145 145 static void __test_apic_id(struct kvm_vcpu *vcpu, u64 apic_base) 146 146 { 147 - uint32_t apic_id, expected; 147 + u32 apic_id, expected; 148 148 struct kvm_lapic_state xapic; 149 149 150 150 vcpu_set_msr(vcpu, MSR_IA32_APICBASE, apic_base); ··· 170 170 */ 171 171 static void test_apic_id(void) 172 172 { 173 - const uint32_t NR_VCPUS = 3; 173 + const u32 NR_VCPUS = 3; 174 174 struct kvm_vcpu *vcpus[NR_VCPUS]; 175 175 u64 apic_base; 176 176 struct kvm_vm *vm;
+3 -3
tools/testing/selftests/kvm/x86/xapic_tpr_test.c
··· 58 58 if (is_x2apic) { 59 59 x2apic_write_reg(APIC_SELF_IPI, IRQ_VECTOR); 60 60 } else { 61 - uint32_t icr, icr2; 61 + u32 icr, icr2; 62 62 63 63 icr = APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_FIXED | 64 64 IRQ_VECTOR; ··· 71 71 72 72 static uint8_t tpr_guest_tpr_get(void) 73 73 { 74 - uint32_t taskpri; 74 + u32 taskpri; 75 75 76 76 if (is_x2apic) 77 77 taskpri = x2apic_read_reg(APIC_TASKPRI); ··· 83 83 84 84 static uint8_t tpr_guest_ppr_get(void) 85 85 { 86 - uint32_t procpri; 86 + u32 procpri; 87 87 88 88 if (is_x2apic) 89 89 procpri = x2apic_read_reg(APIC_PROCPRI);
+3 -3
tools/testing/selftests/kvm/x86/xen_shinfo_test.c
··· 116 116 } __attribute__((__packed__)); 117 117 118 118 struct vcpu_runstate_info { 119 - uint32_t state; 119 + u32 state; 120 120 u64 state_entry_time; 121 121 u64 time[5]; /* Extra field for overrun check */ 122 122 }; 123 123 124 124 struct compat_vcpu_runstate_info { 125 - uint32_t state; 125 + u32 state; 126 126 u64 state_entry_time; 127 127 u64 time[5]; 128 128 } __attribute__((__packed__)); ··· 145 145 unsigned long evtchn_pending[64]; 146 146 unsigned long evtchn_mask[64]; 147 147 struct pvclock_wall_clock wc; 148 - uint32_t wc_sec_hi; 148 + u32 wc_sec_hi; 149 149 /* arch_shared_info here */ 150 150 }; 151 151
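One layout note on the final hunk (an observation about the existing structs, not something this commit changes): with natural alignment, a 64-bit build inserts padding between the u32 state field and the following u64 in vcpu_runstate_info, while the __packed compat variant does not; that difference is why both definitions exist. A small standalone program, using simplified copies of the two structs, makes the layouts visible:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct runstate {		/* natural alignment, like vcpu_runstate_info */
	uint32_t state;
	uint64_t state_entry_time;
};

struct compat_runstate {	/* packed, like compat_vcpu_runstate_info */
	uint32_t state;
	uint64_t state_entry_time;
} __attribute__((__packed__));

int main(void)
{
	/* On x86-64 this prints 8 and 4: the unpacked struct gets 4 bytes
	 * of padding after 'state', the packed one does not. */
	printf("unpacked offset: %zu\n", offsetof(struct runstate, state_entry_time));
	printf("packed offset:   %zu\n", offsetof(struct compat_runstate, state_entry_time));
	return 0;
}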