Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

libperf cpumap: Make index and nr types unsigned

The index into the cpumap array and the number of entries within the
array can never be negative, so let's make them unsigned. This is
prompted by reports that gcc 13 with -O6 gives
alloc-size-larger-than errors. The patch makes the cpumap changes
first and then updates the declarations of index variables throughout
perf and libperf to be unsigned. The two steps are hard to separate,
as compiler warnings about mixing signed and unsigned types break the
build.

Reported-by: Chingbin Li <liqb365@163.com>
Closes: https://lore.kernel.org/lkml/20260212025127.841090-1-liqb365@163.com/
Tested-by: Chingbin Li <liqb365@163.com>
Signed-off-by: Ian Rogers <irogers@google.com>
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
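
To make the motivation concrete, here is an illustrative sketch (hypothetical code, not from the patch) of how a signed element count can trip gcc's -Walloc-size-larger-than= diagnostic at high optimization levels: if the optimizer cannot rule out a negative count, the implicit conversion to size_t in the allocation size yields a value above PTRDIFF_MAX, which gcc reports; an unsigned count has no negative range, so the diagnostic cannot fire that way.

#include <stdlib.h>

struct entry { int cpu; };

/* Signed count: a negative 'nr' converts to a huge size_t, so gcc may
 * warn that the allocation exceeds the maximum object size. */
struct entry *alloc_signed(int nr)
{
	return malloc(nr * sizeof(struct entry));
}

/* Unsigned count: the size computation is purely unsigned and bounded
 * well below PTRDIFF_MAX, so the negative-range warning goes away. */
struct entry *alloc_unsigned(unsigned int nr)
{
	return malloc(nr * sizeof(struct entry));
}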

Authored by Ian Rogers and committed by Namhyung Kim (83c33836 7f8969aa)

+108 -91
+23 -26
tools/lib/perf/cpumap.c
···
 #define MAX_NR_CPUS	4096

-void perf_cpu_map__set_nr(struct perf_cpu_map *map, int nr_cpus)
+void perf_cpu_map__set_nr(struct perf_cpu_map *map, unsigned int nr_cpus)
 {
 	RC_CHK_ACCESS(map)->nr = nr_cpus;
 }

-struct perf_cpu_map *perf_cpu_map__alloc(int nr_cpus)
+struct perf_cpu_map *perf_cpu_map__alloc(unsigned int nr_cpus)
 {
 	RC_STRUCT(perf_cpu_map) *cpus;
 	struct perf_cpu_map *result;
···
 static struct perf_cpu_map *cpu_map__new_sysconf(void)
 {
 	struct perf_cpu_map *cpus;
-	int nr_cpus, nr_cpus_conf;
+	long nr_cpus, nr_cpus_conf;

 	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
 	if (nr_cpus < 0)
···
 	nr_cpus_conf = sysconf(_SC_NPROCESSORS_CONF);
 	if (nr_cpus != nr_cpus_conf) {
-		pr_warning("Number of online CPUs (%d) differs from the number configured (%d) the CPU map will only cover the first %d CPUs.",
+		pr_warning("Number of online CPUs (%ld) differs from the number configured (%ld) the CPU map will only cover the first %ld CPUs.",
 			   nr_cpus, nr_cpus_conf, nr_cpus);
 	}

 	cpus = perf_cpu_map__alloc(nr_cpus);
 	if (cpus != NULL) {
-		int i;
-
-		for (i = 0; i < nr_cpus; ++i)
+		for (long i = 0; i < nr_cpus; ++i)
 			RC_CHK_ACCESS(cpus)->map[i].cpu = i;
 	}
···
 	return cpu_a->cpu - cpu_b->cpu;
 }

-static struct perf_cpu __perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
+static struct perf_cpu __perf_cpu_map__cpu(const struct perf_cpu_map *cpus, unsigned int idx)
 {
 	return RC_CHK_ACCESS(cpus)->map[idx];
 }

-static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, const struct perf_cpu *tmp_cpus)
+static struct perf_cpu_map *cpu_map__trim_new(unsigned int nr_cpus, const struct perf_cpu *tmp_cpus)
 {
 	size_t payload_size = nr_cpus * sizeof(struct perf_cpu);
 	struct perf_cpu_map *cpus = perf_cpu_map__alloc(nr_cpus);
-	int i, j;

 	if (cpus != NULL) {
+		unsigned int j = 0;
+
 		memcpy(RC_CHK_ACCESS(cpus)->map, tmp_cpus, payload_size);
 		qsort(RC_CHK_ACCESS(cpus)->map, nr_cpus, sizeof(struct perf_cpu), cmp_cpu);
 		/* Remove dups */
-		j = 0;
-		for (i = 0; i < nr_cpus; i++) {
+		for (unsigned int i = 0; i < nr_cpus; i++) {
 			if (i == 0 ||
 			    __perf_cpu_map__cpu(cpus, i).cpu !=
 			    __perf_cpu_map__cpu(cpus, i - 1).cpu) {
···
 	struct perf_cpu_map *cpus = NULL;
 	unsigned long start_cpu, end_cpu = 0;
 	char *p = NULL;
-	int i, nr_cpus = 0;
+	unsigned int nr_cpus = 0, max_entries = 0;
 	struct perf_cpu *tmp_cpus = NULL, *tmp;
-	int max_entries = 0;

 	if (!cpu_list)
 		return perf_cpu_map__new_online_cpus();
···
 	for (; start_cpu <= end_cpu; start_cpu++) {
 		/* check for duplicates */
-		for (i = 0; i < nr_cpus; i++)
+		for (unsigned int i = 0; i < nr_cpus; i++) {
 			if (tmp_cpus[i].cpu == (int16_t)start_cpu)
 				goto invalid;
+		}

 		if (nr_cpus == max_entries) {
 			max_entries += max(end_cpu - start_cpu + 1, 16UL);
···
 	return cpus;
 }

-static int __perf_cpu_map__nr(const struct perf_cpu_map *cpus)
+static unsigned int __perf_cpu_map__nr(const struct perf_cpu_map *cpus)
 {
 	return RC_CHK_ACCESS(cpus)->nr;
 }

-struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
+struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, unsigned int idx)
 {
 	struct perf_cpu result = {
 		.cpu = -1
···
 	return result;
 }

-int perf_cpu_map__nr(const struct perf_cpu_map *cpus)
+unsigned int perf_cpu_map__nr(const struct perf_cpu_map *cpus)
 {
 	return cpus ? __perf_cpu_map__nr(cpus) : 1;
 }
···
 int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
 {
-	int low, high;
+	unsigned int low, high;

 	if (!cpus)
 		return -1;
···
 bool perf_cpu_map__equal(const struct perf_cpu_map *lhs, const struct perf_cpu_map *rhs)
 {
-	int nr;
+	unsigned int nr;

 	if (lhs == rhs)
 		return true;
···
 	if (nr != __perf_cpu_map__nr(rhs))
 		return false;

-	for (int idx = 0; idx < nr; idx++) {
+	for (unsigned int idx = 0; idx < nr; idx++) {
 		if (__perf_cpu_map__cpu(lhs, idx).cpu != __perf_cpu_map__cpu(rhs, idx).cpu)
 			return false;
 	}
···
 	struct perf_cpu cpu, result = {
 		.cpu = -1
 	};
-	int idx;
+	unsigned int idx;

 	perf_cpu_map__for_each_cpu_skip_any(cpu, idx, map) {
 		result = cpu;
···
 	if (!a || __perf_cpu_map__nr(b) > __perf_cpu_map__nr(a))
 		return false;

-	for (int i = 0, j = 0; i < __perf_cpu_map__nr(a); i++) {
+	for (unsigned int i = 0, j = 0; i < __perf_cpu_map__nr(a); i++) {
 		if (__perf_cpu_map__cpu(a, i).cpu > __perf_cpu_map__cpu(b, j).cpu)
 			return false;
 		if (__perf_cpu_map__cpu(a, i).cpu == __perf_cpu_map__cpu(b, j).cpu) {
···
 int perf_cpu_map__merge(struct perf_cpu_map **orig, struct perf_cpu_map *other)
 {
 	struct perf_cpu *tmp_cpus;
-	int tmp_len;
-	int i, j, k;
+	unsigned int tmp_len, i, j, k;
 	struct perf_cpu_map *merged;

 	if (perf_cpu_map__is_subset(*orig, other))
···
 struct perf_cpu_map *perf_cpu_map__intersect(struct perf_cpu_map *orig,
 					     struct perf_cpu_map *other)
 {
-	int i, j, k;
+	unsigned int i, j, k;
 	struct perf_cpu_map *merged;

 	if (perf_cpu_map__is_subset(other, orig))
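
One detail in the cpu_map__new_sysconf() hunk above is worth calling out: sysconf(3) returns long and reports errors as -1, so nr_cpus is widened to long rather than made unsigned; the value has to stay signed until the error check has run. A minimal sketch of that pattern (hypothetical helper, not part of the patch):

#include <unistd.h>

/* Hypothetical helper: keep the sysconf() result signed until the
 * error check, then hand a known-non-negative count to unsigned code. */
static int online_cpu_count(unsigned int *out)
{
	long nr = sysconf(_SC_NPROCESSORS_ONLN);	/* -1 on error */

	if (nr < 0)
		return -1;
	*out = (unsigned int)nr;
	return 0;
}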
+6 -4
tools/lib/perf/evsel.c
···
 			  struct perf_thread_map *threads)
 {
 	struct perf_cpu cpu;
-	int idx, thread, err = 0;
+	unsigned int idx;
+	int thread, err = 0;

 	if (cpus == NULL) {
 		static struct perf_cpu_map *empty_cpu_map;
···
 int perf_evsel__enable_thread(struct perf_evsel *evsel, int thread)
 {
 	struct perf_cpu cpu __maybe_unused;
-	int idx;
+	unsigned int idx;
 	int err;

 	perf_cpu_map__for_each_cpu(cpu, idx, evsel->cpus) {
···
 int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter)
 {
-	int err = 0, i;
+	int err = 0;

-	for (i = 0; i < perf_cpu_map__nr(evsel->cpus) && !err; i++)
+	for (unsigned int i = 0; i < perf_cpu_map__nr(evsel->cpus) && !err; i++) {
 		err = perf_evsel__run_ioctl(evsel,
 					    PERF_EVENT_IOC_SET_FILTER,
 					    (void *)filter, i);
+	}
 	return err;
 }
+3 -3
tools/lib/perf/include/internal/cpumap.h
···
 DECLARE_RC_STRUCT(perf_cpu_map) {
 	refcount_t	refcnt;
 	/** Length of the map array. */
-	int		nr;
+	unsigned int	nr;
 	/** The CPU values. */
 	struct perf_cpu	map[];
 };

-struct perf_cpu_map *perf_cpu_map__alloc(int nr_cpus);
+struct perf_cpu_map *perf_cpu_map__alloc(unsigned int nr_cpus);
 int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu);
 bool perf_cpu_map__is_subset(const struct perf_cpu_map *a, const struct perf_cpu_map *b);

-void perf_cpu_map__set_nr(struct perf_cpu_map *map, int nr_cpus);
+void perf_cpu_map__set_nr(struct perf_cpu_map *map, unsigned int nr_cpus);

 static inline refcount_t *perf_cpu_map__refcnt(struct perf_cpu_map *map)
 {
+2 -2
tools/lib/perf/include/perf/cpumap.h
···
  * perf_cpu_map__cpu - get the CPU value at the given index. Returns -1 if index
  *                     is invalid.
  */
-LIBPERF_API struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx);
+LIBPERF_API struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, unsigned int idx);
 /**
  * perf_cpu_map__nr - for an empty map returns 1, as perf_cpu_map__cpu returns a
  *                    cpu of -1 for an invalid index, this makes an empty map
···
  *                    the result is the number CPUs in the map plus one if the
  *                    "any CPU"/dummy value is present.
  */
-LIBPERF_API int perf_cpu_map__nr(const struct perf_cpu_map *cpus);
+LIBPERF_API unsigned int perf_cpu_map__nr(const struct perf_cpu_map *cpus);
 /**
  * perf_cpu_map__has_any_cpu_or_is_empty - is map either empty or has the "any CPU"/dummy value.
  */
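
Since the public perf_cpu_map__cpu()/perf_cpu_map__nr() signatures change here, a short usage sketch may help (an illustration, not from the patch; assumes a standard libperf build and the iteration macro as documented in cpumap.h). Note the index variable is now unsigned int: a signed index compared against perf_cpu_map__nr() would trip -Wsign-compare, which is why callers are converted in the same patch.

#include <perf/cpumap.h>
#include <stdio.h>

int main(void)
{
	struct perf_cpu_map *cpus = perf_cpu_map__new_online_cpus();
	struct perf_cpu cpu;
	unsigned int idx;	/* was 'int' before this patch */

	if (!cpus)
		return 1;

	/* Walks indices 0 .. perf_cpu_map__nr(cpus) - 1. */
	perf_cpu_map__for_each_cpu(cpu, idx, cpus)
		printf("idx %u -> cpu %d\n", idx, cpu.cpu);

	perf_cpu_map__put(cpus);	/* maps are reference counted */
	return 0;
}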
+4 -3
tools/perf/arch/arm/util/cs-etm.c
···
 static int cs_etm_validate_config(struct perf_pmu *cs_etm_pmu,
 				  struct evsel *evsel)
 {
-	int idx, err = 0;
+	unsigned int idx;
+	int err = 0;
 	struct perf_cpu_map *event_cpus = evsel->evlist->core.user_requested_cpus;
 	struct perf_cpu_map *intersect_cpus;
 	struct perf_cpu cpu;
···
 cs_etm_info_priv_size(struct auxtrace_record *itr,
 		      struct evlist *evlist)
 {
-	int idx;
+	unsigned int idx;
 	int etmv3 = 0, etmv4 = 0, ete = 0;
 	struct perf_cpu_map *event_cpus = evlist->core.user_requested_cpus;
 	struct perf_cpu_map *intersect_cpus;
···
 			 struct perf_record_auxtrace_info *info,
 			 size_t priv_size)
 {
-	int i;
+	unsigned int i;
 	u32 offset;
 	u64 nr_cpu, type;
 	struct perf_cpu_map *cpu_map;
+2 -1
tools/perf/arch/arm64/util/arm-spe.c
···
 				  struct perf_record_auxtrace_info *auxtrace_info,
 				  size_t priv_size)
 {
-	int i, ret;
+	unsigned int i;
+	int ret;
 	size_t offset;
 	struct arm_spe_recording *sper =
 			container_of(itr, struct arm_spe_recording, itr);
+1 -1
tools/perf/arch/arm64/util/header.c
···
 int get_cpuid(char *buf, size_t sz, struct perf_cpu cpu)
 {
 	struct perf_cpu_map *cpus;
-	int idx;
+	unsigned int idx;

 	if (cpu.cpu != -1)
 		return _get_cpuid(buf, sz, cpu);
+2 -1
tools/perf/arch/x86/util/pmu.c
···
 	static struct perf_cpu_map *cha_adjusted[MAX_SNCS];
 	static struct perf_cpu_map *imc_adjusted[MAX_SNCS];
 	struct perf_cpu_map **adjusted = cha ? cha_adjusted : imc_adjusted;
-	int idx, pmu_snc, cpu_adjust;
+	unsigned int idx;
+	int pmu_snc, cpu_adjust;
 	struct perf_cpu cpu;
 	bool alloc;
+3 -3
tools/perf/builtin-c2c.c
···
 {
 	struct numa_node *n;
 	unsigned long **nodes;
-	int node, idx;
 	struct perf_cpu cpu;
 	int *cpu2node;
 	struct perf_env *env = perf_session__env(session);
···
 	if (!cpu2node)
 		return -ENOMEM;

-	for (idx = 0; idx < c2c.cpus_cnt; idx++)
+	for (int idx = 0; idx < c2c.cpus_cnt; idx++)
 		cpu2node[idx] = -1;

 	c2c.cpu2node = cpu2node;

-	for (node = 0; node < c2c.nodes_cnt; node++) {
+	for (int node = 0; node < c2c.nodes_cnt; node++) {
 		struct perf_cpu_map *map = n[node].map;
 		unsigned long *set;
+		unsigned int idx;

 		set = bitmap_zalloc(c2c.cpus_cnt);
 		if (!set)
+1 -1
tools/perf/builtin-record.c
···
 static int record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_cpu_map *cpus)
 {
 	struct perf_cpu cpu;
-	int idx;
+	unsigned int idx;

 	if (cpu_map__is_dummy(cpus))
 		return 0;
+3 -2
tools/perf/builtin-script.c
···
 static void __process_stat(struct evsel *counter, u64 tstamp)
 {
 	int nthreads = perf_thread_map__nr(counter->core.threads);
-	int idx, thread;
 	struct perf_cpu cpu;
 	static int header_printed;
···
 		header_printed = 1;
 	}

-	for (thread = 0; thread < nthreads; thread++) {
+	for (int thread = 0; thread < nthreads; thread++) {
+		unsigned int idx;
+
 		perf_cpu_map__for_each_cpu(cpu, idx, evsel__cpus(counter)) {
 			struct perf_counts_values *counts;
+1 -1
tools/perf/builtin-stat.c
···
 	struct evsel *counter;

 	evlist__for_each_entry(evsel_list, counter) {
-		int idx;
+		unsigned int idx;

 		if (!evsel__is_tool(counter))
 			continue;
+1 -1
tools/perf/tests/bitmap.c
···
 	bm = bitmap_zalloc(nbits);

 	if (map && bm) {
-		int i;
+		unsigned int i;
 		struct perf_cpu cpu;

 		perf_cpu_map__for_each_cpu(cpu, i, map)
+4 -2
tools/perf/tests/cpumap.c
···
 	return 0;
 }

-static int __test__cpu_map_merge(const char *lhs, const char *rhs, int nr, const char *expected)
+static int __test__cpu_map_merge(const char *lhs, const char *rhs, unsigned int nr,
+				 const char *expected)
 {
 	struct perf_cpu_map *a = perf_cpu_map__new(lhs);
 	struct perf_cpu_map *b = perf_cpu_map__new(rhs);
···
 	return ret;
 }

-static int __test__cpu_map_intersect(const char *lhs, const char *rhs, int nr, const char *expected)
+static int __test__cpu_map_intersect(const char *lhs, const char *rhs, unsigned int nr,
+				     const char *expected)
 {
 	struct perf_cpu_map *a = perf_cpu_map__new(lhs);
 	struct perf_cpu_map *b = perf_cpu_map__new(rhs);
+1 -1
tools/perf/tests/mem2node.c
···
 	if (map && bm) {
 		struct perf_cpu cpu;
-		int i;
+		unsigned int i;

 		perf_cpu_map__for_each_cpu(cpu, i, map)
 			__set_bit(cpu.cpu, bm);
+2 -1
tools/perf/tests/openat-syscall-all-cpus.c
···
 static int test__openat_syscall_event_on_all_cpus(struct test_suite *test __maybe_unused,
 						  int subtest __maybe_unused)
 {
-	int err = TEST_FAIL, fd, idx;
+	int err = TEST_FAIL, fd;
+	unsigned int idx;
 	struct perf_cpu cpu;
 	struct perf_cpu_map *cpus;
 	struct evsel *evsel;
+2 -2
tools/perf/tests/topology.c
···
 		.path = path,
 		.mode = PERF_DATA_MODE_READ,
 	};
-	int i;
+	unsigned int i;
 	struct aggr_cpu_id id;
 	struct perf_cpu cpu;
 	struct perf_env *env;
···
 	TEST_ASSERT_VAL("Session header CPU map not set", env->cpu);

-	for (i = 0; i < env->nr_cpus_avail; i++) {
+	for (i = 0; i < (unsigned int)env->nr_cpus_avail; i++) {
 		cpu.cpu = i;
 		if (!perf_cpu_map__has(map, cpu))
 			continue;
+1 -1
tools/perf/util/affinity.c
···
 	int cpu_set_size = get_cpu_set_size();
 	unsigned long *cpuset = bitmap_zalloc(cpu_set_size * 8);
 	struct perf_cpu cpu;
-	int idx;
+	unsigned int idx;

 	if (!cpuset)
 		return;
+13 -11
tools/perf/util/bpf_counter.c
···
 	struct perf_counts_values *counts;
 	int reading_map_fd;
 	__u32 key = 0;
-	int err, idx, bpf_cpu;
+	int err, bpf_cpu;
+	unsigned int idx;

 	if (list_empty(&evsel->bpf_counter_list))
 		return -EAGAIN;
···
 	}

 	for (bpf_cpu = 0; bpf_cpu < num_cpu_bpf; bpf_cpu++) {
-		idx = perf_cpu_map__idx(evsel__cpus(evsel),
-					(struct perf_cpu){.cpu = bpf_cpu});
-		if (idx == -1)
+		int i = perf_cpu_map__idx(evsel__cpus(evsel),
+					  (struct perf_cpu){.cpu = bpf_cpu});
+
+		if (i == -1)
 			continue;
-		counts = perf_counts(evsel->counts, idx, 0);
+		counts = perf_counts(evsel->counts, i, 0);
 		counts->val += values[bpf_cpu].counter;
 		counts->ena += values[bpf_cpu].enabled;
 		counts->run += values[bpf_cpu].running;
···
 static int bperf_sync_counters(struct evsel *evsel)
 {
 	struct perf_cpu cpu;
-	int idx;
+	unsigned int idx;

 	perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus)
 		bperf_trigger_reading(evsel->bperf_leader_prog_fd, cpu.cpu);
···
 	struct bpf_perf_event_value values[num_cpu_bpf];
 	struct perf_counts_values *counts;
 	int reading_map_fd, err = 0;
-	__u32 i;
-	int j;

 	bperf_sync_counters(evsel);
 	reading_map_fd = bpf_map__fd(skel->maps.accum_readings);

-	for (i = 0; i < filter_entry_cnt; i++) {
+	for (__u32 i = 0; i < filter_entry_cnt; i++) {
 		struct perf_cpu entry;
 		__u32 cpu;
···
 		if (err)
 			goto out;
 		switch (evsel->follower_skel->bss->type) {
-		case BPERF_FILTER_GLOBAL:
-			assert(i == 0);
+		case BPERF_FILTER_GLOBAL: {
+			unsigned int j;

+			assert(i == 0);
 			perf_cpu_map__for_each_cpu(entry, j, evsel__cpus(evsel)) {
 				counts = perf_counts(evsel->counts, j, 0);
 				counts->val = values[entry.cpu].counter;
···
 				counts->run = values[entry.cpu].running;
 			}
 			break;
+		}
 		case BPERF_FILTER_CPU:
 			cpu = perf_cpu_map__cpu(evsel__cpus(evsel), i).cpu;
 			assert(cpu >= 0);
+5 -3
tools/perf/util/bpf_counter_cgroup.c
···
 	struct bpf_link *link;
 	struct evsel *evsel;
 	struct cgroup *cgrp, *leader_cgrp;
-	int i, j;
+	unsigned int i;
 	struct perf_cpu cpu;
 	int total_cpus = cpu__max_cpu().cpu;
 	int map_fd, prog_fd, err;
···
 	evlist__for_each_entry(evlist, evsel) {
 		if (cgrp == NULL || evsel->cgrp == leader_cgrp) {
+			unsigned int j;
+
 			leader_cgrp = evsel->cgrp;
 			evsel->cgrp = NULL;
···
 static int bperf_cgrp__sync_counters(struct evlist *evlist)
 {
 	struct perf_cpu cpu;
-	int idx;
+	unsigned int idx;
 	int prog_fd = bpf_program__fd(skel->progs.trigger_read);

 	perf_cpu_map__for_each_cpu(cpu, idx, evlist->core.all_cpus)
···
 	evlist__for_each_entry(evlist, evsel) {
 		__u32 idx = evsel->core.idx;
-		int i;
+		unsigned int i;
 		struct perf_cpu cpu;

 		err = bpf_map_lookup_elem(reading_map_fd, &idx, values);
+2 -1
tools/perf/util/bpf_kwork.c
···
 static int setup_filters(struct perf_kwork *kwork)
 {
 	if (kwork->cpu_list != NULL) {
-		int idx, nr_cpus;
+		unsigned int idx;
+		int nr_cpus;
 		struct perf_cpu_map *map;
 		struct perf_cpu cpu;
 		int fd = bpf_map__fd(skel->maps.perf_kwork_cpu_filter);
+2 -1
tools/perf/util/bpf_kwork_top.c
···
 static int setup_filters(struct perf_kwork *kwork)
 {
 	if (kwork->cpu_list) {
-		int idx, nr_cpus, fd;
+		unsigned int idx;
+		int nr_cpus, fd;
 		struct perf_cpu_map *map;
 		struct perf_cpu cpu;
+1 -1
tools/perf/util/bpf_off_cpu.c
···
 	struct evlist *evlist = arg;
 	struct evsel *evsel;
 	struct perf_cpu pcpu;
-	int i;
+	unsigned int i;

 	/* update task filter for the given workload */
 	if (skel->rodata->has_task && skel->rodata->uses_tgid &&
+1 -1
tools/perf/util/bpf_trace_augment.c
···
 void augmented_syscalls__setup_bpf_output(void)
 {
 	struct perf_cpu cpu;
-	int i;
+	unsigned int i;

 	if (bpf_output == NULL)
 		return;
+5 -5
tools/perf/util/cpumap.c
···
 				       aggr_cpu_id_get_t get_id,
 				       void *data, bool needs_sort)
 {
-	int idx;
+	unsigned int idx;
 	struct perf_cpu cpu;
 	struct cpu_aggr_map *c = cpu_aggr_map__empty_new(perf_cpu_map__nr(cpus));
···
 		}
 	}
 	/* Trim. */
-	if (c->nr != perf_cpu_map__nr(cpus)) {
+	if (c->nr != (int)perf_cpu_map__nr(cpus)) {
 		struct cpu_aggr_map *trimmed_c =
 			realloc(c,
 				sizeof(struct cpu_aggr_map) + sizeof(struct aggr_cpu_id) * c->nr);
···
 #define COMMA first ? "" : ","

-	for (i = 0; i < perf_cpu_map__nr(map) + 1; i++) {
+	for (i = 0; i < (int)perf_cpu_map__nr(map) + 1; i++) {
 		struct perf_cpu cpu = { .cpu = INT16_MAX };
-		bool last = i == perf_cpu_map__nr(map);
+		bool last = i == (int)perf_cpu_map__nr(map);

 		if (!last)
 			cpu = perf_cpu_map__cpu(map, i);
···
 size_t cpu_map__snprint_mask(struct perf_cpu_map *map, char *buf, size_t size)
 {
-	int idx;
+	unsigned int idx;
 	char *ptr = buf;
 	unsigned char *bitmap;
 	struct perf_cpu c, last_cpu = perf_cpu_map__max(map);
+1 -1
tools/perf/util/cputopo.c
···
 		const char *core_cpu_list = topology->core_cpus_list[i];
 		struct perf_cpu_map *core_cpus = perf_cpu_map__new(core_cpu_list);
 		struct perf_cpu cpu;
-		int idx;
+		unsigned int idx;
 		bool has_first, first = true;

 		perf_cpu_map__for_each_cpu(cpu, idx, core_cpus) {
+1 -1
tools/perf/util/env.c
···
 	for (i = 0; i < env->nr_numa_nodes; i++) {
 		struct perf_cpu tmp;
-		int j;
+		unsigned int j;

 		nn = &env->numa_nodes[i];
 		perf_cpu_map__for_each_cpu(tmp, j, nn->map)
+1 -1
tools/perf/util/scripting-engines/trace-event-python.c
···
 	struct perf_cpu_map *cpus = counter->core.cpus;

 	for (int thread = 0; thread < perf_thread_map__nr(threads); thread++) {
-		int idx;
+		unsigned int idx;
 		struct perf_cpu cpu;

 		perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
+2 -1
tools/perf/util/session.c
···
 int perf_session__cpu_bitmap(struct perf_session *session,
 			     const char *cpu_list, unsigned long *cpu_bitmap)
 {
-	int i, err = -1;
+	unsigned int i;
+	int err = -1;
 	struct perf_cpu_map *map;
 	int nr_cpus = min(perf_session__env(session)->nr_cpus_avail, MAX_NR_CPUS);
 	struct perf_cpu cpu;
+2 -2
tools/perf/util/stat-display.c
···
 			       const struct aggr_cpu_id *id)
 {
 	struct perf_cpu cpu;
-	int idx;
+	unsigned int idx;

 	/*
 	 * Skip unsupported default events when not verbose. (default events
···
 				 struct evlist *evlist,
 				 struct outstate *os)
 {
-	int all_idx;
+	unsigned int all_idx;
 	struct perf_cpu cpu;

 	perf_cpu_map__for_each_cpu(cpu, all_idx, evlist->core.user_requested_cpus) {
+5 -3
tools/perf/util/stat.c
···
 static void evsel__copy_prev_raw_counts(struct evsel *evsel)
 {
-	int idx, nthreads = perf_thread_map__nr(evsel->core.threads);
+	int nthreads = perf_thread_map__nr(evsel->core.threads);

 	for (int thread = 0; thread < nthreads; thread++) {
+		unsigned int idx;
+
 		perf_cpu_map__for_each_idx(idx, evsel__cpus(evsel)) {
 			*perf_counts(evsel->counts, idx, thread) =
 				*perf_counts(evsel->prev_raw_counts, idx, thread);
···
 	struct perf_counts_values counts = { 0, };
 	struct aggr_cpu_id id;
 	struct perf_cpu cpu;
-	int idx;
+	unsigned int idx;

 	/* collect per-core counts */
 	perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus) {
···
 	struct perf_stat_evsel *ps = evsel->stats;
 	struct aggr_cpu_id core_id;
 	struct perf_cpu cpu;
-	int idx;
+	unsigned int idx;

 	if (!evsel->percore)
 		return;
+2 -1
tools/perf/util/svghelper.c
···
 static int str_to_bitmap(char *s, cpumask_t *b, int nr_cpus)
 {
-	int idx, ret = 0;
+	unsigned int idx;
+	int ret = 0;
 	struct perf_cpu_map *map;
 	struct perf_cpu cpu;
+2 -1
tools/perf/util/symbol.c
···
 {
 	struct perf_cpu_map *map;
 	struct perf_cpu cpu;
-	int i, err = -1;
+	unsigned int i;
+	int err = -1;

 	if (symbol_conf.parallelism_list_str == NULL)
 		return 0;
+1 -1
tools/perf/util/synthetic-events.c
···
 static void synthesize_mask(struct synthesize_cpu_map_data *data)
 {
-	int idx;
+	unsigned int idx;
 	struct perf_cpu cpu;

 	/* Due to padding, the 4bytes per entry mask variant is always smaller. */