Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'perf-tools-for-v5.17-2022-01-22' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux

Pull more perf tools updates from Arnaldo Carvalho de Melo:

- Fix printing 'phys_addr' in 'perf script'.

- Fix failure to add events with 'perf probe' in ppc64 due to not
removing leading dot (ppc64 ABIv1).

- Fix cpu_map__item() python binding building.

- Support event alias in form foo-bar-baz, add pmu-events and
parse-event tests for it.

- No need to setup affinities when starting a workload or attaching to
a pid.

- Use path__join() to compose a path instead of ad-hoc snprintf()
equivalent.

- Override attr->sample_period for non-libpfm4 events.

- Use libperf cpumap APIs instead of accessing the internal state
  directly (a short sketch of the accessor style follows the commit
  message below).

- Sync x86 arch prctl headers and files changed by the new
set_mempolicy_home_node syscall with the kernel sources.

- Remove duplicate include in cpumap.h.

- Remove redundant err variable.

* tag 'perf-tools-for-v5.17-2022-01-22' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux:
perf tools: Remove redundant err variable
perf test: Add parse-events test for aliases with hyphens
perf test: Add pmu-events test for aliases with hyphens
perf parse-events: Support event alias in form foo-bar-baz
perf evsel: Override attr->sample_period for non-libpfm4 events
perf cpumap: Remove duplicate include in cpumap.h
perf cpumap: Migrate to libperf cpumap api
perf python: Fix cpu_map__item() building
perf script: Fix printing 'phys_addr' failure issue
tools headers UAPI: Sync files changed by new set_mempolicy_home_node syscall
tools headers UAPI: Sync x86 arch prctl headers with the kernel sources
perf machine: Use path__join() to compose a path instead of snprintf(dir, '/', filename)
perf evlist: No need to setup affinities when disabling events for pid targets
perf evlist: No need to setup affinities when enabling events for pid targets
perf stat: No need to setup affinities when starting a workload
perf affinity: Allow passing a NULL arg to affinity__cleanup()
perf probe: Fix ppc64 'perf probe add events failed' case

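A note on the cpumap migration: the change replaces direct access to struct perf_cpu_map internals (cpus->nr, cpus->map[idx]) with libperf accessor calls, which is what most of the hunks below reduce to. A minimal stand-alone sketch of the accessor style, assuming the libperf headers and library from tools/lib/perf are installed (link with -lperf); the "0-3" CPU list is illustrative only:

/*
 * Sketch: enumerate a CPU map through the libperf accessors instead of
 * touching cpus->nr / cpus->map[] directly.
 */
#include <stdio.h>
#include <perf/cpumap.h>

int main(void)
{
	/* NULL would mean "all online CPUs"; "0-3" is just an example list. */
	struct perf_cpu_map *cpus = perf_cpu_map__new("0-3");
	struct perf_cpu cpu;
	int idx;

	if (!cpus)
		return 1;

	printf("%d cpus in map\n", perf_cpu_map__nr(cpus));

	perf_cpu_map__for_each_cpu(cpu, idx, cpus)
		printf("idx %d -> cpu %d\n", idx, cpu.cpu);

	perf_cpu_map__put(cpus);	/* drop the reference */
	return 0;
}

The same three accessors (perf_cpu_map__nr(), perf_cpu_map__cpu() and the perf_cpu_map__for_each_cpu() iterator) account for nearly all of the mechanical churn in the diffs that follow.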
Total: +354 -149
+14 -12
tools/arch/x86/include/uapi/asm/prctl.h
···
 #ifndef _ASM_X86_PRCTL_H
 #define _ASM_X86_PRCTL_H

-#define ARCH_SET_GS		0x1001
-#define ARCH_SET_FS		0x1002
-#define ARCH_GET_FS		0x1003
-#define ARCH_GET_GS		0x1004
+#define ARCH_SET_GS			0x1001
+#define ARCH_SET_FS			0x1002
+#define ARCH_GET_FS			0x1003
+#define ARCH_GET_GS			0x1004

-#define ARCH_GET_CPUID		0x1011
-#define ARCH_SET_CPUID		0x1012
+#define ARCH_GET_CPUID			0x1011
+#define ARCH_SET_CPUID			0x1012

-#define ARCH_GET_XCOMP_SUPP	0x1021
-#define ARCH_GET_XCOMP_PERM	0x1022
-#define ARCH_REQ_XCOMP_PERM	0x1023
+#define ARCH_GET_XCOMP_SUPP		0x1021
+#define ARCH_GET_XCOMP_PERM		0x1022
+#define ARCH_REQ_XCOMP_PERM		0x1023
+#define ARCH_GET_XCOMP_GUEST_PERM	0x1024
+#define ARCH_REQ_XCOMP_GUEST_PERM	0x1025

-#define ARCH_MAP_VDSO_X32	0x2001
-#define ARCH_MAP_VDSO_32	0x2002
-#define ARCH_MAP_VDSO_64	0x2003
+#define ARCH_MAP_VDSO_X32		0x2001
+#define ARCH_MAP_VDSO_32		0x2002
+#define ARCH_MAP_VDSO_64		0x2003

 #endif /* _ASM_X86_PRCTL_H */
+4 -1
tools/include/uapi/asm-generic/unistd.h
···
 #define __NR_futex_waitv 449
 __SYSCALL(__NR_futex_waitv, sys_futex_waitv)

+#define __NR_set_mempolicy_home_node 450
+__SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node)
+
 #undef __NR_syscalls
-#define __NR_syscalls 450
+#define __NR_syscalls 451

 /*
  * 32 bit systems traditionally used different
+2 -2
tools/lib/perf/evsel.c
···
 	}

 	if (evsel->fd == NULL &&
-	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
+	    perf_evsel__alloc_fd(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0)
 		return -ENOMEM;

 	perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
···
 {
 	int err = 0, i;

-	for (i = 0; i < evsel->cpus->nr && !err; i++)
+	for (i = 0; i < perf_cpu_map__nr(evsel->cpus) && !err; i++)
 		err = perf_evsel__run_ioctl(evsel,
 					    PERF_EVENT_IOC_SET_FILTER,
 					    (void *)filter, i);
+1
tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
···
 # 447 reserved for memfd_secret
 448	n64	process_mrelease	sys_process_mrelease
 449	n64	futex_waitv		sys_futex_waitv
+450	common	set_mempolicy_home_node	sys_set_mempolicy_home_node
+1
tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
···
 # 447 reserved for memfd_secret
 448	common	process_mrelease	sys_process_mrelease
 449	common	futex_waitv		sys_futex_waitv
+450	nospu	set_mempolicy_home_node	sys_set_mempolicy_home_node
+1
tools/perf/arch/s390/entry/syscalls/syscall.tbl
···
 # 447 reserved for memfd_secret
 448	common	process_mrelease	sys_process_mrelease		sys_process_mrelease
 449	common	futex_waitv		sys_futex_waitv			sys_futex_waitv
+450	common	set_mempolicy_home_node	sys_set_mempolicy_home_node	sys_set_mempolicy_home_node
+1
tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
···
 447	common	memfd_secret		sys_memfd_secret
 448	common	process_mrelease	sys_process_mrelease
 449	common	futex_waitv		sys_futex_waitv
+450	common	set_mempolicy_home_node	sys_set_mempolicy_home_node

 #
 # Due to a historical design error, certain syscalls are numbered differently
+1 -1
tools/perf/bench/epoll-ctl.c
···

 	/* default to the number of CPUs */
 	if (!nthreads)
-		nthreads = cpu->nr;
+		nthreads = perf_cpu_map__nr(cpu);

 	worker = calloc(nthreads, sizeof(*worker));
 	if (!worker)
+1 -1
tools/perf/bench/epoll-wait.c
···

 	/* default to the number of CPUs and leave one for the writer pthread */
 	if (!nthreads)
-		nthreads = cpu->nr - 1;
+		nthreads = perf_cpu_map__nr(cpu) - 1;

 	worker = calloc(nthreads, sizeof(*worker));
 	if (!worker) {
+2 -2
tools/perf/bench/evlist-open-close.c
···
 	int cnt = 0;

 	evlist__for_each_entry(evlist, evsel)
-		cnt += evsel->core.threads->nr * evsel->core.cpus->nr;
+		cnt += evsel->core.threads->nr * perf_cpu_map__nr(evsel->core.cpus);

 	return cnt;
 }
···

 	init_stats(&time_stats);

-	printf("  Number of cpus:\t%d\n", evlist->core.cpus->nr);
+	printf("  Number of cpus:\t%d\n", perf_cpu_map__nr(evlist->core.cpus));
 	printf("  Number of threads:\t%d\n", evlist->core.threads->nr);
 	printf("  Number of events:\t%d (%d fds)\n",
 	       evlist->core.nr_entries, evlist__count_evsel_fds(evlist));
+1 -1
tools/perf/bench/futex-hash.c
···
 	}

 	if (!params.nthreads) /* default to the number of CPUs */
-		params.nthreads = cpu->nr;
+		params.nthreads = perf_cpu_map__nr(cpu);

 	worker = calloc(params.nthreads, sizeof(*worker));
 	if (!worker)
+1 -1
tools/perf/bench/futex-lock-pi.c
···
 	}

 	if (!params.nthreads)
-		params.nthreads = cpu->nr;
+		params.nthreads = perf_cpu_map__nr(cpu);

 	worker = calloc(params.nthreads, sizeof(*worker));
 	if (!worker)
+1 -1
tools/perf/bench/futex-requeue.c
···
 	}

 	if (!params.nthreads)
-		params.nthreads = cpu->nr;
+		params.nthreads = perf_cpu_map__nr(cpu);

 	worker = calloc(params.nthreads, sizeof(*worker));
 	if (!worker)
+1 -1
tools/perf/bench/futex-wake-parallel.c
···
 		err(EXIT_FAILURE, "calloc");

 	if (!params.nthreads)
-		params.nthreads = cpu->nr;
+		params.nthreads = perf_cpu_map__nr(cpu);

 	/* some sanity checks */
 	if (params.nwakes > params.nthreads ||
+1 -1
tools/perf/bench/futex-wake.c
···
 	}

 	if (!params.nthreads)
-		params.nthreads = cpu->nr;
+		params.nthreads = perf_cpu_map__nr(cpu);

 	worker = calloc(params.nthreads, sizeof(*worker));
 	if (!worker)
+1 -1
tools/perf/builtin-ftrace.c
···
 	int ret;
 	int last_cpu;

-	last_cpu = perf_cpu_map__cpu(cpumap, cpumap->nr - 1).cpu;
+	last_cpu = perf_cpu_map__cpu(cpumap, perf_cpu_map__nr(cpumap) - 1).cpu;
 	mask_size = last_cpu / 4 + 2; /* one more byte for EOS */
 	mask_size += last_cpu / 32; /* ',' is needed for every 32th cpus */

+1 -4
tools/perf/builtin-inject.c
···
 static int perf_event__repipe_tracing_data(struct perf_session *session,
 					   union perf_event *event)
 {
-	int err;
-
 	perf_event__repipe_synth(session->tool, event);
-	err = perf_event__process_tracing_data(session, event);

-	return err;
+	return perf_event__process_tracing_data(session, event);
 }

 static int dso__read_build_id(struct dso *dso)
+1 -1
tools/perf/builtin-script.c
···
 		return -EINVAL;

 	if (PRINT_FIELD(PHYS_ADDR) &&
-	    evsel__check_stype(evsel, PERF_SAMPLE_PHYS_ADDR, "PHYS_ADDR", PERF_OUTPUT_PHYS_ADDR))
+	    evsel__do_check_stype(evsel, PERF_SAMPLE_PHYS_ADDR, "PHYS_ADDR", PERF_OUTPUT_PHYS_ADDR, allow_user_set))
 		return -EINVAL;

 	if (PRINT_FIELD(DATA_PAGE_SIZE) &&
+14 -10
tools/perf/builtin-stat.c
···
 	if (!a->core.cpus || !b->core.cpus)
 		return false;

-	if (a->core.cpus->nr != b->core.cpus->nr)
+	if (perf_cpu_map__nr(a->core.cpus) != perf_cpu_map__nr(b->core.cpus))
 		return false;

-	for (int i = 0; i < a->core.cpus->nr; i++) {
-		if (a->core.cpus->map[i].cpu != b->core.cpus->map[i].cpu)
+	for (int i = 0; i < perf_cpu_map__nr(a->core.cpus); i++) {
+		if (perf_cpu_map__cpu(a->core.cpus, i).cpu !=
+		    perf_cpu_map__cpu(b->core.cpus, i).cpu)
 			return false;
 	}
···
 	const bool forks = (argc > 0);
 	bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false;
 	struct evlist_cpu_iterator evlist_cpu_itr;
-	struct affinity affinity;
+	struct affinity saved_affinity, *affinity = NULL;
 	int err;
 	bool second_pass = false;
···
 	if (group)
 		evlist__set_leader(evsel_list);

-	if (affinity__setup(&affinity) < 0)
-		return -1;
+	if (!cpu_map__is_dummy(evsel_list->core.cpus)) {
+		if (affinity__setup(&saved_affinity) < 0)
+			return -1;
+		affinity = &saved_affinity;
+	}

 	evlist__for_each_entry(evsel_list, counter) {
 		if (bpf_counter__load(counter, &target))
···
 		all_counters_use_bpf = false;
 	}

-	evlist__for_each_cpu(evlist_cpu_itr, evsel_list, &affinity) {
+	evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
 		counter = evlist_cpu_itr.evsel;

 		/*
···
 	 */

 	/* First close errored or weak retry */
-	evlist__for_each_cpu(evlist_cpu_itr, evsel_list, &affinity) {
+	evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
 		counter = evlist_cpu_itr.evsel;

 		if (!counter->reset_group && !counter->errored)
···
 		perf_evsel__close_cpu(&counter->core, evlist_cpu_itr.cpu_map_idx);
 	}
 	/* Now reopen weak */
-	evlist__for_each_cpu(evlist_cpu_itr, evsel_list, &affinity) {
+	evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
 		counter = evlist_cpu_itr.evsel;

 		if (!counter->reset_group && !counter->errored)
···
 			counter->supported = true;
 		}
 	}
-	affinity__cleanup(&affinity);
+	affinity__cleanup(affinity);

 	evlist__for_each_entry(evsel_list, counter) {
 		if (!counter->supported) {
+16
tools/perf/pmu-events/arch/test/test_soc/cpu/uncore.json
···
 		"EdgeDetect": "0"
 	},
 	{
+		"Unit": "CBO",
+		"EventCode": "0xE0",
+		"UMask": "0x00",
+		"EventName": "event-hyphen",
+		"BriefDescription": "UNC_CBO_HYPHEN",
+		"PublicDescription": "UNC_CBO_HYPHEN"
+	},
+	{
+		"Unit": "CBO",
+		"EventCode": "0xC0",
+		"UMask": "0x00",
+		"EventName": "event-two-hyph",
+		"BriefDescription": "UNC_CBO_TWO_HYPH",
+		"PublicDescription": "UNC_CBO_TWO_HYPH"
+	},
+	{
 		"EventCode": "0x7",
 		"EventName": "uncore_hisi_l3c.rd_hit_cpipe",
 		"BriefDescription": "Total read hits",
+2 -2
tools/perf/tests/bitmap.c
···
 	bm = bitmap_zalloc(nbits);

 	if (map && bm) {
-		for (i = 0; i < map->nr; i++)
-			set_bit(map->map[i].cpu, bm);
+		for (i = 0; i < perf_cpu_map__nr(map); i++)
+			set_bit(perf_cpu_map__cpu(map, i).cpu, bm);
 	}

 	if (map)
+4 -4
tools/perf/tests/event_update.c
···

 	TEST_ASSERT_VAL("wrong id", ev->id == 123);
 	TEST_ASSERT_VAL("wrong type", ev->type == PERF_EVENT_UPDATE__CPUS);
-	TEST_ASSERT_VAL("wrong cpus", map->nr == 3);
-	TEST_ASSERT_VAL("wrong cpus", map->map[0].cpu == 1);
-	TEST_ASSERT_VAL("wrong cpus", map->map[1].cpu == 2);
-	TEST_ASSERT_VAL("wrong cpus", map->map[2].cpu == 3);
+	TEST_ASSERT_VAL("wrong cpus", perf_cpu_map__nr(map) == 3);
+	TEST_ASSERT_VAL("wrong cpus", perf_cpu_map__cpu(map, 0).cpu == 1);
+	TEST_ASSERT_VAL("wrong cpus", perf_cpu_map__cpu(map, 1).cpu == 2);
+	TEST_ASSERT_VAL("wrong cpus", perf_cpu_map__cpu(map, 2).cpu == 3);
 	perf_cpu_map__put(map);
 	return 0;
 }
+5 -4
tools/perf/tests/mem2node.c
···
 {
 	struct perf_cpu_map *map = perf_cpu_map__new(str);
 	unsigned long *bm = NULL;
-	int i;

 	bm = bitmap_zalloc(nbits);

 	if (map && bm) {
-		for (i = 0; i < map->nr; i++) {
-			set_bit(map->map[i].cpu, bm);
-		}
+		struct perf_cpu cpu;
+		int i;
+
+		perf_cpu_map__for_each_cpu(cpu, i, map)
+			set_bit(cpu.cpu, bm);
 	}

 	if (map)
+3 -2
tools/perf/tests/mmap-basic.c
···
 	}

 	CPU_ZERO(&cpu_set);
-	CPU_SET(cpus->map[0].cpu, &cpu_set);
+	CPU_SET(perf_cpu_map__cpu(cpus, 0).cpu, &cpu_set);
 	sched_setaffinity(0, sizeof(cpu_set), &cpu_set);
 	if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
 		pr_debug("sched_setaffinity() failed on CPU %d: %s ",
-			 cpus->map[0].cpu, str_error_r(errno, sbuf, sizeof(sbuf)));
+			 perf_cpu_map__cpu(cpus, 0).cpu,
+			 str_error_r(errno, sbuf, sizeof(sbuf)));
 		goto out_free_cpus;
 	}
+49
tools/perf/tests/parse-events.c
···
 	return ret;
 }

+static int test_event_fake_pmu(const char *str)
+{
+	struct parse_events_error err;
+	struct evlist *evlist;
+	int ret;
+
+	evlist = evlist__new();
+	if (!evlist)
+		return -ENOMEM;
+
+	parse_events_error__init(&err);
+	perf_pmu__test_parse_init();
+	ret = __parse_events(evlist, str, &err, &perf_pmu__fake);
+	if (ret) {
+		pr_debug("failed to parse event '%s', err %d, str '%s'\n",
+			 str, ret, err.str);
+		parse_events_error__print(&err, str);
+	}
+
+	parse_events_error__exit(&err);
+	evlist__delete(evlist);
+
+	return ret;
+}
+
 static int test_events(struct evlist_test *events, unsigned cnt)
 {
 	int ret1, ret2 = 0;
···
 	return test_event(&e);
 }

+static int test_pmu_events_alias2(void)
+{
+	static const char events[][30] = {
+		"event-hyphen",
+		"event-two-hyph",
+	};
+	unsigned long i;
+	int ret = 0;
+
+	for (i = 0; i < ARRAY_SIZE(events); i++) {
+		ret = test_event_fake_pmu(&events[i][0]);
+		if (ret) {
+			pr_err("check_parse_fake %s failed\n", &events[i][0]);
+			break;
+		}
+	}
+
+	return ret;
+}
+
 static int test__parse_events(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
 {
 	int ret1, ret2 = 0;
···
 		if (ret)
 			return ret;
 	}
+
+	ret1 = test_pmu_events_alias2();
+	if (!ret2)
+		ret2 = ret1;

 	ret1 = test_terms(test__terms, ARRAY_SIZE(test__terms));
 	if (!ret2)
+32
tools/perf/tests/pmu-events.c
···
 	.matching_pmu = "uncore_cbox_0",
 };

+static const struct perf_pmu_test_event uncore_hyphen = {
+	.event = {
+		.name = "event-hyphen",
+		.event = "umask=0x00,event=0xe0",
+		.desc = "Unit: uncore_cbox UNC_CBO_HYPHEN",
+		.topic = "uncore",
+		.long_desc = "UNC_CBO_HYPHEN",
+		.pmu = "uncore_cbox",
+	},
+	.alias_str = "umask=0,event=0xe0",
+	.alias_long_desc = "UNC_CBO_HYPHEN",
+	.matching_pmu = "uncore_cbox_0",
+};
+
+static const struct perf_pmu_test_event uncore_two_hyph = {
+	.event = {
+		.name = "event-two-hyph",
+		.event = "umask=0x00,event=0xc0",
+		.desc = "Unit: uncore_cbox UNC_CBO_TWO_HYPH",
+		.topic = "uncore",
+		.long_desc = "UNC_CBO_TWO_HYPH",
+		.pmu = "uncore_cbox",
+	},
+	.alias_str = "umask=0,event=0xc0",
+	.alias_long_desc = "UNC_CBO_TWO_HYPH",
+	.matching_pmu = "uncore_cbox_0",
+};
+
 static const struct perf_pmu_test_event uncore_hisi_l3c_rd_hit_cpipe = {
 	.event = {
 		.name = "uncore_hisi_l3c.rd_hit_cpipe",
···
 static const struct perf_pmu_test_event *uncore_events[] = {
 	&uncore_hisi_ddrc_flux_wcmd,
 	&unc_cbo_xsnp_response_miss_eviction,
+	&uncore_hyphen,
+	&uncore_two_hyph,
 	&uncore_hisi_l3c_rd_hit_cpipe,
 	&uncore_imc_free_running_cache_miss,
 	&uncore_imc_cache_hits,
···
 	},
 	.aliases = {
 		&unc_cbo_xsnp_response_miss_eviction,
+		&uncore_hyphen,
+		&uncore_two_hyph,
 	},
 },
 {
+21 -16
tools/perf/tests/topology.c
···
 	}

 	// Test that CPU ID contains socket, die, core and CPU
-	for (i = 0; i < map->nr; i++) {
+	for (i = 0; i < perf_cpu_map__nr(map); i++) {
 		id = aggr_cpu_id__cpu(perf_cpu_map__cpu(map, i), NULL);
-		TEST_ASSERT_VAL("Cpu map - CPU ID doesn't match", map->map[i].cpu == id.cpu.cpu);
+		TEST_ASSERT_VAL("Cpu map - CPU ID doesn't match",
+				perf_cpu_map__cpu(map, i).cpu == id.cpu.cpu);

 		TEST_ASSERT_VAL("Cpu map - Core ID doesn't match",
-			session->header.env.cpu[map->map[i].cpu].core_id == id.core);
+			session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].core_id == id.core);
 		TEST_ASSERT_VAL("Cpu map - Socket ID doesn't match",
-			session->header.env.cpu[map->map[i].cpu].socket_id == id.socket);
+			session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].socket_id ==
+			id.socket);

 		TEST_ASSERT_VAL("Cpu map - Die ID doesn't match",
-			session->header.env.cpu[map->map[i].cpu].die_id == id.die);
+			session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].die_id == id.die);
 		TEST_ASSERT_VAL("Cpu map - Node ID is set", id.node == -1);
 		TEST_ASSERT_VAL("Cpu map - Thread is set", id.thread == -1);
 	}

 	// Test that core ID contains socket, die and core
-	for (i = 0; i < map->nr; i++) {
+	for (i = 0; i < perf_cpu_map__nr(map); i++) {
 		id = aggr_cpu_id__core(perf_cpu_map__cpu(map, i), NULL);
 		TEST_ASSERT_VAL("Core map - Core ID doesn't match",
-			session->header.env.cpu[map->map[i].cpu].core_id == id.core);
+			session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].core_id == id.core);

 		TEST_ASSERT_VAL("Core map - Socket ID doesn't match",
-			session->header.env.cpu[map->map[i].cpu].socket_id == id.socket);
+			session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].socket_id ==
+			id.socket);

 		TEST_ASSERT_VAL("Core map - Die ID doesn't match",
-			session->header.env.cpu[map->map[i].cpu].die_id == id.die);
+			session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].die_id == id.die);
 		TEST_ASSERT_VAL("Core map - Node ID is set", id.node == -1);
 		TEST_ASSERT_VAL("Core map - Thread is set", id.thread == -1);
 	}

 	// Test that die ID contains socket and die
-	for (i = 0; i < map->nr; i++) {
+	for (i = 0; i < perf_cpu_map__nr(map); i++) {
 		id = aggr_cpu_id__die(perf_cpu_map__cpu(map, i), NULL);
 		TEST_ASSERT_VAL("Die map - Socket ID doesn't match",
-			session->header.env.cpu[map->map[i].cpu].socket_id == id.socket);
+			session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].socket_id ==
+			id.socket);

 		TEST_ASSERT_VAL("Die map - Die ID doesn't match",
-			session->header.env.cpu[map->map[i].cpu].die_id == id.die);
+			session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].die_id == id.die);

 		TEST_ASSERT_VAL("Die map - Node ID is set", id.node == -1);
 		TEST_ASSERT_VAL("Die map - Core is set", id.core == -1);
···
 	}

 	// Test that socket ID contains only socket
-	for (i = 0; i < map->nr; i++) {
+	for (i = 0; i < perf_cpu_map__nr(map); i++) {
 		id = aggr_cpu_id__socket(perf_cpu_map__cpu(map, i), NULL);
 		TEST_ASSERT_VAL("Socket map - Socket ID doesn't match",
-			session->header.env.cpu[map->map[i].cpu].socket_id == id.socket);
+			session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].socket_id ==
+			id.socket);

 		TEST_ASSERT_VAL("Socket map - Node ID is set", id.node == -1);
 		TEST_ASSERT_VAL("Socket map - Die ID is set", id.die == -1);
···
 	}

 	// Test that node ID contains only node
-	for (i = 0; i < map->nr; i++) {
+	for (i = 0; i < perf_cpu_map__nr(map); i++) {
 		id = aggr_cpu_id__node(perf_cpu_map__cpu(map, i), NULL);
 		TEST_ASSERT_VAL("Node map - Node ID doesn't match",
-			cpu__get_node(map->map[i]) == id.node);
+			cpu__get_node(perf_cpu_map__cpu(map, i)) == id.node);
 		TEST_ASSERT_VAL("Node map - Socket is set", id.socket == -1);
 		TEST_ASSERT_VAL("Node map - Die ID is set", id.die == -1);
 		TEST_ASSERT_VAL("Node map - Core is set", id.core == -1);
+7 -1
tools/perf/util/affinity.c
···
 	clear_bit(cpu, a->sched_cpus);
 }

-void affinity__cleanup(struct affinity *a)
+static void __affinity__cleanup(struct affinity *a)
 {
 	int cpu_set_size = get_cpu_set_size();

···
 	sched_setaffinity(0, cpu_set_size, (cpu_set_t *)a->orig_cpus);
 	zfree(&a->sched_cpus);
 	zfree(&a->orig_cpus);
+}
+
+void affinity__cleanup(struct affinity *a)
+{
+	if (a != NULL)
+		__affinity__cleanup(a);
 }
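The hunk above is what lets builtin-stat.c and evlist.c (earlier in this series) call the cleanup path unconditionally with a possibly-NULL pointer. The same NULL-tolerant cleanup shape, reduced to a self-contained sketch; the ctx names are hypothetical stand-ins, not perf code:

#include <stdlib.h>

struct ctx { int *buf; };

static void __ctx_cleanup(struct ctx *c)
{
	/* real teardown: only reached for a context that was set up */
	free(c->buf);
	c->buf = NULL;
}

static void ctx_cleanup(struct ctx *c)
{
	if (c != NULL)	/* tolerate callers that skipped setup */
		__ctx_cleanup(c);
}

int main(void)
{
	struct ctx saved = { .buf = NULL }, *ctx = NULL;
	int setup_wanted = 0;	/* cf. !cpu_map__is_dummy(...) in builtin-stat.c */

	if (setup_wanted) {
		saved.buf = malloc(16);
		ctx = &saved;
	}

	ctx_cleanup(ctx);	/* safe either way, like affinity__cleanup(NULL) */
	return 0;
}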
+1 -1
tools/perf/util/auxtrace.c
···
 	mp->idx = idx;

 	if (per_cpu) {
-		mp->cpu = evlist->core.cpus->map[idx];
+		mp->cpu = perf_cpu_map__cpu(evlist->core.cpus, idx);
 		if (evlist->core.threads)
 			mp->tid = perf_thread_map__pid(evlist->core.threads, 0);
 		else
+1 -1
tools/perf/util/counts.c
···
 	struct perf_cpu_map *cpus = evsel__cpus(evsel);
 	int nthreads = perf_thread_map__nr(evsel->core.threads);

-	evsel->counts = perf_counts__new(cpus ? cpus->nr : 1, nthreads);
+	evsel->counts = perf_counts__new(perf_cpu_map__nr(cpus), nthreads);
 	return evsel->counts != NULL ? 0 : -ENOMEM;
 }
+1 -2
tools/perf/util/cpumap.h
···

 #include <stdbool.h>
 #include <stdio.h>
-#include <stdbool.h>
 #include <internal/cpumap.h>
 #include <perf/cpumap.h>

···
  */
 static inline bool cpu_map__is_dummy(struct perf_cpu_map *cpus)
 {
-	return cpus->nr == 1 && cpus->map[0].cpu == -1;
+	return perf_cpu_map__nr(cpus) == 1 && perf_cpu_map__cpu(cpus, 0).cpu == -1;
 }

 /**
+2 -2
tools/perf/util/cputopo.c
···
 	if (!node_map)
 		goto out;

-	nr = (u32) node_map->nr;
+	nr = (u32) perf_cpu_map__nr(node_map);

 	tp = zalloc(sizeof(*tp) + sizeof(tp->nodes[0])*nr);
 	if (!tp)
···
 	tp->nr = nr;

 	for (i = 0; i < nr; i++) {
-		if (load_numa_node(&tp->nodes[i], node_map->map[i].cpu)) {
+		if (load_numa_node(&tp->nodes[i], perf_cpu_map__cpu(node_map, i).cpu)) {
 			numa_topology__delete(tp);
 			tp = NULL;
 			break;
+6 -5
tools/perf/util/evlist-hybrid.c
···

 		events_nr++;

-		if (matched_cpus->nr > 0 && (unmatched_cpus->nr > 0 ||
-		    matched_cpus->nr < cpus->nr ||
-		    matched_cpus->nr < pmu->cpus->nr)) {
+		if (perf_cpu_map__nr(matched_cpus) > 0 &&
+		    (perf_cpu_map__nr(unmatched_cpus) > 0 ||
+		     perf_cpu_map__nr(matched_cpus) < perf_cpu_map__nr(cpus) ||
+		     perf_cpu_map__nr(matched_cpus) < perf_cpu_map__nr(pmu->cpus))) {
 			perf_cpu_map__put(evsel->core.cpus);
 			perf_cpu_map__put(evsel->core.own_cpus);
 			evsel->core.cpus = perf_cpu_map__get(matched_cpus);
 			evsel->core.own_cpus = perf_cpu_map__get(matched_cpus);

-			if (unmatched_cpus->nr > 0) {
+			if (perf_cpu_map__nr(unmatched_cpus) > 0) {
 				cpu_map__snprint(matched_cpus, buf1, sizeof(buf1));
 				pr_warning("WARNING: use %s in '%s' for '%s', skip other cpus in list.\n",
 					   buf1, pmu->name, evsel->name);
 			}
 		}

-		if (matched_cpus->nr == 0) {
+		if (perf_cpu_map__nr(matched_cpus) == 0) {
 			evlist__remove(evlist, evsel);
 			evsel__delete(evsel);

+18 -10
tools/perf/util/evlist.c
···
 {
 	struct evsel *pos;
 	struct evlist_cpu_iterator evlist_cpu_itr;
-	struct affinity affinity;
+	struct affinity saved_affinity, *affinity = NULL;
 	bool has_imm = false;

-	if (affinity__setup(&affinity) < 0)
-		return;
+	// See explanation in evlist__close()
+	if (!cpu_map__is_dummy(evlist->core.cpus)) {
+		if (affinity__setup(&saved_affinity) < 0)
+			return;
+		affinity = &saved_affinity;
+	}

 	/* Disable 'immediate' events last */
 	for (int imm = 0; imm <= 1; imm++) {
-		evlist__for_each_cpu(evlist_cpu_itr, evlist, &affinity) {
+		evlist__for_each_cpu(evlist_cpu_itr, evlist, affinity) {
 			pos = evlist_cpu_itr.evsel;
 			if (evsel__strcmp(pos, evsel_name))
 				continue;
···
 			break;
 	}

-	affinity__cleanup(&affinity);
+	affinity__cleanup(affinity);
 	evlist__for_each_entry(evlist, pos) {
 		if (evsel__strcmp(pos, evsel_name))
 			continue;
···
 {
 	struct evsel *pos;
 	struct evlist_cpu_iterator evlist_cpu_itr;
-	struct affinity affinity;
+	struct affinity saved_affinity, *affinity = NULL;

-	if (affinity__setup(&affinity) < 0)
-		return;
+	// See explanation in evlist__close()
+	if (!cpu_map__is_dummy(evlist->core.cpus)) {
+		if (affinity__setup(&saved_affinity) < 0)
+			return;
+		affinity = &saved_affinity;
+	}

-	evlist__for_each_cpu(evlist_cpu_itr, evlist, &affinity) {
+	evlist__for_each_cpu(evlist_cpu_itr, evlist, affinity) {
 		pos = evlist_cpu_itr.evsel;
 		if (evsel__strcmp(pos, evsel_name))
 			continue;
···
 			continue;
 		evsel__enable_cpu(pos, evlist_cpu_itr.cpu_map_idx);
 	}
-	affinity__cleanup(&affinity);
+	affinity__cleanup(affinity);
 	evlist__for_each_entry(evlist, pos) {
 		if (evsel__strcmp(pos, evsel_name))
 			continue;
+28 -17
tools/perf/util/evsel.c
···
 {
 }

+static void evsel__set_default_freq_period(struct record_opts *opts,
+					   struct perf_event_attr *attr)
+{
+	if (opts->freq) {
+		attr->freq = 1;
+		attr->sample_freq = opts->freq;
+	} else {
+		attr->sample_period = opts->default_interval;
+	}
+}
+
 /*
  * The enable_on_exec/disabled value strategy:
  *
···
 	 * We default some events to have a default interval. But keep
 	 * it a weak assumption overridable by the user.
 	 */
-	if (!attr->sample_period) {
-		if (opts->freq) {
-			attr->freq = 1;
-			attr->sample_freq = opts->freq;
-		} else {
-			attr->sample_period = opts->default_interval;
-		}
-	}
+	if ((evsel->is_libpfm_event && !attr->sample_period) ||
+	    (!evsel->is_libpfm_event && (!attr->sample_period ||
+					 opts->user_freq != UINT_MAX ||
+					 opts->user_interval != ULLONG_MAX)))
+		evsel__set_default_freq_period(opts, attr);
+
 	/*
 	 * If attr->freq was set (here or earlier), ask for period
 	 * to be sampled.
···
 	nthreads = threads->nr;

 	if (evsel->core.fd == NULL &&
-	    perf_evsel__alloc_fd(&evsel->core, cpus->nr, nthreads) < 0)
+	    perf_evsel__alloc_fd(&evsel->core, perf_cpu_map__nr(cpus), nthreads) < 0)
 		return -ENOMEM;

 	evsel->open_flags = PERF_FLAG_FD_CLOEXEC;
···
 	test_attr__ready();

 	pr_debug2_peo("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx",
-		      pid, cpus->map[idx].cpu, group_fd, evsel->open_flags);
+		      pid, perf_cpu_map__cpu(cpus, idx).cpu, group_fd, evsel->open_flags);

-	fd = sys_perf_event_open(&evsel->core.attr, pid, cpus->map[idx].cpu,
+	fd = sys_perf_event_open(&evsel->core.attr, pid,
+				 perf_cpu_map__cpu(cpus, idx).cpu,
 				 group_fd, evsel->open_flags);

 	FD(evsel, idx, thread) = fd;
···
 	bpf_counter__install_pe(evsel, idx, fd);

 	if (unlikely(test_attr__enabled)) {
-		test_attr__open(&evsel->core.attr, pid, cpus->map[idx],
+		test_attr__open(&evsel->core.attr, pid,
+				perf_cpu_map__cpu(cpus, idx),
 				fd, group_fd, evsel->open_flags);
 	}
···
 	if (evsel__precise_ip_fallback(evsel))
 		goto retry_open;

-	if (evsel__ignore_missing_thread(evsel, cpus->nr, idx, threads, thread, err)) {
+	if (evsel__ignore_missing_thread(evsel, perf_cpu_map__nr(cpus),
+					 idx, threads, thread, err)) {
 		/* We just removed 1 thread, so lower the upper nthreads limit. */
 		nthreads--;
···
 int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
 		struct perf_thread_map *threads)
 {
-	return evsel__open_cpu(evsel, cpus, threads, 0, cpus ? cpus->nr : 1);
+	return evsel__open_cpu(evsel, cpus, threads, 0, perf_cpu_map__nr(cpus));
 }

 void evsel__close(struct evsel *evsel)
···
 int evsel__open_per_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, int cpu_map_idx)
 {
 	if (cpu_map_idx == -1)
-		return evsel__open_cpu(evsel, cpus, NULL, 0,
-				       cpus ? cpus->nr : 1);
+		return evsel__open_cpu(evsel, cpus, NULL, 0, perf_cpu_map__nr(cpus));

 	return evsel__open_cpu(evsel, cpus, NULL, cpu_map_idx, cpu_map_idx + 1);
 }
···
 	struct perf_cpu_map *cpus = evsel->core.cpus;
 	struct perf_thread_map *threads = evsel->core.threads;

-	if (perf_evsel__alloc_id(&evsel->core, cpus->nr, threads->nr))
+	if (perf_evsel__alloc_id(&evsel->core, perf_cpu_map__nr(cpus), threads->nr))
 		return -ENOMEM;

 	return store_evsel_ids(evsel, evlist);
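The first two evsel.c hunks above narrow when the default frequency/period is applied: a libpfm4 event only takes the default if no period was set, while other events also take it when the user passed an explicit frequency or interval. Restated as a stand-alone predicate; the types are stripped-down stand-ins for the struct evsel and struct record_opts fields named in the diff:

#include <limits.h>
#include <stdbool.h>

struct opts {
	unsigned int user_freq;			/* UINT_MAX when not given */
	unsigned long long user_interval;	/* ULLONG_MAX when not given */
};

static bool want_default_freq_period(bool is_libpfm_event,
				     unsigned long long sample_period,
				     const struct opts *opts)
{
	if (is_libpfm_event)
		/* libpfm4 events: only fill in a default if none was set */
		return sample_period == 0;

	/* other events: also honor an explicit user frequency or interval */
	return sample_period == 0 ||
	       opts->user_freq != UINT_MAX ||
	       opts->user_interval != ULLONG_MAX;
}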
+2 -1
tools/perf/util/evsel.h
···
 #include <perf/evsel.h>
 #include "symbol_conf.h"
 #include <internal/cpumap.h>
+#include <perf/cpumap.h>

 struct bpf_object;
 struct cgroup;
···

 static inline int evsel__nr_cpus(struct evsel *evsel)
 {
-	return evsel__cpus(evsel)->nr;
+	return perf_cpu_map__nr(evsel__cpus(evsel));
 }

 void evsel__compute_deltas(struct evsel *evsel, int cpu, int thread,
+2 -1
tools/perf/util/machine.c
···
 #include "map_symbol.h"
 #include "branch.h"
 #include "mem-events.h"
+#include "path.h"
 #include "srcline.h"
 #include "symbol.h"
 #include "sort.h"
···
 		struct stat st;

 		/*sshfs might return bad dent->d_type, so we have to stat*/
-		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
+		path__join(path, sizeof(path), dir_name, dent->d_name);
 		if (stat(path, &st))
 			continue;
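For context, the path__join() call above composes "<dir>/<name>" into a bounded buffer, replacing the ad-hoc snprintf(). A stand-alone approximation for illustration; my_path__join is a hypothetical name and this is not the perf implementation:

#include <stdio.h>

/* join two path components with a single '/', skipped when either is empty */
static int my_path__join(char *bf, size_t size,
			 const char *path1, const char *path2)
{
	return snprintf(bf, size, "%s%s%s", path1,
			*path1 && *path2 ? "/" : "", path2);
}

int main(void)
{
	char path[256];

	my_path__join(path, sizeof(path), "/proc/1234", "maps");
	puts(path);	/* prints: /proc/1234/maps */
	return 0;
}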
+1 -1
tools/perf/util/mmap.c
···

 	nr_cpus = perf_cpu_map__nr(cpu_map);
 	for (idx = 0; idx < nr_cpus; idx++) {
-		cpu = cpu_map->map[idx]; /* map c index to online cpu index */
+		cpu = perf_cpu_map__cpu(cpu_map, idx); /* map c index to online cpu index */
 		if (cpu__get_node(cpu) == node)
 			set_bit(cpu.cpu, mask->bits);
 	}
+56 -11
tools/perf/util/parse-events.c
···
 			}
 		}
 	}
+
+	if (parse_state->fake_pmu) {
+		if (!parse_events_add_pmu(parse_state, list, str, head,
+					  true, true)) {
+			pr_debug("%s -> %s/%s/\n", str, "fake_pmu", str);
+			ok++;
+		}
+	}
+
 out_err:
 	if (ok)
 		*listp = list;
···
 	pmu = NULL;
 	while ((pmu = perf_pmu__scan(pmu)) != NULL) {
 		list_for_each_entry(alias, &pmu->aliases, list) {
-			if (strchr(alias->name, '-'))
+			char *tmp = strchr(alias->name, '-');
+
+			if (tmp) {
+				char *tmp2 = NULL;
+
+				tmp2 = strchr(tmp + 1, '-');
 				len++;
+				if (tmp2)
+					len++;
+			}
+
 			len++;
 		}
 	}
···
 		list_for_each_entry(alias, &pmu->aliases, list) {
 			struct perf_pmu_event_symbol *p = perf_pmu_events_list + len;
 			char *tmp = strchr(alias->name, '-');
+			char *tmp2 = NULL;

-			if (tmp != NULL) {
+			if (tmp)
+				tmp2 = strchr(tmp + 1, '-');
+			if (tmp2) {
+				SET_SYMBOL(strndup(alias->name, tmp - alias->name),
+					   PMU_EVENT_SYMBOL_PREFIX);
+				p++;
+				tmp++;
+				SET_SYMBOL(strndup(tmp, tmp2 - tmp), PMU_EVENT_SYMBOL_SUFFIX);
+				p++;
+				SET_SYMBOL(strdup(++tmp2), PMU_EVENT_SYMBOL_SUFFIX2);
+				len += 3;
+			} else if (tmp) {
 				SET_SYMBOL(strndup(alias->name, tmp - alias->name),
 					   PMU_EVENT_SYMBOL_PREFIX);
 				p++;
···
  */
 int perf_pmu__test_parse_init(void)
 {
-	struct perf_pmu_event_symbol *list;
+	struct perf_pmu_event_symbol *list, *tmp, symbols[] = {
+		{(char *)"read", PMU_EVENT_SYMBOL},
+		{(char *)"event", PMU_EVENT_SYMBOL_PREFIX},
+		{(char *)"two", PMU_EVENT_SYMBOL_SUFFIX},
+		{(char *)"hyphen", PMU_EVENT_SYMBOL_SUFFIX},
+		{(char *)"hyph", PMU_EVENT_SYMBOL_SUFFIX2},
+	};
+	unsigned long i, j;

-	list = malloc(sizeof(*list) * 1);
+	tmp = list = malloc(sizeof(*list) * ARRAY_SIZE(symbols));
 	if (!list)
 		return -ENOMEM;

-	list->type = PMU_EVENT_SYMBOL;
-	list->symbol = strdup("read");
-
-	if (!list->symbol) {
-		free(list);
-		return -ENOMEM;
+	for (i = 0; i < ARRAY_SIZE(symbols); i++, tmp++) {
+		tmp->type = symbols[i].type;
+		tmp->symbol = strdup(symbols[i].symbol);
+		if (!list->symbol)
+			goto err_free;
 	}

 	perf_pmu_events_list = list;
-	perf_pmu_events_list_num = 1;
+	perf_pmu_events_list_num = ARRAY_SIZE(symbols);
+
+	qsort(perf_pmu_events_list, ARRAY_SIZE(symbols),
+	      sizeof(struct perf_pmu_event_symbol), comp_pmu);
 	return 0;
+
+err_free:
+	for (j = 0, tmp = list; j < i; j++, tmp++)
+		free(tmp->symbol);
+	free(list);
+	return -ENOMEM;
 }

 enum perf_pmu_event_symbol_type
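What the scanner-side hunks above set up: a one-hyphen alias splits into a prefix and a suffix token, and a two-hyphen alias such as "event-two-hyph" now splits into prefix, suffix and the new SUFFIX2 token, so the grammar can reassemble the full name. The split logic, distilled into a stand-alone program:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *name = "event-two-hyph";	/* test alias from uncore.json above */
	const char *tmp = strchr(name, '-');
	const char *tmp2 = tmp ? strchr(tmp + 1, '-') : NULL;

	if (tmp2) {		/* foo-bar-baz: prefix + suffix + suffix2 */
		printf("PREFIX  %.*s\n", (int)(tmp - name), name);
		printf("SUFFIX  %.*s\n", (int)(tmp2 - tmp - 1), tmp + 1);
		printf("SUFFIX2 %s\n", tmp2 + 1);
	} else if (tmp) {	/* foo-bar: prefix + suffix */
		printf("PREFIX  %.*s\n", (int)(tmp - name), name);
		printf("SUFFIX  %s\n", tmp + 1);
	} else {		/* plain symbol */
		printf("SYMBOL  %s\n", name);
	}
	return 0;
}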
+1
tools/perf/util/parse-events.h
···
 	PMU_EVENT_SYMBOL,	 /* normal style PMU event */
 	PMU_EVENT_SYMBOL_PREFIX, /* prefix of pre-suf style event */
 	PMU_EVENT_SYMBOL_SUFFIX, /* suffix of pre-suf style event */
+	PMU_EVENT_SYMBOL_SUFFIX2, /* suffix of pre-suf2 style event */
 };

 struct perf_pmu_event_symbol {
+2
tools/perf/util/parse-events.l
···
 			return PE_PMU_EVENT_PRE;
 		case PMU_EVENT_SYMBOL_SUFFIX:
 			return PE_PMU_EVENT_SUF;
+		case PMU_EVENT_SYMBOL_SUFFIX2:
+			return PE_PMU_EVENT_SUF2;
 		case PMU_EVENT_SYMBOL:
 			return parse_state->fake_pmu
 				? PE_PMU_EVENT_FAKE : PE_KERNEL_PMU_EVENT;
+15 -2
tools/perf/util/parse-events.y
···
 %token PE_NAME_CACHE_TYPE PE_NAME_CACHE_OP_RESULT
 %token PE_PREFIX_MEM PE_PREFIX_RAW PE_PREFIX_GROUP
 %token PE_ERROR
-%token PE_PMU_EVENT_PRE PE_PMU_EVENT_SUF PE_KERNEL_PMU_EVENT PE_PMU_EVENT_FAKE
+%token PE_PMU_EVENT_PRE PE_PMU_EVENT_SUF PE_PMU_EVENT_SUF2 PE_KERNEL_PMU_EVENT PE_PMU_EVENT_FAKE
 %token PE_ARRAY_ALL PE_ARRAY_RANGE
 %token PE_DRV_CFG_TERM
 %type <num> PE_VALUE
···
 %type <str> PE_MODIFIER_EVENT
 %type <str> PE_MODIFIER_BP
 %type <str> PE_EVENT_NAME
-%type <str> PE_PMU_EVENT_PRE PE_PMU_EVENT_SUF PE_KERNEL_PMU_EVENT PE_PMU_EVENT_FAKE
+%type <str> PE_PMU_EVENT_PRE PE_PMU_EVENT_SUF PE_PMU_EVENT_SUF2 PE_KERNEL_PMU_EVENT PE_PMU_EVENT_FAKE
 %type <str> PE_DRV_CFG_TERM
 %type <str> event_pmu_name
 %destructor { free ($$); } <str>
···
 	err = parse_events_multi_pmu_add(_parse_state, $1, $2, &list);
 	free($1);
 	if (err < 0)
+		YYABORT;
+	$$ = list;
+}
+|
+PE_PMU_EVENT_PRE '-' PE_PMU_EVENT_SUF '-' PE_PMU_EVENT_SUF2 sep_dc
+{
+	struct list_head *list;
+	char pmu_name[128];
+	snprintf(pmu_name, sizeof(pmu_name), "%s-%s-%s", $1, $3, $5);
+	free($1);
+	free($3);
+	free($5);
+	if (parse_events_multi_pmu_add(_parse_state, pmu_name, NULL, &list) < 0)
 		YYABORT;
 	$$ = list;
 }
+2 -2
tools/perf/util/perf_api_probe.c
···
 	cpus = perf_cpu_map__new(NULL);
 	if (!cpus)
 		return false;
-	cpu = cpus->map[0];
+	cpu = perf_cpu_map__cpu(cpus, 0);
 	perf_cpu_map__put(cpus);

 	do {
···
 	if (!cpus)
 		return false;

-	cpu = cpus->map[0];
+	cpu = perf_cpu_map__cpu(cpus, 0);
 	perf_cpu_map__put(cpus);

 	fd = sys_perf_event_open(&attr, -1, cpu.cpu, -1, 0);
+3
tools/perf/util/probe-event.c
···
 	for (j = 0; j < num_matched_functions; j++) {
 		sym = syms[j];

+		if (sym->type != STT_FUNC)
+			continue;
+
 		/* There can be duplicated symbols in the map */
 		for (i = 0; i < j; i++)
 			if (sym->start == syms[i]->start) {
+3 -3
tools/perf/util/python.c
···
 {
 	struct pyrf_cpu_map *pcpus = (void *)obj;

-	return pcpus->cpus->nr;
+	return perf_cpu_map__nr(pcpus->cpus);
 }

 static PyObject *pyrf_cpu_map__item(PyObject *obj, Py_ssize_t i)
 {
 	struct pyrf_cpu_map *pcpus = (void *)obj;

-	if (i >= pcpus->cpus->nr)
+	if (i >= perf_cpu_map__nr(pcpus->cpus))
 		return NULL;

-	return Py_BuildValue("i", pcpus->cpus->map[i]);
+	return Py_BuildValue("i", perf_cpu_map__cpu(pcpus->cpus, i).cpu);
 }

 static PySequenceMethods pyrf_cpu_map__sequence_methods = {
+3 -3
tools/perf/util/record.c
···
 	if (opts->group)
 		evlist__set_leader(evlist);

-	if (evlist->core.cpus->map[0].cpu < 0)
+	if (perf_cpu_map__cpu(evlist->core.cpus, 0).cpu < 0)
 		opts->no_inherit = true;

 	use_comm_exec = perf_can_comm_exec();
···
 		struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);

 		if (cpus)
-			cpu = cpus->map[0];
+			cpu = perf_cpu_map__cpu(cpus, 0);

 		perf_cpu_map__put(cpus);
 	} else {
-		cpu = evlist->core.cpus->map[0];
+		cpu = perf_cpu_map__cpu(evlist->core.cpus, 0);
 	}

 	while (1) {
+2 -2
tools/perf/util/scripting-engines/trace-event-python.c
···
 	}

 	for (thread = 0; thread < threads->nr; thread++) {
-		for (cpu = 0; cpu < cpus->nr; cpu++) {
-			process_stat(counter, cpus->map[cpu],
+		for (cpu = 0; cpu < perf_cpu_map__nr(cpus); cpu++) {
+			process_stat(counter, perf_cpu_map__cpu(cpus, cpu),
 				     perf_thread_map__pid(threads, thread), tstamp,
 				     perf_counts(counter->counts, cpu, thread));
 		}
+2 -2
tools/perf/util/session.c
···
 		return -1;
 	}

-	for (i = 0; i < map->nr; i++) {
-		struct perf_cpu cpu = map->map[i];
+	for (i = 0; i < perf_cpu_map__nr(map); i++) {
+		struct perf_cpu cpu = perf_cpu_map__cpu(map, i);

 		if (cpu.cpu >= nr_cpus) {
 			pr_err("Requested CPU %d too large. "
+2 -2
tools/perf/util/svghelper.c
···
 	if (!m)
 		return -1;

-	for (i = 0; i < m->nr; i++) {
-		c = m->map[i];
+	for (i = 0; i < perf_cpu_map__nr(m); i++) {
+		c = perf_cpu_map__cpu(m, i);
 		if (c.cpu >= nr_cpus) {
 			ret = -1;
 			break;
+9 -9
tools/perf/util/synthetic-events.c
···
 static void synthesize_cpus(struct cpu_map_entries *cpus,
 			    struct perf_cpu_map *map)
 {
-	int i;
+	int i, map_nr = perf_cpu_map__nr(map);

-	cpus->nr = map->nr;
+	cpus->nr = map_nr;

-	for (i = 0; i < map->nr; i++)
-		cpus->cpu[i] = map->map[i].cpu;
+	for (i = 0; i < map_nr; i++)
+		cpus->cpu[i] = perf_cpu_map__cpu(map, i).cpu;
 }

 static void synthesize_mask(struct perf_record_record_cpu_map *mask,
···
 	mask->nr = BITS_TO_LONGS(max);
 	mask->long_size = sizeof(long);

-	for (i = 0; i < map->nr; i++)
-		set_bit(map->map[i].cpu, mask->mask);
+	for (i = 0; i < perf_cpu_map__nr(map); i++)
+		set_bit(perf_cpu_map__cpu(map, i).cpu, mask->mask);
 }

 static size_t cpus_size(struct perf_cpu_map *map)
 {
-	return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
+	return sizeof(struct cpu_map_entries) + perf_cpu_map__nr(map) * sizeof(u16);
 }

 static size_t mask_size(struct perf_cpu_map *map, int *max)
···

 	*max = 0;

-	for (i = 0; i < map->nr; i++) {
+	for (i = 0; i < perf_cpu_map__nr(map); i++) {
 		/* bit position of the cpu is + 1 */
-		int bit = map->map[i].cpu + 1;
+		int bit = perf_cpu_map__cpu(map, i).cpu + 1;

 		if (bit > *max)
 			*max = bit;
+3 -3
tools/perf/util/top.c
···

 	if (target->cpu_list)
 		ret += SNPRINTF(bf + ret, size - ret, ", CPU%s: %s)",
-				top->evlist->core.cpus->nr > 1 ? "s" : "",
+				perf_cpu_map__nr(top->evlist->core.cpus) > 1 ? "s" : "",
 				target->cpu_list);
 	else {
 		if (target->tid)
 			ret += SNPRINTF(bf + ret, size - ret, ")");
 		else
 			ret += SNPRINTF(bf + ret, size - ret, ", %d CPU%s)",
-					top->evlist->core.cpus->nr,
-					top->evlist->core.cpus->nr > 1 ? "s" : "");
+					perf_cpu_map__nr(top->evlist->core.cpus),
+					perf_cpu_map__nr(top->evlist->core.cpus) > 1 ? "s" : "");
 	}

 	perf_top__reset_sample_counters(top);