Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'perf-tools-fixes-for-v5.14-2021-07-18' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux

Pull perf tools fixes from Arnaldo Carvalho de Melo:

- Skip invalid hybrid PMU on hybrid systems when the atom (little) CPUs
are offlined.

- Fix 'perf test' problems related to the recently added hybrid
(BIG/little) code.

- Split ARM's coresight (hw tracing) decode by aux records to avoid
fatal decoding errors.

- Fix add event failure in 'perf probe' when running 32-bit perf in a
64-bit kernel.

- Fix 'perf sched record' failure when CONFIG_SCHEDSTATS is not set.

- Fix memory and refcount leaks detected by ASan (AddressSanitizer) when
running 'perf test', should be clean of warnings now.

- Remove broken definition of __LITTLE_ENDIAN from tools'
linux/kconfig.h, which was breaking the build in some systems.

- Cast PTHREAD_STACK_MIN to int as it may turn into 'long
sysconf(__SC_THREAD_STACK_MIN_VALUE)', breaking the build in some
systems.

- Fix libperf build error with LIBPFM4=1.

- Sync UAPI files changed by the memfd_secret new syscall.

* tag 'perf-tools-fixes-for-v5.14-2021-07-18' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux: (35 commits)
perf sched: Fix record failure when CONFIG_SCHEDSTATS is not set
perf probe: Fix add event failure when running 32-bit perf in a 64-bit kernel
perf data: Close all files in close_dir()
perf probe-file: Delete namelist in del_events() on the error path
perf test bpf: Free obj_buf
perf trace: Free strings in trace__parse_events_option()
perf trace: Free syscall tp fields in evsel->priv
perf trace: Free syscall->arg_fmt
perf trace: Free malloc'd trace fields on exit
perf lzma: Close lzma stream on exit
perf script: Fix memory 'threads' and 'cpus' leaks on exit
perf script: Release zstd data
perf session: Cleanup trace_event
perf inject: Close inject.output on exit
perf report: Free generated help strings for sort option
perf env: Fix memory leak of cpu_pmu_caps
perf test maps__merge_in: Fix memory leak of maps
perf dso: Fix memory leak in dso__new_map()
perf test event_update: Fix memory leak of unit
perf test event_update: Fix memory leak of evlist
...

+391 -98
+1
tools/arch/arm64/include/uapi/asm/unistd.h
··· 20 20 #define __ARCH_WANT_SET_GET_RLIMIT 21 21 #define __ARCH_WANT_TIME32_SYSCALLS 22 22 #define __ARCH_WANT_SYS_CLONE3 23 + #define __ARCH_WANT_MEMFD_SECRET 23 24 24 25 #include <asm-generic/unistd.h>
-6
tools/include/linux/kconfig.h
··· 4 4 5 5 /* CONFIG_CC_VERSION_TEXT (Do not delete this comment. See help in Kconfig) */ 6 6 7 - #ifdef CONFIG_CPU_BIG_ENDIAN 8 - #define __BIG_ENDIAN 4321 9 - #else 10 - #define __LITTLE_ENDIAN 1234 11 - #endif 12 - 13 7 #define __ARG_PLACEHOLDER_1 0, 14 8 #define __take_second_arg(__ignored, val, ...) val 15 9
+6 -1
tools/include/uapi/asm-generic/unistd.h
··· 873 873 #define __NR_landlock_restrict_self 446 874 874 __SYSCALL(__NR_landlock_restrict_self, sys_landlock_restrict_self) 875 875 876 + #ifdef __ARCH_WANT_MEMFD_SECRET 877 + #define __NR_memfd_secret 447 878 + __SYSCALL(__NR_memfd_secret, sys_memfd_secret) 879 + #endif 880 + 876 881 #undef __NR_syscalls 877 - #define __NR_syscalls 447 882 + #define __NR_syscalls 448 878 883 879 884 /* 880 885 * 32 bit systems traditionally used different
+1
tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
··· 368 368 444 common landlock_create_ruleset sys_landlock_create_ruleset 369 369 445 common landlock_add_rule sys_landlock_add_rule 370 370 446 common landlock_restrict_self sys_landlock_restrict_self 371 + 447 common memfd_secret sys_memfd_secret 371 372 372 373 # 373 374 # Due to a historical design error, certain syscalls are numbered differently
+9 -4
tools/perf/builtin-inject.c
··· 361 361 dso = machine__findnew_dso_id(machine, filename, id); 362 362 } 363 363 364 - if (dso) 364 + if (dso) { 365 + nsinfo__put(dso->nsinfo); 365 366 dso->nsinfo = nsi; 366 - else 367 + } else 367 368 nsinfo__put(nsi); 368 369 369 370 thread__put(thread); ··· 993 992 994 993 data.path = inject.input_name; 995 994 inject.session = perf_session__new(&data, inject.output.is_pipe, &inject.tool); 996 - if (IS_ERR(inject.session)) 997 - return PTR_ERR(inject.session); 995 + if (IS_ERR(inject.session)) { 996 + ret = PTR_ERR(inject.session); 997 + goto out_close_output; 998 + } 998 999 999 1000 if (zstd_init(&(inject.session->zstd_data), 0) < 0) 1000 1001 pr_warning("Decompression initialization failed.\n"); ··· 1038 1035 out_delete: 1039 1036 zstd_fini(&(inject.session->zstd_data)); 1040 1037 perf_session__delete(inject.session); 1038 + out_close_output: 1039 + perf_data__close(&inject.output); 1041 1040 free(inject.itrace_synth_opts.vm_tm_corr_args); 1042 1041 return ret; 1043 1042 }
+22 -11
tools/perf/builtin-report.c
··· 1175 1175 .annotation_opts = annotation__default_options, 1176 1176 .skip_empty = true, 1177 1177 }; 1178 + char *sort_order_help = sort_help("sort by key(s):"); 1179 + char *field_order_help = sort_help("output field(s): overhead period sample "); 1178 1180 const struct option options[] = { 1179 1181 OPT_STRING('i', "input", &input_name, "file", 1180 1182 "input file name"), ··· 1211 1209 OPT_BOOLEAN(0, "header-only", &report.header_only, 1212 1210 "Show only data header."), 1213 1211 OPT_STRING('s', "sort", &sort_order, "key[,key2...]", 1214 - sort_help("sort by key(s):")), 1212 + sort_order_help), 1215 1213 OPT_STRING('F', "fields", &field_order, "key[,keys...]", 1216 - sort_help("output field(s): overhead period sample ")), 1214 + field_order_help), 1217 1215 OPT_BOOLEAN(0, "show-cpu-utilization", &symbol_conf.show_cpu_utilization, 1218 1216 "Show sample percentage for different cpu modes"), 1219 1217 OPT_BOOLEAN_FLAG(0, "showcpuutilization", &symbol_conf.show_cpu_utilization, ··· 1346 1344 char sort_tmp[128]; 1347 1345 1348 1346 if (ret < 0) 1349 - return ret; 1347 + goto exit; 1350 1348 1351 1349 ret = perf_config(report__config, &report); 1352 1350 if (ret) 1353 - return ret; 1351 + goto exit; 1354 1352 1355 1353 argc = parse_options(argc, argv, options, report_usage, 0); 1356 1354 if (argc) { ··· 1364 1362 report.symbol_filter_str = argv[0]; 1365 1363 } 1366 1364 1367 - if (annotate_check_args(&report.annotation_opts) < 0) 1368 - return -EINVAL; 1365 + if (annotate_check_args(&report.annotation_opts) < 0) { 1366 + ret = -EINVAL; 1367 + goto exit; 1368 + } 1369 1369 1370 1370 if (report.mmaps_mode) 1371 1371 report.tasks_mode = true; ··· 1381 1377 if (symbol_conf.vmlinux_name && 1382 1378 access(symbol_conf.vmlinux_name, R_OK)) { 1383 1379 pr_err("Invalid file: %s\n", symbol_conf.vmlinux_name); 1384 - return -EINVAL; 1380 + ret = -EINVAL; 1381 + goto exit; 1385 1382 } 1386 1383 if (symbol_conf.kallsyms_name && 1387 1384 access(symbol_conf.kallsyms_name, 
R_OK)) { 1388 1385 pr_err("Invalid file: %s\n", symbol_conf.kallsyms_name); 1389 - return -EINVAL; 1386 + ret = -EINVAL; 1387 + goto exit; 1390 1388 } 1391 1389 1392 1390 if (report.inverted_callchain) ··· 1412 1406 1413 1407 repeat: 1414 1408 session = perf_session__new(&data, false, &report.tool); 1415 - if (IS_ERR(session)) 1416 - return PTR_ERR(session); 1409 + if (IS_ERR(session)) { 1410 + ret = PTR_ERR(session); 1411 + goto exit; 1412 + } 1417 1413 1418 1414 ret = evswitch__init(&report.evswitch, session->evlist, stderr); 1419 1415 if (ret) 1420 - return ret; 1416 + goto exit; 1421 1417 1422 1418 if (zstd_init(&(session->zstd_data), 0) < 0) 1423 1419 pr_warning("Decompression initialization failed. Reported data may be incomplete.\n"); ··· 1654 1646 1655 1647 zstd_fini(&(session->zstd_data)); 1656 1648 perf_session__delete(session); 1649 + exit: 1650 + free(sort_order_help); 1651 + free(field_order_help); 1657 1652 return ret; 1658 1653 }
+30 -5
tools/perf/builtin-sched.c
··· 670 670 err = pthread_attr_init(&attr); 671 671 BUG_ON(err); 672 672 err = pthread_attr_setstacksize(&attr, 673 - (size_t) max(16 * 1024, PTHREAD_STACK_MIN)); 673 + (size_t) max(16 * 1024, (int)PTHREAD_STACK_MIN)); 674 674 BUG_ON(err); 675 675 err = pthread_mutex_lock(&sched->start_work_mutex); 676 676 BUG_ON(err); ··· 3335 3335 sort_dimension__add("pid", &sched->cmp_pid); 3336 3336 } 3337 3337 3338 + static bool schedstat_events_exposed(void) 3339 + { 3340 + /* 3341 + * Select "sched:sched_stat_wait" event to check 3342 + * whether schedstat tracepoints are exposed. 3343 + */ 3344 + return IS_ERR(trace_event__tp_format("sched", "sched_stat_wait")) ? 3345 + false : true; 3346 + } 3347 + 3338 3348 static int __cmd_record(int argc, const char **argv) 3339 3349 { 3340 3350 unsigned int rec_argc, i, j; ··· 3356 3346 "-m", "1024", 3357 3347 "-c", "1", 3358 3348 "-e", "sched:sched_switch", 3359 - "-e", "sched:sched_stat_wait", 3360 - "-e", "sched:sched_stat_sleep", 3361 - "-e", "sched:sched_stat_iowait", 3362 3349 "-e", "sched:sched_stat_runtime", 3363 3350 "-e", "sched:sched_process_fork", 3364 3351 "-e", "sched:sched_wakeup_new", 3365 3352 "-e", "sched:sched_migrate_task", 3366 3353 }; 3354 + 3355 + /* 3356 + * The tracepoints trace_sched_stat_{wait, sleep, iowait} 3357 + * are not exposed to user if CONFIG_SCHEDSTATS is not set, 3358 + * to prevent "perf sched record" execution failure, determine 3359 + * whether to record schedstat events according to actual situation. 3360 + */ 3361 + const char * const schedstat_args[] = { 3362 + "-e", "sched:sched_stat_wait", 3363 + "-e", "sched:sched_stat_sleep", 3364 + "-e", "sched:sched_stat_iowait", 3365 + }; 3366 + unsigned int schedstat_argc = schedstat_events_exposed() ? 
3367 + ARRAY_SIZE(schedstat_args) : 0; 3368 + 3367 3369 struct tep_event *waking_event; 3368 3370 3369 3371 /* 3370 3372 * +2 for either "-e", "sched:sched_wakeup" or 3371 3373 * "-e", "sched:sched_waking" 3372 3374 */ 3373 - rec_argc = ARRAY_SIZE(record_args) + 2 + argc - 1; 3375 + rec_argc = ARRAY_SIZE(record_args) + 2 + schedstat_argc + argc - 1; 3374 3376 rec_argv = calloc(rec_argc + 1, sizeof(char *)); 3375 3377 3376 3378 if (rec_argv == NULL) ··· 3397 3375 rec_argv[i++] = strdup("sched:sched_waking"); 3398 3376 else 3399 3377 rec_argv[i++] = strdup("sched:sched_wakeup"); 3378 + 3379 + for (j = 0; j < schedstat_argc; j++) 3380 + rec_argv[i++] = strdup(schedstat_args[j]); 3400 3381 3401 3382 for (j = 1; j < (unsigned int)argc; j++, i++) 3402 3383 rec_argv[i] = argv[j];
+8
tools/perf/builtin-script.c
··· 2601 2601 } 2602 2602 } 2603 2603 2604 + static void perf_script__exit(struct perf_script *script) 2605 + { 2606 + perf_thread_map__put(script->threads); 2607 + perf_cpu_map__put(script->cpus); 2608 + } 2609 + 2604 2610 static int __cmd_script(struct perf_script *script) 2605 2611 { 2606 2612 int ret; ··· 4149 4143 zfree(&script.ptime_range); 4150 4144 } 4151 4145 4146 + zstd_fini(&(session->zstd_data)); 4152 4147 evlist__free_stats(session->evlist); 4153 4148 perf_session__delete(session); 4149 + perf_script__exit(&script); 4154 4150 4155 4151 if (script_started) 4156 4152 cleanup_scripting();
-3
tools/perf/builtin-stat.c
··· 2445 2445 2446 2446 evlist__check_cpu_maps(evsel_list); 2447 2447 2448 - if (perf_pmu__has_hybrid()) 2449 - stat_config.no_merge = true; 2450 - 2451 2448 /* 2452 2449 * Initialize thread_map with comm names, 2453 2450 * so we could print it out on output.
+43 -2
tools/perf/builtin-trace.c
··· 2266 2266 return augmented_args; 2267 2267 } 2268 2268 2269 + static void syscall__exit(struct syscall *sc) 2270 + { 2271 + if (!sc) 2272 + return; 2273 + 2274 + free(sc->arg_fmt); 2275 + } 2276 + 2269 2277 static int trace__sys_enter(struct trace *trace, struct evsel *evsel, 2270 2278 union perf_event *event __maybe_unused, 2271 2279 struct perf_sample *sample) ··· 3101 3093 evsel->handler = trace__pgfault; 3102 3094 3103 3095 return evsel; 3096 + } 3097 + 3098 + static void evlist__free_syscall_tp_fields(struct evlist *evlist) 3099 + { 3100 + struct evsel *evsel; 3101 + 3102 + evlist__for_each_entry(evlist, evsel) { 3103 + struct evsel_trace *et = evsel->priv; 3104 + 3105 + if (!et || !evsel->tp_format || strcmp(evsel->tp_format->system, "syscalls")) 3106 + continue; 3107 + 3108 + free(et->fmt); 3109 + free(et); 3110 + } 3104 3111 } 3105 3112 3106 3113 static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample) ··· 4153 4130 4154 4131 out_delete_evlist: 4155 4132 trace__symbols__exit(trace); 4156 - 4133 + evlist__free_syscall_tp_fields(evlist); 4157 4134 evlist__delete(evlist); 4158 4135 cgroup__put(trace->cgroup); 4159 4136 trace->evlist = NULL; ··· 4659 4636 err = parse_events_option(&o, lists[0], 0); 4660 4637 } 4661 4638 out: 4639 + free(strace_groups_dir); 4640 + free(lists[0]); 4641 + free(lists[1]); 4662 4642 if (sep) 4663 4643 *sep = ','; 4664 4644 ··· 4725 4699 } 4726 4700 out: 4727 4701 return err; 4702 + } 4703 + 4704 + static void trace__exit(struct trace *trace) 4705 + { 4706 + int i; 4707 + 4708 + strlist__delete(trace->ev_qualifier); 4709 + free(trace->ev_qualifier_ids.entries); 4710 + if (trace->syscalls.table) { 4711 + for (i = 0; i <= trace->sctbl->syscalls.max_id; i++) 4712 + syscall__exit(&trace->syscalls.table[i]); 4713 + free(trace->syscalls.table); 4714 + } 4715 + syscalltbl__delete(trace->sctbl); 4716 + zfree(&trace->perfconfig_events); 4728 4717 } 4729 4718 4730 4719 int cmd_trace(int 
argc, const char **argv) ··· 5176 5135 if (output_name != NULL) 5177 5136 fclose(trace.output); 5178 5137 out: 5179 - zfree(&trace.perfconfig_events); 5138 + trace__exit(&trace); 5180 5139 return err; 5181 5140 }
+2
tools/perf/tests/bpf.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 #include <errno.h> 3 3 #include <stdio.h> 4 + #include <stdlib.h> 4 5 #include <sys/epoll.h> 5 6 #include <sys/types.h> 6 7 #include <sys/stat.h> ··· 277 276 } 278 277 279 278 out: 279 + free(obj_buf); 280 280 bpf__clear(); 281 281 return ret; 282 282 }
+4 -2
tools/perf/tests/event_update.c
··· 88 88 struct evsel *evsel; 89 89 struct event_name tmp; 90 90 struct evlist *evlist = evlist__new_default(); 91 + char *unit = strdup("KRAVA"); 91 92 92 93 TEST_ASSERT_VAL("failed to get evlist", evlist); 93 94 ··· 99 98 100 99 perf_evlist__id_add(&evlist->core, &evsel->core, 0, 0, 123); 101 100 102 - evsel->unit = strdup("KRAVA"); 101 + evsel->unit = unit; 103 102 104 103 TEST_ASSERT_VAL("failed to synthesize attr update unit", 105 104 !perf_event__synthesize_event_update_unit(NULL, evsel, process_event_unit)); ··· 119 118 TEST_ASSERT_VAL("failed to synthesize attr update cpus", 120 119 !perf_event__synthesize_event_update_cpus(&tmp.tool, evsel, process_event_cpus)); 121 120 122 - perf_cpu_map__put(evsel->core.own_cpus); 121 + free(unit); 122 + evlist__delete(evlist); 123 123 return 0; 124 124 }
+2 -1
tools/perf/tests/evsel-roundtrip-name.c
··· 5 5 #include "tests.h" 6 6 #include "debug.h" 7 7 #include "pmu.h" 8 + #include "pmu-hybrid.h" 8 9 #include <errno.h> 9 10 #include <linux/kernel.h> 10 11 ··· 103 102 { 104 103 int err = 0, ret = 0; 105 104 106 - if (perf_pmu__has_hybrid()) 105 + if (perf_pmu__has_hybrid() && perf_pmu__hybrid_mounted("cpu_atom")) 107 106 return perf_evsel__name_array_test(evsel__hw_names, 2); 108 107 109 108 err = perf_evsel__name_array_test(evsel__hw_names, 1);
+2
tools/perf/tests/maps.c
··· 116 116 117 117 ret = check_maps(merged3, ARRAY_SIZE(merged3), &maps); 118 118 TEST_ASSERT_VAL("merge check failed", !ret); 119 + 120 + maps__exit(&maps); 119 121 return TEST_OK; 120 122 }
+10 -6
tools/perf/tests/parse-events.c
··· 6 6 #include "tests.h" 7 7 #include "debug.h" 8 8 #include "pmu.h" 9 + #include "pmu-hybrid.h" 9 10 #include <dirent.h> 10 11 #include <errno.h> 11 12 #include <sys/types.h> ··· 1597 1596 { 1598 1597 struct evsel *evsel = evlist__first(evlist); 1599 1598 1599 + if (!perf_pmu__hybrid_mounted("cpu_atom")) { 1600 + TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries); 1601 + TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type); 1602 + TEST_ASSERT_VAL("wrong config", 0x1a == evsel->core.attr.config); 1603 + return 0; 1604 + } 1605 + 1600 1606 TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries); 1601 1607 TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type); 1602 1608 TEST_ASSERT_VAL("wrong config", 0x1a == evsel->core.attr.config); ··· 1628 1620 { 1629 1621 struct evsel *evsel = evlist__first(evlist); 1630 1622 1631 - TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries); 1623 + TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries); 1632 1624 TEST_ASSERT_VAL("wrong type", PERF_TYPE_HW_CACHE == evsel->core.attr.type); 1633 1625 TEST_ASSERT_VAL("wrong config", 0x2 == (evsel->core.attr.config & 0xffffffff)); 1634 - 1635 - evsel = evsel__next(evsel); 1636 - TEST_ASSERT_VAL("wrong type", PERF_TYPE_HW_CACHE == evsel->core.attr.type); 1637 - TEST_ASSERT_VAL("wrong config", 0x10002 == (evsel->core.attr.config & 0xffffffff)); 1638 1626 return 0; 1639 1627 } 1640 1628 ··· 2032 2028 .id = 7, 2033 2029 }, 2034 2030 { 2035 - .name = "cpu_core/LLC-loads/,cpu_atom/LLC-load-misses/", 2031 + .name = "cpu_core/LLC-loads/", 2036 2032 .check = test__hybrid_cache_event, 2037 2033 .id = 8, 2038 2034 },
+2 -1
tools/perf/tests/perf-time-to-tsc.c
··· 21 21 #include "mmap.h" 22 22 #include "tests.h" 23 23 #include "pmu.h" 24 + #include "pmu-hybrid.h" 24 25 25 26 #define CHECK__(x) { \ 26 27 while ((x) < 0) { \ ··· 94 93 * For hybrid "cycles:u", it creates two events. 95 94 * Init the second evsel here. 96 95 */ 97 - if (perf_pmu__has_hybrid()) { 96 + if (perf_pmu__has_hybrid() && perf_pmu__hybrid_mounted("cpu_atom")) { 98 97 evsel = evsel__next(evsel); 99 98 evsel->core.attr.comm = 1; 100 99 evsel->core.attr.disabled = 1;
+1
tools/perf/tests/topology.c
··· 61 61 TEST_ASSERT_VAL("failed to write header", 62 62 !perf_session__write_header(session, session->evlist, data.file.fd, true)); 63 63 64 + evlist__delete(session->evlist); 64 65 perf_session__delete(session); 65 66 66 67 return 0;
+167 -1
tools/perf/util/cs-etm.c
··· 2683 2683 return metadata; 2684 2684 } 2685 2685 2686 + /** 2687 + * Puts a fragment of an auxtrace buffer into the auxtrace queues based 2688 + * on the bounds of aux_event, if it matches with the buffer that's at 2689 + * file_offset. 2690 + * 2691 + * Normally, whole auxtrace buffers would be added to the queue. But we 2692 + * want to reset the decoder for every PERF_RECORD_AUX event, and the decoder 2693 + * is reset across each buffer, so splitting the buffers up in advance has 2694 + * the same effect. 2695 + */ 2696 + static int cs_etm__queue_aux_fragment(struct perf_session *session, off_t file_offset, size_t sz, 2697 + struct perf_record_aux *aux_event, struct perf_sample *sample) 2698 + { 2699 + int err; 2700 + char buf[PERF_SAMPLE_MAX_SIZE]; 2701 + union perf_event *auxtrace_event_union; 2702 + struct perf_record_auxtrace *auxtrace_event; 2703 + union perf_event auxtrace_fragment; 2704 + __u64 aux_offset, aux_size; 2705 + 2706 + struct cs_etm_auxtrace *etm = container_of(session->auxtrace, 2707 + struct cs_etm_auxtrace, 2708 + auxtrace); 2709 + 2710 + /* 2711 + * There should be a PERF_RECORD_AUXTRACE event at the file_offset that we got 2712 + * from looping through the auxtrace index. 2713 + */ 2714 + err = perf_session__peek_event(session, file_offset, buf, 2715 + PERF_SAMPLE_MAX_SIZE, &auxtrace_event_union, NULL); 2716 + if (err) 2717 + return err; 2718 + auxtrace_event = &auxtrace_event_union->auxtrace; 2719 + if (auxtrace_event->header.type != PERF_RECORD_AUXTRACE) 2720 + return -EINVAL; 2721 + 2722 + if (auxtrace_event->header.size < sizeof(struct perf_record_auxtrace) || 2723 + auxtrace_event->header.size != sz) { 2724 + return -EINVAL; 2725 + } 2726 + 2727 + /* 2728 + * In per-thread mode, CPU is set to -1, but TID will be set instead. See 2729 + * auxtrace_mmap_params__set_idx(). Return 'not found' if neither CPU nor TID match. 
2730 + */ 2731 + if ((auxtrace_event->cpu == (__u32) -1 && auxtrace_event->tid != sample->tid) || 2732 + auxtrace_event->cpu != sample->cpu) 2733 + return 1; 2734 + 2735 + if (aux_event->flags & PERF_AUX_FLAG_OVERWRITE) { 2736 + /* 2737 + * Clamp size in snapshot mode. The buffer size is clamped in 2738 + * __auxtrace_mmap__read() for snapshots, so the aux record size doesn't reflect 2739 + * the buffer size. 2740 + */ 2741 + aux_size = min(aux_event->aux_size, auxtrace_event->size); 2742 + 2743 + /* 2744 + * In this mode, the head also points to the end of the buffer so aux_offset 2745 + * needs to have the size subtracted so it points to the beginning as in normal mode 2746 + */ 2747 + aux_offset = aux_event->aux_offset - aux_size; 2748 + } else { 2749 + aux_size = aux_event->aux_size; 2750 + aux_offset = aux_event->aux_offset; 2751 + } 2752 + 2753 + if (aux_offset >= auxtrace_event->offset && 2754 + aux_offset + aux_size <= auxtrace_event->offset + auxtrace_event->size) { 2755 + /* 2756 + * If this AUX event was inside this buffer somewhere, create a new auxtrace event 2757 + * based on the sizes of the aux event, and queue that fragment. 2758 + */ 2759 + auxtrace_fragment.auxtrace = *auxtrace_event; 2760 + auxtrace_fragment.auxtrace.size = aux_size; 2761 + auxtrace_fragment.auxtrace.offset = aux_offset; 2762 + file_offset += aux_offset - auxtrace_event->offset + auxtrace_event->header.size; 2763 + 2764 + pr_debug3("CS ETM: Queue buffer size: %#"PRI_lx64" offset: %#"PRI_lx64 2765 + " tid: %d cpu: %d\n", aux_size, aux_offset, sample->tid, sample->cpu); 2766 + return auxtrace_queues__add_event(&etm->queues, session, &auxtrace_fragment, 2767 + file_offset, NULL); 2768 + } 2769 + 2770 + /* Wasn't inside this buffer, but there were no parse errors. 
1 == 'not found' */ 2771 + return 1; 2772 + } 2773 + 2774 + static int cs_etm__queue_aux_records_cb(struct perf_session *session, union perf_event *event, 2775 + u64 offset __maybe_unused, void *data __maybe_unused) 2776 + { 2777 + struct perf_sample sample; 2778 + int ret; 2779 + struct auxtrace_index_entry *ent; 2780 + struct auxtrace_index *auxtrace_index; 2781 + struct evsel *evsel; 2782 + size_t i; 2783 + 2784 + /* Don't care about any other events, we're only queuing buffers for AUX events */ 2785 + if (event->header.type != PERF_RECORD_AUX) 2786 + return 0; 2787 + 2788 + if (event->header.size < sizeof(struct perf_record_aux)) 2789 + return -EINVAL; 2790 + 2791 + /* Truncated Aux records can have 0 size and shouldn't result in anything being queued. */ 2792 + if (!event->aux.aux_size) 2793 + return 0; 2794 + 2795 + /* 2796 + * Parse the sample, we need the sample_id_all data that comes after the event so that the 2797 + * CPU or PID can be matched to an AUXTRACE buffer's CPU or PID. 2798 + */ 2799 + evsel = evlist__event2evsel(session->evlist, event); 2800 + if (!evsel) 2801 + return -EINVAL; 2802 + ret = evsel__parse_sample(evsel, event, &sample); 2803 + if (ret) 2804 + return ret; 2805 + 2806 + /* 2807 + * Loop through the auxtrace index to find the buffer that matches up with this aux event. 2808 + */ 2809 + list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) { 2810 + for (i = 0; i < auxtrace_index->nr; i++) { 2811 + ent = &auxtrace_index->entries[i]; 2812 + ret = cs_etm__queue_aux_fragment(session, ent->file_offset, 2813 + ent->sz, &event->aux, &sample); 2814 + /* 2815 + * Stop search on error or successful values. Continue search on 2816 + * 1 ('not found') 2817 + */ 2818 + if (ret != 1) 2819 + return ret; 2820 + } 2821 + } 2822 + 2823 + /* 2824 + * Couldn't find the buffer corresponding to this aux record, something went wrong. Warn but 2825 + * don't exit with an error because it will still be possible to decode other aux records. 
2826 + */ 2827 + pr_err("CS ETM: Couldn't find auxtrace buffer for aux_offset: %#"PRI_lx64 2828 + " tid: %d cpu: %d\n", event->aux.aux_offset, sample.tid, sample.cpu); 2829 + return 0; 2830 + } 2831 + 2832 + static int cs_etm__queue_aux_records(struct perf_session *session) 2833 + { 2834 + struct auxtrace_index *index = list_first_entry_or_null(&session->auxtrace_index, 2835 + struct auxtrace_index, list); 2836 + if (index && index->nr > 0) 2837 + return perf_session__peek_events(session, session->header.data_offset, 2838 + session->header.data_size, 2839 + cs_etm__queue_aux_records_cb, NULL); 2840 + 2841 + /* 2842 + * We would get here if there are no entries in the index (either no auxtrace 2843 + * buffers or no index at all). Fail silently as there is the possibility of 2844 + * queueing them in cs_etm__process_auxtrace_event() if etm->data_queued is still 2845 + * false. 2846 + * 2847 + * In that scenario, buffers will not be split by AUX records. 2848 + */ 2849 + return 0; 2850 + } 2851 + 2686 2852 int cs_etm__process_auxtrace_info(union perf_event *event, 2687 2853 struct perf_session *session) 2688 2854 { ··· 3049 2883 if (err) 3050 2884 goto err_delete_thread; 3051 2885 3052 - err = auxtrace_queues__process_index(&etm->queues, session); 2886 + err = cs_etm__queue_aux_records(session); 3053 2887 if (err) 3054 2888 goto err_delete_thread; 3055 2889
+1 -1
tools/perf/util/data.c
··· 20 20 21 21 static void close_dir(struct perf_data_file *files, int nr) 22 22 { 23 - while (--nr >= 1) { 23 + while (--nr >= 0) { 24 24 close(files[nr].fd); 25 25 zfree(&files[nr].path); 26 26 }
+3 -1
tools/perf/util/dso.c
··· 1154 1154 struct map *map = NULL; 1155 1155 struct dso *dso = dso__new(name); 1156 1156 1157 - if (dso) 1157 + if (dso) { 1158 1158 map = map__new2(0, dso); 1159 + dso__put(dso); 1160 + } 1159 1161 1160 1162 return map; 1161 1163 }
+4 -4
tools/perf/util/dwarf-aux.c
··· 113 113 * 114 114 * Find a line number and file name for @addr in @cu_die. 115 115 */ 116 - int cu_find_lineinfo(Dwarf_Die *cu_die, unsigned long addr, 117 - const char **fname, int *lineno) 116 + int cu_find_lineinfo(Dwarf_Die *cu_die, Dwarf_Addr addr, 117 + const char **fname, int *lineno) 118 118 { 119 119 Dwarf_Line *line; 120 120 Dwarf_Die die_mem; 121 121 Dwarf_Addr faddr; 122 122 123 - if (die_find_realfunc(cu_die, (Dwarf_Addr)addr, &die_mem) 123 + if (die_find_realfunc(cu_die, addr, &die_mem) 124 124 && die_entrypc(&die_mem, &faddr) == 0 && 125 125 faddr == addr) { 126 126 *fname = dwarf_decl_file(&die_mem); ··· 128 128 goto out; 129 129 } 130 130 131 - line = cu_getsrc_die(cu_die, (Dwarf_Addr)addr); 131 + line = cu_getsrc_die(cu_die, addr); 132 132 if (line && dwarf_lineno(line, lineno) == 0) { 133 133 *fname = dwarf_linesrc(line, NULL, NULL); 134 134 if (!*fname)
+1 -1
tools/perf/util/dwarf-aux.h
··· 19 19 const char *cu_get_comp_dir(Dwarf_Die *cu_die); 20 20 21 21 /* Get a line number and file name for given address */ 22 - int cu_find_lineinfo(Dwarf_Die *cudie, unsigned long addr, 22 + int cu_find_lineinfo(Dwarf_Die *cudie, Dwarf_Addr addr, 23 23 const char **fname, int *lineno); 24 24 25 25 /* Walk on functions at given address */
+2
tools/perf/util/env.c
··· 186 186 zfree(&env->cpuid); 187 187 zfree(&env->cmdline); 188 188 zfree(&env->cmdline_argv); 189 + zfree(&env->sibling_dies); 189 190 zfree(&env->sibling_cores); 190 191 zfree(&env->sibling_threads); 191 192 zfree(&env->pmu_mappings); 192 193 zfree(&env->cpu); 194 + zfree(&env->cpu_pmu_caps); 193 195 zfree(&env->numa_map); 194 196 195 197 for (i = 0; i < env->nr_numa_nodes; i++)
+5 -3
tools/perf/util/lzma.c
··· 69 69 70 70 if (ferror(infile)) { 71 71 pr_err("lzma: read error: %s\n", strerror(errno)); 72 - goto err_fclose; 72 + goto err_lzma_end; 73 73 } 74 74 75 75 if (feof(infile)) ··· 83 83 84 84 if (writen(output_fd, buf_out, write_size) != write_size) { 85 85 pr_err("lzma: write error: %s\n", strerror(errno)); 86 - goto err_fclose; 86 + goto err_lzma_end; 87 87 } 88 88 89 89 strm.next_out = buf_out; ··· 95 95 break; 96 96 97 97 pr_err("lzma: failed %s\n", lzma_strerror(ret)); 98 - goto err_fclose; 98 + goto err_lzma_end; 99 99 } 100 100 } 101 101 102 102 err = 0; 103 + err_lzma_end: 104 + lzma_end(&strm); 103 105 err_fclose: 104 106 fclose(infile); 105 107 return err;
+2
tools/perf/util/map.c
··· 192 192 if (!(prot & PROT_EXEC)) 193 193 dso__set_loaded(dso); 194 194 } 195 + 196 + nsinfo__put(dso->nsinfo); 195 197 dso->nsinfo = nsi; 196 198 197 199 if (build_id__is_defined(bid))
+1 -1
tools/perf/util/pfm.c
··· 99 99 grp_leader = evsel; 100 100 101 101 if (grp_evt > -1) { 102 - evsel->leader = grp_leader; 102 + evsel__set_leader(evsel, grp_leader); 103 103 grp_leader->core.nr_members++; 104 104 grp_evt++; 105 105 }
+8 -1
tools/perf/util/pmu.c
··· 950 950 LIST_HEAD(format); 951 951 LIST_HEAD(aliases); 952 952 __u32 type; 953 + bool is_hybrid = perf_pmu__hybrid_mounted(name); 954 + 955 + /* 956 + * Check pmu name for hybrid and the pmu may be invalid in sysfs 957 + */ 958 + if (!strncmp(name, "cpu_", 4) && !is_hybrid) 959 + return NULL; 953 960 954 961 /* 955 962 * The pmu data we store & need consists of the pmu ··· 985 978 pmu->is_uncore = pmu_is_uncore(name); 986 979 if (pmu->is_uncore) 987 980 pmu->id = pmu_id(name); 988 - pmu->is_hybrid = perf_pmu__hybrid_mounted(name); 981 + pmu->is_hybrid = is_hybrid; 989 982 pmu->max_precise = pmu_max_precise(name); 990 983 pmu_add_cpu_aliases(&aliases, pmu); 991 984 pmu_add_sys_aliases(&aliases, pmu);
+26 -27
tools/perf/util/probe-event.c
··· 179 179 struct map *map; 180 180 181 181 map = dso__new_map(target); 182 - if (map && map->dso) 182 + if (map && map->dso) { 183 + nsinfo__put(map->dso->nsinfo); 183 184 map->dso->nsinfo = nsinfo__get(nsi); 185 + } 184 186 return map; 185 187 } else { 186 188 return kernel_get_module_map(target); ··· 239 237 clear_probe_trace_event(tevs + i); 240 238 } 241 239 242 - static bool kprobe_blacklist__listed(unsigned long address); 243 - static bool kprobe_warn_out_range(const char *symbol, unsigned long address) 240 + static bool kprobe_blacklist__listed(u64 address); 241 + static bool kprobe_warn_out_range(const char *symbol, u64 address) 244 242 { 245 243 struct map *map; 246 244 bool ret = false; ··· 400 398 pr_debug("Symbol %s address found : %" PRIx64 "\n", 401 399 pp->function, address); 402 400 403 - ret = debuginfo__find_probe_point(dinfo, (unsigned long)address, 404 - result); 401 + ret = debuginfo__find_probe_point(dinfo, address, result); 405 402 if (ret <= 0) 406 403 ret = (!ret) ? 
-ENOENT : ret; 407 404 else { ··· 588 587 } 589 588 590 589 591 - static int get_text_start_address(const char *exec, unsigned long *address, 590 + static int get_text_start_address(const char *exec, u64 *address, 592 591 struct nsinfo *nsi) 593 592 { 594 593 Elf *elf; ··· 633 632 bool is_kprobe) 634 633 { 635 634 struct debuginfo *dinfo = NULL; 636 - unsigned long stext = 0; 635 + u64 stext = 0; 637 636 u64 addr = tp->address; 638 637 int ret = -ENOENT; 639 638 ··· 661 660 662 661 dinfo = debuginfo_cache__open(tp->module, verbose <= 0); 663 662 if (dinfo) 664 - ret = debuginfo__find_probe_point(dinfo, 665 - (unsigned long)addr, pp); 663 + ret = debuginfo__find_probe_point(dinfo, addr, pp); 666 664 else 667 665 ret = -ENOENT; 668 666 ··· 676 676 677 677 /* Adjust symbol name and address */ 678 678 static int post_process_probe_trace_point(struct probe_trace_point *tp, 679 - struct map *map, unsigned long offs) 679 + struct map *map, u64 offs) 680 680 { 681 681 struct symbol *sym; 682 682 u64 addr = tp->address - offs; ··· 719 719 int ntevs, const char *pathname) 720 720 { 721 721 struct map *map; 722 - unsigned long stext = 0; 722 + u64 stext = 0; 723 723 int i, ret = 0; 724 724 725 725 /* Prepare a map for offline binary */ ··· 745 745 struct nsinfo *nsi) 746 746 { 747 747 int i, ret = 0; 748 - unsigned long stext = 0; 748 + u64 stext = 0; 749 749 750 750 if (!exec) 751 751 return 0; ··· 790 790 mod_name = find_module_name(module); 791 791 for (i = 0; i < ntevs; i++) { 792 792 ret = post_process_probe_trace_point(&tevs[i].point, 793 - map, (unsigned long)text_offs); 793 + map, text_offs); 794 794 if (ret < 0) 795 795 break; 796 796 tevs[i].point.module = ··· 1534 1534 * so tmp[1] should always valid (but could be '\0'). 
1535 1535 */ 1536 1536 if (tmp && !strncmp(tmp, "0x", 2)) { 1537 - pp->abs_address = strtoul(pp->function, &tmp, 0); 1537 + pp->abs_address = strtoull(pp->function, &tmp, 0); 1538 1538 if (*tmp != '\0') { 1539 1539 semantic_error("Invalid absolute address.\n"); 1540 1540 return -EINVAL; ··· 1909 1909 argv[i] = NULL; 1910 1910 argc -= 1; 1911 1911 } else 1912 - tp->address = strtoul(fmt1_str, NULL, 0); 1912 + tp->address = strtoull(fmt1_str, NULL, 0); 1913 1913 } else { 1914 1914 /* Only the symbol-based probe has offset */ 1915 1915 tp->symbol = strdup(fmt1_str); ··· 2155 2155 return -EINVAL; 2156 2156 2157 2157 /* Use the tp->address for uprobes */ 2158 - err = strbuf_addf(buf, "%s:0x%lx", tp->module, tp->address); 2158 + err = strbuf_addf(buf, "%s:0x%" PRIx64, tp->module, tp->address); 2159 2159 2160 2160 if (err >= 0 && tp->ref_ctr_offset) { 2161 2161 if (!uprobe_ref_ctr_is_supported()) ··· 2170 2170 { 2171 2171 if (!strncmp(tp->symbol, "0x", 2)) { 2172 2172 /* Absolute address. See try_to_find_absolute_address() */ 2173 - return strbuf_addf(buf, "%s%s0x%lx", tp->module ?: "", 2173 + return strbuf_addf(buf, "%s%s0x%" PRIx64, tp->module ?: "", 2174 2174 tp->module ? 
":" : "", tp->address); 2175 2175 } else { 2176 2176 return strbuf_addf(buf, "%s%s%s+%lu", tp->module ?: "", ··· 2269 2269 pp->function = strdup(tp->symbol); 2270 2270 pp->offset = tp->offset; 2271 2271 } else { 2272 - ret = e_snprintf(buf, 128, "0x%" PRIx64, (u64)tp->address); 2272 + ret = e_snprintf(buf, 128, "0x%" PRIx64, tp->address); 2273 2273 if (ret < 0) 2274 2274 return ret; 2275 2275 pp->function = strdup(buf); ··· 2450 2450 2451 2451 struct kprobe_blacklist_node { 2452 2452 struct list_head list; 2453 - unsigned long start; 2454 - unsigned long end; 2453 + u64 start; 2454 + u64 end; 2455 2455 char *symbol; 2456 2456 }; 2457 2457 ··· 2496 2496 } 2497 2497 INIT_LIST_HEAD(&node->list); 2498 2498 list_add_tail(&node->list, blacklist); 2499 - if (sscanf(buf, "0x%lx-0x%lx", &node->start, &node->end) != 2) { 2499 + if (sscanf(buf, "0x%" PRIx64 "-0x%" PRIx64, &node->start, &node->end) != 2) { 2500 2500 ret = -EINVAL; 2501 2501 break; 2502 2502 } ··· 2512 2512 ret = -ENOMEM; 2513 2513 break; 2514 2514 } 2515 - pr_debug2("Blacklist: 0x%lx-0x%lx, %s\n", 2515 + pr_debug2("Blacklist: 0x%" PRIx64 "-0x%" PRIx64 ", %s\n", 2516 2516 node->start, node->end, node->symbol); 2517 2517 ret++; 2518 2518 } ··· 2524 2524 } 2525 2525 2526 2526 static struct kprobe_blacklist_node * 2527 - kprobe_blacklist__find_by_address(struct list_head *blacklist, 2528 - unsigned long address) 2527 + kprobe_blacklist__find_by_address(struct list_head *blacklist, u64 address) 2529 2528 { 2530 2529 struct kprobe_blacklist_node *node; 2531 2530 ··· 2552 2553 kprobe_blacklist__delete(&kprobe_blacklist); 2553 2554 } 2554 2555 2555 - static bool kprobe_blacklist__listed(unsigned long address) 2556 + static bool kprobe_blacklist__listed(u64 address) 2556 2557 { 2557 2558 return !!kprobe_blacklist__find_by_address(&kprobe_blacklist, address); 2558 2559 } ··· 3220 3221 * In __add_probe_trace_events, a NULL symbol is interpreted as 3221 3222 * invalid. 
3222 3223 */ 3223 - if (asprintf(&tp->symbol, "0x%lx", tp->address) < 0) 3224 + if (asprintf(&tp->symbol, "0x%" PRIx64, tp->address) < 0) 3224 3225 goto errout; 3225 3226 3226 3227 /* For kprobe, check range */ ··· 3231 3232 goto errout; 3232 3233 } 3233 3234 3234 - if (asprintf(&tp->realname, "abs_%lx", tp->address) < 0) 3235 + if (asprintf(&tp->realname, "abs_%" PRIx64, tp->address) < 0) 3235 3236 goto errout; 3236 3237 3237 3238 if (pev->target) {
+2 -2
tools/perf/util/probe-event.h
··· 33 33 char *module; /* Module name */ 34 34 unsigned long offset; /* Offset from symbol */ 35 35 unsigned long ref_ctr_offset; /* SDT reference counter offset */ 36 - unsigned long address; /* Actual address of the trace point */ 36 + u64 address; /* Actual address of the trace point */ 37 37 bool retprobe; /* Return probe flag */ 38 38 }; 39 39 ··· 70 70 bool retprobe; /* Return probe flag */ 71 71 char *lazy_line; /* Lazy matching pattern */ 72 72 unsigned long offset; /* Offset from function entry */ 73 - unsigned long abs_address; /* Absolute address of the point */ 73 + u64 abs_address; /* Absolute address of the point */ 74 74 }; 75 75 76 76 /* Perf probe probing argument field chain */
+2 -2
tools/perf/util/probe-file.c
··· 377 377 378 378 ret = probe_file__get_events(fd, filter, namelist); 379 379 if (ret < 0) 380 - return ret; 380 + goto out; 381 381 382 382 ret = probe_file__del_strlist(fd, namelist); 383 + out: 383 384 strlist__delete(namelist); 384 - 385 385 return ret; 386 386 } 387 387
+7 -8
tools/perf/util/probe-finder.c
··· 668 668 } 669 669 670 670 tp->offset = (unsigned long)(paddr - eaddr); 671 - tp->address = (unsigned long)paddr; 671 + tp->address = paddr; 672 672 tp->symbol = strdup(symbol); 673 673 if (!tp->symbol) 674 674 return -ENOMEM; ··· 1707 1707 } 1708 1708 1709 1709 /* Reverse search */ 1710 - int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr, 1710 + int debuginfo__find_probe_point(struct debuginfo *dbg, u64 addr, 1711 1711 struct perf_probe_point *ppt) 1712 1712 { 1713 1713 Dwarf_Die cudie, spdie, indie; ··· 1720 1720 addr += baseaddr; 1721 1721 /* Find cu die */ 1722 1722 if (!dwarf_addrdie(dbg->dbg, (Dwarf_Addr)addr, &cudie)) { 1723 - pr_warning("Failed to find debug information for address %lx\n", 1723 + pr_warning("Failed to find debug information for address %" PRIx64 "\n", 1724 1724 addr); 1725 1725 ret = -EINVAL; 1726 1726 goto end; 1727 1727 } 1728 1728 1729 1729 /* Find a corresponding line (filename and lineno) */ 1730 - cu_find_lineinfo(&cudie, addr, &fname, &lineno); 1730 + cu_find_lineinfo(&cudie, (Dwarf_Addr)addr, &fname, &lineno); 1731 1731 /* Don't care whether it failed or not */ 1732 1732 1733 1733 /* Find a corresponding function (name, baseline and baseaddr) */ ··· 1742 1742 } 1743 1743 1744 1744 fname = dwarf_decl_file(&spdie); 1745 - if (addr == (unsigned long)baseaddr) { 1745 + if (addr == baseaddr) { 1746 1746 /* Function entry - Relative line number is 0 */ 1747 1747 lineno = baseline; 1748 1748 goto post; ··· 1788 1788 if (lineno) 1789 1789 ppt->line = lineno - baseline; 1790 1790 else if (basefunc) { 1791 - ppt->offset = addr - (unsigned long)baseaddr; 1791 + ppt->offset = addr - baseaddr; 1792 1792 func = basefunc; 1793 1793 } 1794 1794 ··· 1828 1828 } 1829 1829 1830 1830 static int line_range_walk_cb(const char *fname, int lineno, 1831 - Dwarf_Addr addr __maybe_unused, 1832 - void *data) 1831 + Dwarf_Addr addr, void *data) 1833 1832 { 1834 1833 struct line_finder *lf = data; 1835 1834 const char *__fname;
+1 -1
tools/perf/util/probe-finder.h
··· 46 46 struct probe_trace_event **tevs); 47 47 48 48 /* Find a perf_probe_point from debuginfo */ 49 - int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr, 49 + int debuginfo__find_probe_point(struct debuginfo *dbg, u64 addr, 50 50 struct perf_probe_point *ppt); 51 51 52 52 int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs,
+1
tools/perf/util/session.c
··· 306 306 evlist__delete(session->evlist); 307 307 perf_data__close(session->data); 308 308 } 309 + trace_event__cleanup(&session->tevent); 309 310 free(session); 310 311 } 311 312
+1 -1
tools/perf/util/sort.c
··· 3370 3370 add_key(sb, s[i].name, llen); 3371 3371 } 3372 3372 3373 - const char *sort_help(const char *prefix) 3373 + char *sort_help(const char *prefix) 3374 3374 { 3375 3375 struct strbuf sb; 3376 3376 char *s;
+1 -1
tools/perf/util/sort.h
··· 302 302 void sort__setup_elide(FILE *fp); 303 303 void perf_hpp__set_elide(int idx, bool elide); 304 304 305 - const char *sort_help(const char *prefix); 305 + char *sort_help(const char *prefix); 306 306 307 307 int report_parse_ignore_callees_opt(const struct option *opt, const char *arg, int unset); 308 308
+13 -1
tools/perf/util/stat-display.c
··· 596 596 } 597 597 } 598 598 599 + static bool is_uncore(struct evsel *evsel) 600 + { 601 + struct perf_pmu *pmu = evsel__find_pmu(evsel); 602 + 603 + return pmu && pmu->is_uncore; 604 + } 605 + 606 + static bool hybrid_uniquify(struct evsel *evsel) 607 + { 608 + return perf_pmu__has_hybrid() && !is_uncore(evsel); 609 + } 610 + 599 611 static bool collect_data(struct perf_stat_config *config, struct evsel *counter, 600 612 void (*cb)(struct perf_stat_config *config, struct evsel *counter, void *data, 601 613 bool first), ··· 616 604 if (counter->merged_stat) 617 605 return false; 618 606 cb(config, counter, data, true); 619 - if (config->no_merge) 607 + if (config->no_merge || hybrid_uniquify(counter)) 620 608 uniquify_event_name(counter); 621 609 else if (counter->auto_merge_stats) 622 610 collect_all_aliases(config, counter, cb, data);