Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf updates from Thomas Gleixner:
"Mostly updates to the perf tool plus two fixes to the kernel core code:

- Handle tracepoint filters correctly for inherited events (Peter
Zijlstra)

- Prevent a deadlock in perf_lock_task_context (Paul McKenney)

- Add missing newlines to some pr_err() calls (Arnaldo Carvalho de
Melo)

- Print full source file paths when using 'perf annotate --print-line
--full-paths' (Michael Petlan)

- Fix 'perf probe -d' when just one out of uprobes and kprobes is
enabled (Wang Nan)

- Add compiler.h to list.h to fix 'make perf-tar-src-pkg' generated
tarballs, i.e. out of tree building (Arnaldo Carvalho de Melo)

- Add the llvm-src-base.c and llvm-src-kbuild.c files, generated by
the 'perf test' LLVM entries, when running it in-tree, to
.gitignore (Yunlong Song)

- libbpf error reporting improvements, using a strerror interface to
more precisely tell the user about problems with the provided
scriptlet, be it in C or as a ready-made object file (Wang Nan)

- Do not be case sensitive when searching for matching 'perf test'
entries (Arnaldo Carvalho de Melo)

- Inform the user about objdump failures in 'perf annotate' (Andi
Kleen)

- Improve the LLVM 'perf test' entry, introduce new ones for BPF
and kbuild tests to check the environment used by clang to compile
.c scriptlets (Wang Nan)"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (32 commits)
perf/x86/intel/rapl: Remove the unused RAPL_EVENT_DESC() macro
tools include: Add compiler.h to list.h
perf probe: Verify parameters in two functions
perf session: Add missing newlines to some pr_err() calls
perf annotate: Support full source file paths for srcline fix
perf test: Add llvm-src-base.c and llvm-src-kbuild.c to .gitignore
perf: Fix inherited events vs. tracepoint filters
perf: Disable IRQs across RCU RS CS that acquires scheduler lock
perf test: Do not be case sensitive when searching for matching tests
perf test: Add 'perf test BPF'
perf test: Enhance the LLVM tests: add kbuild test
perf test: Enhance the LLVM test: update basic BPF test program
perf bpf: Improve BPF related error messages
perf tools: Make fetch_kernel_version() publicly available
bpf tools: Add new API bpf_object__get_kversion()
bpf tools: Improve libbpf error reporting
perf probe: Cleanup find_perf_probe_point_from_map to reduce redundancy
perf annotate: Inform the user about objdump failures in --stdio
perf stat: Make stat options global
perf sched latency: Fix thread pid reuse issue
...

+949 -294
-6
arch/x86/kernel/cpu/perf_event_intel_rapl.c
··· 107 107 static struct kobj_attribute format_attr_##_var = \ 108 108 __ATTR(_name, 0444, __rapl_##_var##_show, NULL) 109 109 110 - #define RAPL_EVENT_DESC(_name, _config) \ 111 - { \ 112 - .attr = __ATTR(_name, 0444, rapl_event_show, NULL), \ 113 - .config = _config, \ 114 - } 115 - 116 110 #define RAPL_CNTR_WIDTH 32 /* 32-bit rapl counters */ 117 111 118 112 #define RAPL_EVENT_ATTR_STR(_name, v, str) \
+13 -8
kernel/events/core.c
··· 1050 1050 /* 1051 1051 * One of the few rules of preemptible RCU is that one cannot do 1052 1052 * rcu_read_unlock() while holding a scheduler (or nested) lock when 1053 - * part of the read side critical section was preemptible -- see 1053 + * part of the read side critical section was irqs-enabled -- see 1054 1054 * rcu_read_unlock_special(). 1055 1055 * 1056 1056 * Since ctx->lock nests under rq->lock we must ensure the entire read 1057 - * side critical section is non-preemptible. 1057 + * side critical section has interrupts disabled. 1058 1058 */ 1059 - preempt_disable(); 1059 + local_irq_save(*flags); 1060 1060 rcu_read_lock(); 1061 1061 ctx = rcu_dereference(task->perf_event_ctxp[ctxn]); 1062 1062 if (ctx) { ··· 1070 1070 * if so. If we locked the right context, then it 1071 1071 * can't get swapped on us any more. 1072 1072 */ 1073 - raw_spin_lock_irqsave(&ctx->lock, *flags); 1073 + raw_spin_lock(&ctx->lock); 1074 1074 if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) { 1075 - raw_spin_unlock_irqrestore(&ctx->lock, *flags); 1075 + raw_spin_unlock(&ctx->lock); 1076 1076 rcu_read_unlock(); 1077 - preempt_enable(); 1077 + local_irq_restore(*flags); 1078 1078 goto retry; 1079 1079 } 1080 1080 1081 1081 if (!atomic_inc_not_zero(&ctx->refcount)) { 1082 - raw_spin_unlock_irqrestore(&ctx->lock, *flags); 1082 + raw_spin_unlock(&ctx->lock); 1083 1083 ctx = NULL; 1084 1084 } 1085 1085 } 1086 1086 rcu_read_unlock(); 1087 - preempt_enable(); 1087 + if (!ctx) 1088 + local_irq_restore(*flags); 1088 1089 return ctx; 1089 1090 } 1090 1091 ··· 6913 6912 struct perf_sample_data *data) 6914 6913 { 6915 6914 void *record = data->raw->data; 6915 + 6916 + /* only top level events have filters set */ 6917 + if (event->parent) 6918 + event = event->parent; 6916 6919 6917 6920 if (likely(!event->filter) || filter_match_preds(event->filter, record)) 6918 6921 return 1;
+1
tools/include/linux/list.h
··· 1 + #include <linux/compiler.h> 1 2 #include <linux/kernel.h> 2 3 #include <linux/types.h> 3 4
+1 -1
tools/lib/bpf/.gitignore
··· 1 1 libbpf_version.h 2 - FEATURE-DUMP 2 + FEATURE-DUMP.libbpf
+1 -1
tools/lib/bpf/Makefile
··· 180 180 clean: 181 181 $(call QUIET_CLEAN, libbpf) $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d \ 182 182 $(RM) LIBBPF-CFLAGS 183 - $(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP 183 + $(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP.libbpf 184 184 185 185 186 186
+120 -51
tools/lib/bpf/libbpf.c
··· 61 61 __pr_debug = debug; 62 62 } 63 63 64 + #define STRERR_BUFSIZE 128 65 + 66 + #define ERRNO_OFFSET(e) ((e) - __LIBBPF_ERRNO__START) 67 + #define ERRCODE_OFFSET(c) ERRNO_OFFSET(LIBBPF_ERRNO__##c) 68 + #define NR_ERRNO (__LIBBPF_ERRNO__END - __LIBBPF_ERRNO__START) 69 + 70 + static const char *libbpf_strerror_table[NR_ERRNO] = { 71 + [ERRCODE_OFFSET(LIBELF)] = "Something wrong in libelf", 72 + [ERRCODE_OFFSET(FORMAT)] = "BPF object format invalid", 73 + [ERRCODE_OFFSET(KVERSION)] = "'version' section incorrect or lost", 74 + [ERRCODE_OFFSET(ENDIAN)] = "Endian missmatch", 75 + [ERRCODE_OFFSET(INTERNAL)] = "Internal error in libbpf", 76 + [ERRCODE_OFFSET(RELOC)] = "Relocation failed", 77 + [ERRCODE_OFFSET(VERIFY)] = "Kernel verifier blocks program loading", 78 + [ERRCODE_OFFSET(PROG2BIG)] = "Program too big", 79 + [ERRCODE_OFFSET(KVER)] = "Incorrect kernel version", 80 + }; 81 + 82 + int libbpf_strerror(int err, char *buf, size_t size) 83 + { 84 + if (!buf || !size) 85 + return -1; 86 + 87 + err = err > 0 ? 
err : -err; 88 + 89 + if (err < __LIBBPF_ERRNO__START) { 90 + int ret; 91 + 92 + ret = strerror_r(err, buf, size); 93 + buf[size - 1] = '\0'; 94 + return ret; 95 + } 96 + 97 + if (err < __LIBBPF_ERRNO__END) { 98 + const char *msg; 99 + 100 + msg = libbpf_strerror_table[ERRNO_OFFSET(err)]; 101 + snprintf(buf, size, "%s", msg); 102 + buf[size - 1] = '\0'; 103 + return 0; 104 + } 105 + 106 + snprintf(buf, size, "Unknown libbpf error %d", err); 107 + buf[size - 1] = '\0'; 108 + return -1; 109 + } 110 + 111 + #define CHECK_ERR(action, err, out) do { \ 112 + err = action; \ 113 + if (err) \ 114 + goto out; \ 115 + } while(0) 116 + 117 + 64 118 /* Copied from tools/perf/util/util.h */ 65 119 #ifndef zfree 66 120 # define zfree(ptr) ({ free(*ptr); *ptr = NULL; }) ··· 312 258 obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1); 313 259 if (!obj) { 314 260 pr_warning("alloc memory failed for %s\n", path); 315 - return NULL; 261 + return ERR_PTR(-ENOMEM); 316 262 } 317 263 318 264 strcpy(obj->path, path); ··· 359 305 360 306 if (obj_elf_valid(obj)) { 361 307 pr_warning("elf init: internal error\n"); 362 - return -EEXIST; 308 + return -LIBBPF_ERRNO__LIBELF; 363 309 } 364 310 365 311 if (obj->efile.obj_buf_sz > 0) { ··· 385 331 if (!obj->efile.elf) { 386 332 pr_warning("failed to open %s as ELF file\n", 387 333 obj->path); 388 - err = -EINVAL; 334 + err = -LIBBPF_ERRNO__LIBELF; 389 335 goto errout; 390 336 } 391 337 392 338 if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) { 393 339 pr_warning("failed to get EHDR from %s\n", 394 340 obj->path); 395 - err = -EINVAL; 341 + err = -LIBBPF_ERRNO__FORMAT; 396 342 goto errout; 397 343 } 398 344 ep = &obj->efile.ehdr; ··· 400 346 if ((ep->e_type != ET_REL) || (ep->e_machine != 0)) { 401 347 pr_warning("%s is not an eBPF object file\n", 402 348 obj->path); 403 - err = -EINVAL; 349 + err = -LIBBPF_ERRNO__FORMAT; 404 350 goto errout; 405 351 } 406 352 ··· 428 374 goto mismatch; 429 375 break; 430 376 default: 431 - return 
-EINVAL; 377 + return -LIBBPF_ERRNO__ENDIAN; 432 378 } 433 379 434 380 return 0; 435 381 436 382 mismatch: 437 383 pr_warning("Error: endianness mismatch.\n"); 438 - return -EINVAL; 384 + return -LIBBPF_ERRNO__ENDIAN; 439 385 } 440 386 441 387 static int ··· 456 402 457 403 if (size != sizeof(kver)) { 458 404 pr_warning("invalid kver section in %s\n", obj->path); 459 - return -EINVAL; 405 + return -LIBBPF_ERRNO__FORMAT; 460 406 } 461 407 memcpy(&kver, data, sizeof(kver)); 462 408 obj->kern_version = kver; ··· 498 444 if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) { 499 445 pr_warning("failed to get e_shstrndx from %s\n", 500 446 obj->path); 501 - return -EINVAL; 447 + return -LIBBPF_ERRNO__FORMAT; 502 448 } 503 449 504 450 while ((scn = elf_nextscn(elf, scn)) != NULL) { ··· 510 456 if (gelf_getshdr(scn, &sh) != &sh) { 511 457 pr_warning("failed to get section header from %s\n", 512 458 obj->path); 513 - err = -EINVAL; 459 + err = -LIBBPF_ERRNO__FORMAT; 514 460 goto out; 515 461 } 516 462 ··· 518 464 if (!name) { 519 465 pr_warning("failed to get section name from %s\n", 520 466 obj->path); 521 - err = -EINVAL; 467 + err = -LIBBPF_ERRNO__FORMAT; 522 468 goto out; 523 469 } 524 470 ··· 526 472 if (!data) { 527 473 pr_warning("failed to get section data from %s(%s)\n", 528 474 name, obj->path); 529 - err = -EINVAL; 475 + err = -LIBBPF_ERRNO__FORMAT; 530 476 goto out; 531 477 } 532 478 pr_debug("section %s, size %ld, link %d, flags %lx, type=%d\n", ··· 549 495 if (obj->efile.symbols) { 550 496 pr_warning("bpf: multiple SYMTAB in %s\n", 551 497 obj->path); 552 - err = -EEXIST; 498 + err = -LIBBPF_ERRNO__FORMAT; 553 499 } else 554 500 obj->efile.symbols = data; 555 501 } else if ((sh.sh_type == SHT_PROGBITS) && ··· 558 504 err = bpf_object__add_program(obj, data->d_buf, 559 505 data->d_size, name, idx); 560 506 if (err) { 561 - char errmsg[128]; 507 + char errmsg[STRERR_BUFSIZE]; 508 + 562 509 strerror_r(-err, errmsg, sizeof(errmsg)); 563 510 pr_warning("failed 
to alloc program %s (%s): %s", 564 511 name, obj->path, errmsg); ··· 631 576 632 577 if (!gelf_getrel(data, i, &rel)) { 633 578 pr_warning("relocation: failed to get %d reloc\n", i); 634 - return -EINVAL; 579 + return -LIBBPF_ERRNO__FORMAT; 635 580 } 636 581 637 582 insn_idx = rel.r_offset / sizeof(struct bpf_insn); ··· 642 587 &sym)) { 643 588 pr_warning("relocation: symbol %"PRIx64" not found\n", 644 589 GELF_R_SYM(rel.r_info)); 645 - return -EINVAL; 590 + return -LIBBPF_ERRNO__FORMAT; 646 591 } 647 592 648 593 if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) { 649 594 pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n", 650 595 insn_idx, insns[insn_idx].code); 651 - return -EINVAL; 596 + return -LIBBPF_ERRNO__RELOC; 652 597 } 653 598 654 599 map_idx = sym.st_value / sizeof(struct bpf_map_def); 655 600 if (map_idx >= nr_maps) { 656 601 pr_warning("bpf relocation: map_idx %d large than %d\n", 657 602 (int)map_idx, (int)nr_maps - 1); 658 - return -EINVAL; 603 + return -LIBBPF_ERRNO__RELOC; 659 604 } 660 605 661 606 prog->reloc_desc[i].insn_idx = insn_idx; ··· 738 683 if (insn_idx >= (int)prog->insns_cnt) { 739 684 pr_warning("relocation out of range: '%s'\n", 740 685 prog->section_name); 741 - return -ERANGE; 686 + return -LIBBPF_ERRNO__RELOC; 742 687 } 743 688 insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD; 744 689 insns[insn_idx].imm = map_fds[map_idx]; ··· 776 721 777 722 if (!obj_elf_valid(obj)) { 778 723 pr_warning("Internal error: elf object is closed\n"); 779 - return -EINVAL; 724 + return -LIBBPF_ERRNO__INTERNAL; 780 725 } 781 726 782 727 for (i = 0; i < obj->efile.nr_reloc; i++) { ··· 789 734 790 735 if (shdr->sh_type != SHT_REL) { 791 736 pr_warning("internal error at %d\n", __LINE__); 792 - return -EINVAL; 737 + return -LIBBPF_ERRNO__INTERNAL; 793 738 } 794 739 795 740 prog = bpf_object__find_prog_by_idx(obj, idx); 796 741 if (!prog) { 797 742 pr_warning("relocation failed: no %d section\n", 798 743 idx); 799 - return -ENOENT; 744 + 
return -LIBBPF_ERRNO__RELOC; 800 745 } 801 746 802 747 err = bpf_program__collect_reloc(prog, nr_maps, 803 748 shdr, data, 804 749 obj->efile.symbols); 805 750 if (err) 806 - return -EINVAL; 751 + return err; 807 752 } 808 753 return 0; 809 754 } ··· 832 777 goto out; 833 778 } 834 779 835 - ret = -EINVAL; 780 + ret = -LIBBPF_ERRNO__LOAD; 836 781 pr_warning("load bpf program failed: %s\n", strerror(errno)); 837 782 838 - if (log_buf) { 783 + if (log_buf && log_buf[0] != '\0') { 784 + ret = -LIBBPF_ERRNO__VERIFY; 839 785 pr_warning("-- BEGIN DUMP LOG ---\n"); 840 786 pr_warning("\n%s\n", log_buf); 841 787 pr_warning("-- END LOG --\n"); 788 + } else { 789 + if (insns_cnt >= BPF_MAXINSNS) { 790 + pr_warning("Program too large (%d insns), at most %d insns\n", 791 + insns_cnt, BPF_MAXINSNS); 792 + ret = -LIBBPF_ERRNO__PROG2BIG; 793 + } else if (log_buf) { 794 + pr_warning("log buffer is empty\n"); 795 + ret = -LIBBPF_ERRNO__KVER; 796 + } 842 797 } 843 798 844 799 out: ··· 896 831 if (obj->kern_version == 0) { 897 832 pr_warning("%s doesn't provide kernel version\n", 898 833 obj->path); 899 - return -EINVAL; 834 + return -LIBBPF_ERRNO__KVERSION; 900 835 } 901 836 return 0; 902 837 } ··· 905 840 __bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz) 906 841 { 907 842 struct bpf_object *obj; 843 + int err; 908 844 909 845 if (elf_version(EV_CURRENT) == EV_NONE) { 910 846 pr_warning("failed to init libelf for %s\n", path); 911 - return NULL; 847 + return ERR_PTR(-LIBBPF_ERRNO__LIBELF); 912 848 } 913 849 914 850 obj = bpf_object__new(path, obj_buf, obj_buf_sz); 915 - if (!obj) 916 - return NULL; 851 + if (IS_ERR(obj)) 852 + return obj; 917 853 918 - if (bpf_object__elf_init(obj)) 919 - goto out; 920 - if (bpf_object__check_endianness(obj)) 921 - goto out; 922 - if (bpf_object__elf_collect(obj)) 923 - goto out; 924 - if (bpf_object__collect_reloc(obj)) 925 - goto out; 926 - if (bpf_object__validate(obj)) 927 - goto out; 854 + 
CHECK_ERR(bpf_object__elf_init(obj), err, out); 855 + CHECK_ERR(bpf_object__check_endianness(obj), err, out); 856 + CHECK_ERR(bpf_object__elf_collect(obj), err, out); 857 + CHECK_ERR(bpf_object__collect_reloc(obj), err, out); 858 + CHECK_ERR(bpf_object__validate(obj), err, out); 928 859 929 860 bpf_object__elf_finish(obj); 930 861 return obj; 931 862 out: 932 863 bpf_object__close(obj); 933 - return NULL; 864 + return ERR_PTR(err); 934 865 } 935 866 936 867 struct bpf_object *bpf_object__open(const char *path) ··· 983 922 984 923 int bpf_object__load(struct bpf_object *obj) 985 924 { 925 + int err; 926 + 986 927 if (!obj) 987 928 return -EINVAL; 988 929 ··· 994 931 } 995 932 996 933 obj->loaded = true; 997 - if (bpf_object__create_maps(obj)) 998 - goto out; 999 - if (bpf_object__relocate(obj)) 1000 - goto out; 1001 - if (bpf_object__load_progs(obj)) 1002 - goto out; 934 + 935 + CHECK_ERR(bpf_object__create_maps(obj), err, out); 936 + CHECK_ERR(bpf_object__relocate(obj), err, out); 937 + CHECK_ERR(bpf_object__load_progs(obj), err, out); 1003 938 1004 939 return 0; 1005 940 out: 1006 941 bpf_object__unload(obj); 1007 942 pr_warning("failed to load object '%s'\n", obj->path); 1008 - return -EINVAL; 943 + return err; 1009 944 } 1010 945 1011 946 void bpf_object__close(struct bpf_object *obj) ··· 1051 990 bpf_object__get_name(struct bpf_object *obj) 1052 991 { 1053 992 if (!obj) 1054 - return NULL; 993 + return ERR_PTR(-EINVAL); 1055 994 return obj->path; 995 + } 996 + 997 + unsigned int 998 + bpf_object__get_kversion(struct bpf_object *obj) 999 + { 1000 + if (!obj) 1001 + return 0; 1002 + return obj->kern_version; 1056 1003 } 1057 1004 1058 1005 struct bpf_program * ··· 1103 1034 return 0; 1104 1035 } 1105 1036 1106 - const char *bpf_program__title(struct bpf_program *prog, bool dup) 1037 + const char *bpf_program__title(struct bpf_program *prog, bool needs_copy) 1107 1038 { 1108 1039 const char *title; 1109 1040 1110 1041 title = prog->section_name; 1111 - if (dup) { 
1042 + if (needs_copy) { 1112 1043 title = strdup(title); 1113 1044 if (!title) { 1114 1045 pr_warning("failed to strdup program title\n"); 1115 - return NULL; 1046 + return ERR_PTR(-ENOMEM); 1116 1047 } 1117 1048 } 1118 1049
+22 -1
tools/lib/bpf/libbpf.h
··· 10 10 11 11 #include <stdio.h> 12 12 #include <stdbool.h> 13 + #include <linux/err.h> 14 + 15 + enum libbpf_errno { 16 + __LIBBPF_ERRNO__START = 4000, 17 + 18 + /* Something wrong in libelf */ 19 + LIBBPF_ERRNO__LIBELF = __LIBBPF_ERRNO__START, 20 + LIBBPF_ERRNO__FORMAT, /* BPF object format invalid */ 21 + LIBBPF_ERRNO__KVERSION, /* Incorrect or no 'version' section */ 22 + LIBBPF_ERRNO__ENDIAN, /* Endian missmatch */ 23 + LIBBPF_ERRNO__INTERNAL, /* Internal error in libbpf */ 24 + LIBBPF_ERRNO__RELOC, /* Relocation failed */ 25 + LIBBPF_ERRNO__LOAD, /* Load program failure for unknown reason */ 26 + LIBBPF_ERRNO__VERIFY, /* Kernel verifier blocks program loading */ 27 + LIBBPF_ERRNO__PROG2BIG, /* Program too big */ 28 + LIBBPF_ERRNO__KVER, /* Incorrect kernel version */ 29 + __LIBBPF_ERRNO__END, 30 + }; 31 + 32 + int libbpf_strerror(int err, char *buf, size_t size); 13 33 14 34 /* 15 35 * In include/linux/compiler-gcc.h, __printf is defined. However ··· 56 36 int bpf_object__load(struct bpf_object *obj); 57 37 int bpf_object__unload(struct bpf_object *obj); 58 38 const char *bpf_object__get_name(struct bpf_object *obj); 39 + unsigned int bpf_object__get_kversion(struct bpf_object *obj); 59 40 60 41 struct bpf_object *bpf_object__next(struct bpf_object *prev); 61 42 #define bpf_object__for_each_safe(pos, tmp) \ ··· 84 63 int bpf_program__get_private(struct bpf_program *prog, 85 64 void **ppriv); 86 65 87 - const char *bpf_program__title(struct bpf_program *prog, bool dup); 66 + const char *bpf_program__title(struct bpf_program *prog, bool needs_copy); 88 67 89 68 int bpf_program__fd(struct bpf_program *prog); 90 69
-1
tools/perf/Documentation/perf-trace.txt
··· 62 62 --verbose=:: 63 63 Verbosity level. 64 64 65 - -i:: 66 65 --no-inherit:: 67 66 Child tasks do not inherit counters. 68 67
+1 -1
tools/perf/Makefile
··· 78 78 # The build-test target is not really parallel, don't print the jobs info: 79 79 # 80 80 build-test: 81 - @$(MAKE) -f tests/make --no-print-directory 81 + @$(MAKE) SHUF=1 -f tests/make --no-print-directory 82 82 83 83 # 84 84 # All other targets get passed through:
+3 -2
tools/perf/builtin-sched.c
··· 1203 1203 1204 1204 static int pid_cmp(struct work_atoms *l, struct work_atoms *r) 1205 1205 { 1206 + if (l->thread == r->thread) 1207 + return 0; 1206 1208 if (l->thread->tid < r->thread->tid) 1207 1209 return -1; 1208 1210 if (l->thread->tid > r->thread->tid) 1209 1211 return 1; 1210 - 1211 - return 0; 1212 + return (int)(l->thread - r->thread); 1212 1213 } 1213 1214 1214 1215 static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
+102 -120
tools/perf/builtin-stat.c
··· 122 122 static struct timespec ref_time; 123 123 static struct cpu_map *aggr_map; 124 124 static aggr_get_id_t aggr_get_id; 125 + static bool append_file; 126 + static const char *output_name; 127 + static int output_fd; 125 128 126 129 static volatile int done = 0; 127 130 ··· 516 513 517 514 if (evsel->cgrp) 518 515 fprintf(output, "%s%s", csv_sep, evsel->cgrp->name); 519 - 520 - if (csv_output || stat_config.interval) 521 - return; 522 - 523 - if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK)) 524 - fprintf(output, " # %8.3f CPUs utilized ", 525 - avg / avg_stats(&walltime_nsecs_stats)); 526 - else 527 - fprintf(output, " "); 528 516 } 529 517 530 518 static void abs_printout(int id, int nr, struct perf_evsel *evsel, double avg) ··· 523 529 FILE *output = stat_config.output; 524 530 double sc = evsel->scale; 525 531 const char *fmt; 526 - int cpu = cpu_map__id_to_cpu(id); 527 532 528 533 if (csv_output) { 529 534 fmt = sc != 1.0 ? "%.2f%s" : "%.0f%s"; ··· 535 542 536 543 aggr_printout(evsel, id, nr); 537 544 538 - if (stat_config.aggr_mode == AGGR_GLOBAL) 539 - cpu = 0; 540 - 541 545 fprintf(output, fmt, avg, csv_sep); 542 546 543 547 if (evsel->unit) ··· 546 556 547 557 if (evsel->cgrp) 548 558 fprintf(output, "%s%s", csv_sep, evsel->cgrp->name); 559 + } 549 560 550 - if (csv_output || stat_config.interval) 551 - return; 561 + static void printout(int id, int nr, struct perf_evsel *counter, double uval) 562 + { 563 + int cpu = cpu_map__id_to_cpu(id); 552 564 553 - perf_stat__print_shadow_stats(output, evsel, avg, cpu, 554 - stat_config.aggr_mode); 565 + if (stat_config.aggr_mode == AGGR_GLOBAL) 566 + cpu = 0; 567 + 568 + if (nsec_counter(counter)) 569 + nsec_printout(id, nr, counter, uval); 570 + else 571 + abs_printout(id, nr, counter, uval); 572 + 573 + if (!csv_output && !stat_config.interval) 574 + perf_stat__print_shadow_stats(stat_config.output, counter, 575 + uval, cpu, 576 + stat_config.aggr_mode); 555 577 } 556 578 557 579 static void 
print_aggr(char *prefix) ··· 619 617 continue; 620 618 } 621 619 uval = val * counter->scale; 622 - 623 - if (nsec_counter(counter)) 624 - nsec_printout(id, nr, counter, uval); 625 - else 626 - abs_printout(id, nr, counter, uval); 627 - 620 + printout(id, nr, counter, uval); 628 621 if (!csv_output) 629 622 print_noise(counter, 1.0); 630 623 ··· 650 653 fprintf(output, "%s", prefix); 651 654 652 655 uval = val * counter->scale; 653 - 654 - if (nsec_counter(counter)) 655 - nsec_printout(thread, 0, counter, uval); 656 - else 657 - abs_printout(thread, 0, counter, uval); 656 + printout(thread, 0, counter, uval); 658 657 659 658 if (!csv_output) 660 659 print_noise(counter, 1.0); ··· 700 707 } 701 708 702 709 uval = avg * counter->scale; 703 - 704 - if (nsec_counter(counter)) 705 - nsec_printout(-1, 0, counter, uval); 706 - else 707 - abs_printout(-1, 0, counter, uval); 710 + printout(-1, 0, counter, uval); 708 711 709 712 print_noise(counter, avg); 710 713 ··· 753 764 } 754 765 755 766 uval = val * counter->scale; 756 - 757 - if (nsec_counter(counter)) 758 - nsec_printout(cpu, 0, counter, uval); 759 - else 760 - abs_printout(cpu, 0, counter, uval); 761 - 767 + printout(cpu, 0, counter, uval); 762 768 if (!csv_output) 763 769 print_noise(counter, 1.0); 764 770 print_running(run, ena); ··· 929 945 big_num_opt = unset ? 0 : 1; 930 946 return 0; 931 947 } 948 + 949 + static const struct option stat_options[] = { 950 + OPT_BOOLEAN('T', "transaction", &transaction_run, 951 + "hardware transaction statistics"), 952 + OPT_CALLBACK('e', "event", &evsel_list, "event", 953 + "event selector. 
use 'perf list' to list available events", 954 + parse_events_option), 955 + OPT_CALLBACK(0, "filter", &evsel_list, "filter", 956 + "event filter", parse_filter), 957 + OPT_BOOLEAN('i', "no-inherit", &no_inherit, 958 + "child tasks do not inherit counters"), 959 + OPT_STRING('p', "pid", &target.pid, "pid", 960 + "stat events on existing process id"), 961 + OPT_STRING('t', "tid", &target.tid, "tid", 962 + "stat events on existing thread id"), 963 + OPT_BOOLEAN('a', "all-cpus", &target.system_wide, 964 + "system-wide collection from all CPUs"), 965 + OPT_BOOLEAN('g', "group", &group, 966 + "put the counters into a counter group"), 967 + OPT_BOOLEAN('c', "scale", &stat_config.scale, "scale/normalize counters"), 968 + OPT_INCR('v', "verbose", &verbose, 969 + "be more verbose (show counter open errors, etc)"), 970 + OPT_INTEGER('r', "repeat", &run_count, 971 + "repeat command and print average + stddev (max: 100, forever: 0)"), 972 + OPT_BOOLEAN('n', "null", &null_run, 973 + "null run - dont start any counters"), 974 + OPT_INCR('d', "detailed", &detailed_run, 975 + "detailed run - start a lot of events"), 976 + OPT_BOOLEAN('S', "sync", &sync_run, 977 + "call sync() before starting a run"), 978 + OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL, 979 + "print large numbers with thousands\' separators", 980 + stat__set_big_num), 981 + OPT_STRING('C', "cpu", &target.cpu_list, "cpu", 982 + "list of cpus to monitor in system-wide"), 983 + OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode, 984 + "disable CPU count aggregation", AGGR_NONE), 985 + OPT_STRING('x', "field-separator", &csv_sep, "separator", 986 + "print counts with custom separator"), 987 + OPT_CALLBACK('G', "cgroup", &evsel_list, "name", 988 + "monitor event in cgroup name only", parse_cgroups), 989 + OPT_STRING('o', "output", &output_name, "file", "output file name"), 990 + OPT_BOOLEAN(0, "append", &append_file, "append to the output file"), 991 + OPT_INTEGER(0, "log-fd", &output_fd, 992 + "log output to fd, 
instead of stderr"), 993 + OPT_STRING(0, "pre", &pre_cmd, "command", 994 + "command to run prior to the measured command"), 995 + OPT_STRING(0, "post", &post_cmd, "command", 996 + "command to run after to the measured command"), 997 + OPT_UINTEGER('I', "interval-print", &stat_config.interval, 998 + "print counts at regular interval in ms (>= 10)"), 999 + OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode, 1000 + "aggregate counts per processor socket", AGGR_SOCKET), 1001 + OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode, 1002 + "aggregate counts per physical processor core", AGGR_CORE), 1003 + OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode, 1004 + "aggregate counts per thread", AGGR_THREAD), 1005 + OPT_UINTEGER('D', "delay", &initial_delay, 1006 + "ms to wait before starting measurement after program start"), 1007 + OPT_END() 1008 + }; 932 1009 933 1010 static int perf_stat__get_socket(struct cpu_map *map, int cpu) 934 1011 { ··· 1238 1193 1239 1194 int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused) 1240 1195 { 1241 - bool append_file = false; 1242 - int output_fd = 0; 1243 - const char *output_name = NULL; 1244 - const struct option options[] = { 1245 - OPT_BOOLEAN('T', "transaction", &transaction_run, 1246 - "hardware transaction statistics"), 1247 - OPT_CALLBACK('e', "event", &evsel_list, "event", 1248 - "event selector. 
use 'perf list' to list available events", 1249 - parse_events_option), 1250 - OPT_CALLBACK(0, "filter", &evsel_list, "filter", 1251 - "event filter", parse_filter), 1252 - OPT_BOOLEAN('i', "no-inherit", &no_inherit, 1253 - "child tasks do not inherit counters"), 1254 - OPT_STRING('p', "pid", &target.pid, "pid", 1255 - "stat events on existing process id"), 1256 - OPT_STRING('t', "tid", &target.tid, "tid", 1257 - "stat events on existing thread id"), 1258 - OPT_BOOLEAN('a', "all-cpus", &target.system_wide, 1259 - "system-wide collection from all CPUs"), 1260 - OPT_BOOLEAN('g', "group", &group, 1261 - "put the counters into a counter group"), 1262 - OPT_BOOLEAN('c', "scale", &stat_config.scale, "scale/normalize counters"), 1263 - OPT_INCR('v', "verbose", &verbose, 1264 - "be more verbose (show counter open errors, etc)"), 1265 - OPT_INTEGER('r', "repeat", &run_count, 1266 - "repeat command and print average + stddev (max: 100, forever: 0)"), 1267 - OPT_BOOLEAN('n', "null", &null_run, 1268 - "null run - dont start any counters"), 1269 - OPT_INCR('d', "detailed", &detailed_run, 1270 - "detailed run - start a lot of events"), 1271 - OPT_BOOLEAN('S', "sync", &sync_run, 1272 - "call sync() before starting a run"), 1273 - OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL, 1274 - "print large numbers with thousands\' separators", 1275 - stat__set_big_num), 1276 - OPT_STRING('C', "cpu", &target.cpu_list, "cpu", 1277 - "list of cpus to monitor in system-wide"), 1278 - OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode, 1279 - "disable CPU count aggregation", AGGR_NONE), 1280 - OPT_STRING('x', "field-separator", &csv_sep, "separator", 1281 - "print counts with custom separator"), 1282 - OPT_CALLBACK('G', "cgroup", &evsel_list, "name", 1283 - "monitor event in cgroup name only", parse_cgroups), 1284 - OPT_STRING('o', "output", &output_name, "file", "output file name"), 1285 - OPT_BOOLEAN(0, "append", &append_file, "append to the output file"), 1286 - OPT_INTEGER(0, "log-fd", 
&output_fd, 1287 - "log output to fd, instead of stderr"), 1288 - OPT_STRING(0, "pre", &pre_cmd, "command", 1289 - "command to run prior to the measured command"), 1290 - OPT_STRING(0, "post", &post_cmd, "command", 1291 - "command to run after to the measured command"), 1292 - OPT_UINTEGER('I', "interval-print", &stat_config.interval, 1293 - "print counts at regular interval in ms (>= 10)"), 1294 - OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode, 1295 - "aggregate counts per processor socket", AGGR_SOCKET), 1296 - OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode, 1297 - "aggregate counts per physical processor core", AGGR_CORE), 1298 - OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode, 1299 - "aggregate counts per thread", AGGR_THREAD), 1300 - OPT_UINTEGER('D', "delay", &initial_delay, 1301 - "ms to wait before starting measurement after program start"), 1302 - OPT_END() 1303 - }; 1304 1196 const char * const stat_usage[] = { 1305 1197 "perf stat [<options>] [<command>]", 1306 1198 NULL ··· 1253 1271 if (evsel_list == NULL) 1254 1272 return -ENOMEM; 1255 1273 1256 - argc = parse_options(argc, argv, options, stat_usage, 1274 + argc = parse_options(argc, argv, stat_options, stat_usage, 1257 1275 PARSE_OPT_STOP_AT_NON_OPTION); 1258 1276 1259 1277 interval = stat_config.interval; ··· 1263 1281 1264 1282 if (output_name && output_fd) { 1265 1283 fprintf(stderr, "cannot use both --output and --log-fd\n"); 1266 - parse_options_usage(stat_usage, options, "o", 1); 1267 - parse_options_usage(NULL, options, "log-fd", 0); 1284 + parse_options_usage(stat_usage, stat_options, "o", 1); 1285 + parse_options_usage(NULL, stat_options, "log-fd", 0); 1268 1286 goto out; 1269 1287 } 1270 1288 1271 1289 if (output_fd < 0) { 1272 1290 fprintf(stderr, "argument to --log-fd must be a > 0\n"); 1273 - parse_options_usage(stat_usage, options, "log-fd", 0); 1291 + parse_options_usage(stat_usage, stat_options, "log-fd", 0); 1274 1292 goto out; 1275 1293 } 1276 1294 ··· 1310 1328 /* 
User explicitly passed -B? */ 1311 1329 if (big_num_opt == 1) { 1312 1330 fprintf(stderr, "-B option not supported with -x\n"); 1313 - parse_options_usage(stat_usage, options, "B", 1); 1314 - parse_options_usage(NULL, options, "x", 1); 1331 + parse_options_usage(stat_usage, stat_options, "B", 1); 1332 + parse_options_usage(NULL, stat_options, "x", 1); 1315 1333 goto out; 1316 1334 } else /* Nope, so disable big number formatting */ 1317 1335 big_num = false; ··· 1319 1337 big_num = false; 1320 1338 1321 1339 if (!argc && target__none(&target)) 1322 - usage_with_options(stat_usage, options); 1340 + usage_with_options(stat_usage, stat_options); 1323 1341 1324 1342 if (run_count < 0) { 1325 1343 pr_err("Run count must be a positive number\n"); 1326 - parse_options_usage(stat_usage, options, "r", 1); 1344 + parse_options_usage(stat_usage, stat_options, "r", 1); 1327 1345 goto out; 1328 1346 } else if (run_count == 0) { 1329 1347 forever = true; ··· 1333 1351 if ((stat_config.aggr_mode == AGGR_THREAD) && !target__has_task(&target)) { 1334 1352 fprintf(stderr, "The --per-thread option is only available " 1335 1353 "when monitoring via -p -t options.\n"); 1336 - parse_options_usage(NULL, options, "p", 1); 1337 - parse_options_usage(NULL, options, "t", 1); 1354 + parse_options_usage(NULL, stat_options, "p", 1); 1355 + parse_options_usage(NULL, stat_options, "t", 1); 1338 1356 goto out; 1339 1357 } 1340 1358 ··· 1348 1366 fprintf(stderr, "both cgroup and no-aggregation " 1349 1367 "modes only available in system-wide mode\n"); 1350 1368 1351 - parse_options_usage(stat_usage, options, "G", 1); 1352 - parse_options_usage(NULL, options, "A", 1); 1353 - parse_options_usage(NULL, options, "a", 1); 1369 + parse_options_usage(stat_usage, stat_options, "G", 1); 1370 + parse_options_usage(NULL, stat_options, "A", 1); 1371 + parse_options_usage(NULL, stat_options, "a", 1); 1354 1372 goto out; 1355 1373 } 1356 1374 ··· 1362 1380 if (perf_evlist__create_maps(evsel_list, &target) < 0) { 
1363 1381 if (target__has_task(&target)) { 1364 1382 pr_err("Problems finding threads of monitor\n"); 1365 - parse_options_usage(stat_usage, options, "p", 1); 1366 - parse_options_usage(NULL, options, "t", 1); 1383 + parse_options_usage(stat_usage, stat_options, "p", 1); 1384 + parse_options_usage(NULL, stat_options, "t", 1); 1367 1385 } else if (target__has_cpu(&target)) { 1368 1386 perror("failed to parse CPUs map"); 1369 - parse_options_usage(stat_usage, options, "C", 1); 1370 - parse_options_usage(NULL, options, "a", 1); 1387 + parse_options_usage(stat_usage, stat_options, "C", 1); 1388 + parse_options_usage(NULL, stat_options, "a", 1); 1371 1389 } 1372 1390 goto out; 1373 1391 } ··· 1382 1400 if (interval && interval < 100) { 1383 1401 if (interval < 10) { 1384 1402 pr_err("print interval must be >= 10ms\n"); 1385 - parse_options_usage(stat_usage, options, "I", 1); 1403 + parse_options_usage(stat_usage, stat_options, "I", 1); 1386 1404 goto out; 1387 1405 } else 1388 1406 pr_warning("print interval < 100ms. "
+2
tools/perf/tests/.gitignore
··· 1 + llvm-src-base.c 2 + llvm-src-kbuild.c
+16 -1
tools/perf/tests/Build
··· 31 31 perf-y += parse-no-sample-id-all.o 32 32 perf-y += kmod-path.o 33 33 perf-y += thread-map.o 34 - perf-y += llvm.o 34 + perf-y += llvm.o llvm-src-base.o llvm-src-kbuild.o 35 + perf-y += bpf.o 35 36 perf-y += topology.o 37 + 38 + $(OUTPUT)tests/llvm-src-base.c: tests/bpf-script-example.c 39 + $(call rule_mkdir) 40 + $(Q)echo '#include <tests/llvm.h>' > $@ 41 + $(Q)echo 'const char test_llvm__bpf_base_prog[] =' >> $@ 42 + $(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@ 43 + $(Q)echo ';' >> $@ 44 + 45 + $(OUTPUT)tests/llvm-src-kbuild.c: tests/bpf-script-test-kbuild.c 46 + $(call rule_mkdir) 47 + $(Q)echo '#include <tests/llvm.h>' > $@ 48 + $(Q)echo 'const char test_llvm__bpf_test_kbuild_prog[] =' >> $@ 49 + $(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@ 50 + $(Q)echo ';' >> $@ 36 51 37 52 ifeq ($(ARCH),$(filter $(ARCH),x86 arm arm64)) 38 53 perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
+1 -2
tools/perf/tests/attr.c
··· 171 171 !lstat(path_perf, &st)) 172 172 return run_dir(path_dir, path_perf); 173 173 174 - fprintf(stderr, " (omitted)"); 175 - return 0; 174 + return TEST_SKIP; 176 175 }
+4
tools/perf/tests/bpf-script-example.c
··· 1 + /* 2 + * bpf-script-example.c 3 + * Test basic LLVM building 4 + */ 1 5 #ifndef LINUX_VERSION_CODE 2 6 # error Need LINUX_VERSION_CODE 3 7 # error Example: for 4.2 kernel, put 'clang-opt="-DLINUX_VERSION_CODE=0x40200" into llvm section of ~/.perfconfig'
+21
tools/perf/tests/bpf-script-test-kbuild.c
··· 1 + /* 2 + * bpf-script-test-kbuild.c 3 + * Test include from kernel header 4 + */ 5 + #ifndef LINUX_VERSION_CODE 6 + # error Need LINUX_VERSION_CODE 7 + # error Example: for 4.2 kernel, put 'clang-opt="-DLINUX_VERSION_CODE=0x40200" into llvm section of ~/.perfconfig' 8 + #endif 9 + #define SEC(NAME) __attribute__((section(NAME), used)) 10 + 11 + #include <uapi/linux/fs.h> 12 + #include <uapi/asm/ptrace.h> 13 + 14 + SEC("func=vfs_llseek") 15 + int bpf_func__vfs_llseek(void *ctx) 16 + { 17 + return 0; 18 + } 19 + 20 + char _license[] SEC("license") = "GPL"; 21 + int _version SEC("version") = LINUX_VERSION_CODE;
+209
tools/perf/tests/bpf.c
··· 1 + #include <stdio.h> 2 + #include <sys/epoll.h> 3 + #include <util/bpf-loader.h> 4 + #include <util/evlist.h> 5 + #include "tests.h" 6 + #include "llvm.h" 7 + #include "debug.h" 8 + #define NR_ITERS 111 9 + 10 + #ifdef HAVE_LIBBPF_SUPPORT 11 + 12 + static int epoll_pwait_loop(void) 13 + { 14 + int i; 15 + 16 + /* Should fail NR_ITERS times */ 17 + for (i = 0; i < NR_ITERS; i++) 18 + epoll_pwait(-(i + 1), NULL, 0, 0, NULL); 19 + return 0; 20 + } 21 + 22 + static struct { 23 + enum test_llvm__testcase prog_id; 24 + const char *desc; 25 + const char *name; 26 + const char *msg_compile_fail; 27 + const char *msg_load_fail; 28 + int (*target_func)(void); 29 + int expect_result; 30 + } bpf_testcase_table[] = { 31 + { 32 + LLVM_TESTCASE_BASE, 33 + "Test basic BPF filtering", 34 + "[basic_bpf_test]", 35 + "fix 'perf test LLVM' first", 36 + "load bpf object failed", 37 + &epoll_pwait_loop, 38 + (NR_ITERS + 1) / 2, 39 + }, 40 + }; 41 + 42 + static int do_test(struct bpf_object *obj, int (*func)(void), 43 + int expect) 44 + { 45 + struct record_opts opts = { 46 + .target = { 47 + .uid = UINT_MAX, 48 + .uses_mmap = true, 49 + }, 50 + .freq = 0, 51 + .mmap_pages = 256, 52 + .default_interval = 1, 53 + }; 54 + 55 + char pid[16]; 56 + char sbuf[STRERR_BUFSIZE]; 57 + struct perf_evlist *evlist; 58 + int i, ret = TEST_FAIL, err = 0, count = 0; 59 + 60 + struct parse_events_evlist parse_evlist; 61 + struct parse_events_error parse_error; 62 + 63 + bzero(&parse_error, sizeof(parse_error)); 64 + bzero(&parse_evlist, sizeof(parse_evlist)); 65 + parse_evlist.error = &parse_error; 66 + INIT_LIST_HEAD(&parse_evlist.list); 67 + 68 + err = parse_events_load_bpf_obj(&parse_evlist, &parse_evlist.list, obj); 69 + if (err || list_empty(&parse_evlist.list)) { 70 + pr_debug("Failed to add events selected by BPF\n"); 71 + if (!err) 72 + return TEST_FAIL; 73 + } 74 + 75 + snprintf(pid, sizeof(pid), "%d", getpid()); 76 + pid[sizeof(pid) - 1] = '\0'; 77 + opts.target.tid = opts.target.pid = 
pid; 78 + 79 + /* Instead of perf_evlist__new_default, don't add default events */ 80 + evlist = perf_evlist__new(); 81 + if (!evlist) { 82 + pr_debug("No ehough memory to create evlist\n"); 83 + return TEST_FAIL; 84 + } 85 + 86 + err = perf_evlist__create_maps(evlist, &opts.target); 87 + if (err < 0) { 88 + pr_debug("Not enough memory to create thread/cpu maps\n"); 89 + goto out_delete_evlist; 90 + } 91 + 92 + perf_evlist__splice_list_tail(evlist, &parse_evlist.list); 93 + evlist->nr_groups = parse_evlist.nr_groups; 94 + 95 + perf_evlist__config(evlist, &opts); 96 + 97 + err = perf_evlist__open(evlist); 98 + if (err < 0) { 99 + pr_debug("perf_evlist__open: %s\n", 100 + strerror_r(errno, sbuf, sizeof(sbuf))); 101 + goto out_delete_evlist; 102 + } 103 + 104 + err = perf_evlist__mmap(evlist, opts.mmap_pages, false); 105 + if (err < 0) { 106 + pr_debug("perf_evlist__mmap: %s\n", 107 + strerror_r(errno, sbuf, sizeof(sbuf))); 108 + goto out_delete_evlist; 109 + } 110 + 111 + perf_evlist__enable(evlist); 112 + (*func)(); 113 + perf_evlist__disable(evlist); 114 + 115 + for (i = 0; i < evlist->nr_mmaps; i++) { 116 + union perf_event *event; 117 + 118 + while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) { 119 + const u32 type = event->header.type; 120 + 121 + if (type == PERF_RECORD_SAMPLE) 122 + count ++; 123 + } 124 + } 125 + 126 + if (count != expect) 127 + pr_debug("BPF filter result incorrect\n"); 128 + 129 + ret = TEST_OK; 130 + 131 + out_delete_evlist: 132 + perf_evlist__delete(evlist); 133 + return ret; 134 + } 135 + 136 + static struct bpf_object * 137 + prepare_bpf(void *obj_buf, size_t obj_buf_sz, const char *name) 138 + { 139 + struct bpf_object *obj; 140 + 141 + obj = bpf__prepare_load_buffer(obj_buf, obj_buf_sz, name); 142 + if (IS_ERR(obj)) { 143 + pr_debug("Compile BPF program failed.\n"); 144 + return NULL; 145 + } 146 + return obj; 147 + } 148 + 149 + static int __test__bpf(int index) 150 + { 151 + int ret; 152 + void *obj_buf; 153 + size_t 
obj_buf_sz; 154 + struct bpf_object *obj; 155 + 156 + ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz, 157 + bpf_testcase_table[index].prog_id, 158 + true); 159 + if (ret != TEST_OK || !obj_buf || !obj_buf_sz) { 160 + pr_debug("Unable to get BPF object, %s\n", 161 + bpf_testcase_table[index].msg_compile_fail); 162 + if (index == 0) 163 + return TEST_SKIP; 164 + else 165 + return TEST_FAIL; 166 + } 167 + 168 + obj = prepare_bpf(obj_buf, obj_buf_sz, 169 + bpf_testcase_table[index].name); 170 + if (!obj) { 171 + ret = TEST_FAIL; 172 + goto out; 173 + } 174 + 175 + ret = do_test(obj, 176 + bpf_testcase_table[index].target_func, 177 + bpf_testcase_table[index].expect_result); 178 + out: 179 + bpf__clear(); 180 + return ret; 181 + } 182 + 183 + int test__bpf(void) 184 + { 185 + unsigned int i; 186 + int err; 187 + 188 + if (geteuid() != 0) { 189 + pr_debug("Only root can run BPF test\n"); 190 + return TEST_SKIP; 191 + } 192 + 193 + for (i = 0; i < ARRAY_SIZE(bpf_testcase_table); i++) { 194 + err = __test__bpf(i); 195 + 196 + if (err != TEST_OK) 197 + return err; 198 + } 199 + 200 + return TEST_OK; 201 + } 202 + 203 + #else 204 + int test__bpf(void) 205 + { 206 + pr_debug("Skip BPF test because BPF support is not compiled\n"); 207 + return TEST_SKIP; 208 + } 209 + #endif
+5 -1
tools/perf/tests/builtin-test.c
··· 166 166 .func = test_session_topology, 167 167 }, 168 168 { 169 + .desc = "Test BPF filter", 170 + .func = test__bpf, 171 + }, 172 + { 169 173 .func = NULL, 170 174 }, 171 175 }; ··· 196 192 continue; 197 193 } 198 194 199 - if (strstr(test->desc, argv[i])) 195 + if (strcasestr(test->desc, argv[i])) 200 196 return true; 201 197 } 202 198
+4 -4
tools/perf/tests/code-reading.c
··· 613 613 case TEST_CODE_READING_OK: 614 614 return 0; 615 615 case TEST_CODE_READING_NO_VMLINUX: 616 - fprintf(stderr, " (no vmlinux)"); 616 + pr_debug("no vmlinux\n"); 617 617 return 0; 618 618 case TEST_CODE_READING_NO_KCORE: 619 - fprintf(stderr, " (no kcore)"); 619 + pr_debug("no kcore\n"); 620 620 return 0; 621 621 case TEST_CODE_READING_NO_ACCESS: 622 - fprintf(stderr, " (no access)"); 622 + pr_debug("no access\n"); 623 623 return 0; 624 624 case TEST_CODE_READING_NO_KERNEL_OBJ: 625 - fprintf(stderr, " (no kernel obj)"); 625 + pr_debug("no kernel obj\n"); 626 626 return 0; 627 627 default: 628 628 return -1;
+2 -2
tools/perf/tests/keep-tracking.c
··· 90 90 evsel->attr.enable_on_exec = 0; 91 91 92 92 if (perf_evlist__open(evlist) < 0) { 93 - fprintf(stderr, " (not supported)"); 94 - err = 0; 93 + pr_debug("Unable to open dummy and cycles event\n"); 94 + err = TEST_SKIP; 95 95 goto out_err; 96 96 } 97 97
+109 -37
tools/perf/tests/llvm.c
··· 2 2 #include <bpf/libbpf.h> 3 3 #include <util/llvm-utils.h> 4 4 #include <util/cache.h> 5 + #include "llvm.h" 5 6 #include "tests.h" 6 7 #include "debug.h" 7 8 ··· 12 11 return perf_default_config(var, val, arg); 13 12 } 14 13 15 - /* 16 - * Randomly give it a "version" section since we don't really load it 17 - * into kernel 18 - */ 19 - static const char test_bpf_prog[] = 20 - "__attribute__((section(\"do_fork\"), used)) " 21 - "int fork(void *ctx) {return 0;} " 22 - "char _license[] __attribute__((section(\"license\"), used)) = \"GPL\";" 23 - "int _version __attribute__((section(\"version\"), used)) = 0x40100;"; 24 - 25 14 #ifdef HAVE_LIBBPF_SUPPORT 26 15 static int test__bpf_parsing(void *obj_buf, size_t obj_buf_sz) 27 16 { 28 17 struct bpf_object *obj; 29 18 30 19 obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, NULL); 31 - if (!obj) 32 - return -1; 20 + if (IS_ERR(obj)) 21 + return TEST_FAIL; 33 22 bpf_object__close(obj); 34 - return 0; 23 + return TEST_OK; 35 24 } 36 25 #else 37 26 static int test__bpf_parsing(void *obj_buf __maybe_unused, 38 27 size_t obj_buf_sz __maybe_unused) 39 28 { 40 - fprintf(stderr, " (skip bpf parsing)"); 41 - return 0; 29 + pr_debug("Skip bpf parsing\n"); 30 + return TEST_OK; 42 31 } 43 32 #endif 44 33 45 - int test__llvm(void) 34 + static struct { 35 + const char *source; 36 + const char *desc; 37 + } bpf_source_table[__LLVM_TESTCASE_MAX] = { 38 + [LLVM_TESTCASE_BASE] = { 39 + .source = test_llvm__bpf_base_prog, 40 + .desc = "Basic BPF llvm compiling test", 41 + }, 42 + [LLVM_TESTCASE_KBUILD] = { 43 + .source = test_llvm__bpf_test_kbuild_prog, 44 + .desc = "Test kbuild searching", 45 + }, 46 + }; 47 + 48 + 49 + int 50 + test_llvm__fetch_bpf_obj(void **p_obj_buf, 51 + size_t *p_obj_buf_sz, 52 + enum test_llvm__testcase index, 53 + bool force) 46 54 { 47 - char *tmpl_new, *clang_opt_new; 48 - void *obj_buf; 49 - size_t obj_buf_sz; 50 - int err, old_verbose; 55 + const char *source; 56 + const char *desc; 57 + const char 
*tmpl_old, *clang_opt_old; 58 + char *tmpl_new = NULL, *clang_opt_new = NULL; 59 + int err, old_verbose, ret = TEST_FAIL; 60 + 61 + if (index >= __LLVM_TESTCASE_MAX) 62 + return TEST_FAIL; 63 + 64 + source = bpf_source_table[index].source; 65 + desc = bpf_source_table[index].desc; 51 66 52 67 perf_config(perf_config_cb, NULL); 53 68 ··· 71 54 * Skip this test if user's .perfconfig doesn't set [llvm] section 72 55 * and clang is not found in $PATH, and this is not perf test -v 73 56 */ 74 - if (verbose == 0 && !llvm_param.user_set_param && llvm__search_clang()) { 75 - fprintf(stderr, " (no clang, try 'perf test -v LLVM')"); 57 + if (!force && (verbose == 0 && 58 + !llvm_param.user_set_param && 59 + llvm__search_clang())) { 60 + pr_debug("No clang and no verbosive, skip this test\n"); 76 61 return TEST_SKIP; 77 62 } 78 63 79 - old_verbose = verbose; 80 64 /* 81 65 * llvm is verbosity when error. Suppress all error output if 82 66 * not 'perf test -v'. 83 67 */ 68 + old_verbose = verbose; 84 69 if (verbose == 0) 85 70 verbose = -1; 86 71 72 + *p_obj_buf = NULL; 73 + *p_obj_buf_sz = 0; 74 + 87 75 if (!llvm_param.clang_bpf_cmd_template) 88 - return -1; 76 + goto out; 89 77 90 78 if (!llvm_param.clang_opt) 91 79 llvm_param.clang_opt = strdup(""); 92 80 93 - err = asprintf(&tmpl_new, "echo '%s' | %s", test_bpf_prog, 94 - llvm_param.clang_bpf_cmd_template); 81 + err = asprintf(&tmpl_new, "echo '%s' | %s%s", source, 82 + llvm_param.clang_bpf_cmd_template, 83 + old_verbose ? 
"" : " 2>/dev/null"); 95 84 if (err < 0) 96 - return -1; 85 + goto out; 97 86 err = asprintf(&clang_opt_new, "-xc %s", llvm_param.clang_opt); 98 87 if (err < 0) 99 - return -1; 88 + goto out; 100 89 90 + tmpl_old = llvm_param.clang_bpf_cmd_template; 101 91 llvm_param.clang_bpf_cmd_template = tmpl_new; 92 + clang_opt_old = llvm_param.clang_opt; 102 93 llvm_param.clang_opt = clang_opt_new; 103 - err = llvm__compile_bpf("-", &obj_buf, &obj_buf_sz); 94 + 95 + err = llvm__compile_bpf("-", p_obj_buf, p_obj_buf_sz); 96 + 97 + llvm_param.clang_bpf_cmd_template = tmpl_old; 98 + llvm_param.clang_opt = clang_opt_old; 104 99 105 100 verbose = old_verbose; 106 - if (err) { 107 - if (!verbose) 108 - fprintf(stderr, " (use -v to see error message)"); 109 - return -1; 110 - } 101 + if (err) 102 + goto out; 111 103 112 - err = test__bpf_parsing(obj_buf, obj_buf_sz); 113 - free(obj_buf); 114 - return err; 104 + ret = TEST_OK; 105 + out: 106 + free(tmpl_new); 107 + free(clang_opt_new); 108 + if (ret != TEST_OK) 109 + pr_debug("Failed to compile test case: '%s'\n", desc); 110 + return ret; 111 + } 112 + 113 + int test__llvm(void) 114 + { 115 + enum test_llvm__testcase i; 116 + 117 + for (i = 0; i < __LLVM_TESTCASE_MAX; i++) { 118 + int ret; 119 + void *obj_buf = NULL; 120 + size_t obj_buf_sz = 0; 121 + 122 + ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz, 123 + i, false); 124 + 125 + if (ret == TEST_OK) { 126 + ret = test__bpf_parsing(obj_buf, obj_buf_sz); 127 + if (ret != TEST_OK) 128 + pr_debug("Failed to parse test case '%s'\n", 129 + bpf_source_table[i].desc); 130 + } 131 + free(obj_buf); 132 + 133 + switch (ret) { 134 + case TEST_SKIP: 135 + return TEST_SKIP; 136 + case TEST_OK: 137 + break; 138 + default: 139 + /* 140 + * Test 0 is the basic LLVM test. If test 0 141 + * fail, the basic LLVM support not functional 142 + * so the whole test should fail. If other test 143 + * case fail, it can be fixed by adjusting 144 + * config so don't report error. 
145 + */ 146 + if (i == 0) 147 + return TEST_FAIL; 148 + else 149 + return TEST_SKIP; 150 + } 151 + } 152 + return TEST_OK; 115 153 }
+18
tools/perf/tests/llvm.h
··· 1 + #ifndef PERF_TEST_LLVM_H 2 + #define PERF_TEST_LLVM_H 3 + 4 + #include <stddef.h> /* for size_t */ 5 + #include <stdbool.h> /* for bool */ 6 + 7 + extern const char test_llvm__bpf_base_prog[]; 8 + extern const char test_llvm__bpf_test_kbuild_prog[]; 9 + 10 + enum test_llvm__testcase { 11 + LLVM_TESTCASE_BASE, 12 + LLVM_TESTCASE_KBUILD, 13 + __LLVM_TESTCASE_MAX, 14 + }; 15 + 16 + int test_llvm__fetch_bpf_obj(void **p_obj_buf, size_t *p_obj_buf_sz, 17 + enum test_llvm__testcase index, bool force); 18 + #endif
+5
tools/perf/tests/make
··· 221 221 222 222 all: 223 223 224 + ifdef SHUF 225 + run := $(shell shuf -e $(run)) 226 + run_O := $(shell shuf -e $(run_O)) 227 + endif 228 + 224 229 ifdef DEBUG 225 230 d := $(info run $(run)) 226 231 d := $(info run_O $(run_O))
+2 -2
tools/perf/tests/switch-tracking.c
··· 366 366 367 367 /* Third event */ 368 368 if (!perf_evlist__can_select_event(evlist, sched_switch)) { 369 - fprintf(stderr, " (no sched_switch)"); 369 + pr_debug("No sched_switch\n"); 370 370 err = 0; 371 371 goto out; 372 372 } ··· 442 442 } 443 443 444 444 if (perf_evlist__open(evlist) < 0) { 445 - fprintf(stderr, " (not supported)"); 445 + pr_debug("Not supported\n"); 446 446 err = 0; 447 447 goto out; 448 448 }
+1
tools/perf/tests/tests.h
··· 66 66 int test__kmod_path__parse(void); 67 67 int test__thread_map(void); 68 68 int test__llvm(void); 69 + int test__bpf(void); 69 70 int test_session_topology(void); 70 71 71 72 #if defined(__arm__) || defined(__aarch64__)
+19 -2
tools/perf/util/annotate.c
··· 1084 1084 struct kcore_extract kce; 1085 1085 bool delete_extract = false; 1086 1086 int lineno = 0; 1087 + int nline; 1087 1088 1088 1089 if (filename) 1089 1090 symbol__join_symfs(symfs_filename, filename); ··· 1180 1179 1181 1180 ret = decompress_to_file(m.ext, symfs_filename, fd); 1182 1181 1182 + if (ret) 1183 + pr_err("Cannot decompress %s %s\n", m.ext, symfs_filename); 1184 + 1183 1185 free(m.ext); 1184 1186 close(fd); 1185 1187 ··· 1208 1204 pr_debug("Executing: %s\n", command); 1209 1205 1210 1206 file = popen(command, "r"); 1211 - if (!file) 1207 + if (!file) { 1208 + pr_err("Failure running %s\n", command); 1209 + /* 1210 + * If we were using debug info should retry with 1211 + * original binary. 1212 + */ 1212 1213 goto out_remove_tmp; 1214 + } 1213 1215 1214 - while (!feof(file)) 1216 + nline = 0; 1217 + while (!feof(file)) { 1215 1218 if (symbol__parse_objdump_line(sym, map, file, privsize, 1216 1219 &lineno) < 0) 1217 1220 break; 1221 + nline++; 1222 + } 1223 + 1224 + if (nline == 0) 1225 + pr_err("No output from %s\n", command); 1218 1226 1219 1227 /* 1220 1228 * kallsyms does not have symbol sizes so there may a nop at the end. ··· 1620 1604 len = symbol__size(sym); 1621 1605 1622 1606 if (print_lines) { 1607 + srcline_full_filename = full_paths; 1623 1608 symbol__get_source_line(sym, map, evsel, &source_line, len); 1624 1609 print_summary(&source_line, dso->long_name); 1625 1610 }
+124 -19
tools/perf/util/bpf-loader.c
··· 26 26 return ret; \ 27 27 } 28 28 29 - DEFINE_PRINT_FN(warning, 0) 30 - DEFINE_PRINT_FN(info, 0) 29 + DEFINE_PRINT_FN(warning, 1) 30 + DEFINE_PRINT_FN(info, 1) 31 31 DEFINE_PRINT_FN(debug, 1) 32 32 33 33 struct bpf_prog_priv { 34 34 struct perf_probe_event pev; 35 35 }; 36 36 37 + static bool libbpf_initialized; 38 + 39 + struct bpf_object * 40 + bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name) 41 + { 42 + struct bpf_object *obj; 43 + 44 + if (!libbpf_initialized) { 45 + libbpf_set_print(libbpf_warning, 46 + libbpf_info, 47 + libbpf_debug); 48 + libbpf_initialized = true; 49 + } 50 + 51 + obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, name); 52 + if (IS_ERR(obj)) { 53 + pr_debug("bpf: failed to load buffer\n"); 54 + return ERR_PTR(-EINVAL); 55 + } 56 + 57 + return obj; 58 + } 59 + 37 60 struct bpf_object *bpf__prepare_load(const char *filename, bool source) 38 61 { 39 62 struct bpf_object *obj; 40 - static bool libbpf_initialized; 41 63 42 64 if (!libbpf_initialized) { 43 65 libbpf_set_print(libbpf_warning, ··· 75 53 76 54 err = llvm__compile_bpf(filename, &obj_buf, &obj_buf_sz); 77 55 if (err) 78 - return ERR_PTR(err); 56 + return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE); 79 57 obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, filename); 80 58 free(obj_buf); 81 59 } else 82 60 obj = bpf_object__open(filename); 83 61 84 - if (!obj) { 62 + if (IS_ERR(obj)) { 85 63 pr_debug("bpf: failed to load %s\n", filename); 86 - return ERR_PTR(-EINVAL); 64 + return obj; 87 65 } 88 66 89 67 return obj; ··· 118 96 int err; 119 97 120 98 config_str = bpf_program__title(prog, false); 121 - if (!config_str) { 99 + if (IS_ERR(config_str)) { 122 100 pr_debug("bpf: unable to get title for program\n"); 123 - return -EINVAL; 101 + return PTR_ERR(config_str); 124 102 } 125 103 126 104 priv = calloc(sizeof(*priv), 1); ··· 135 113 if (err < 0) { 136 114 pr_debug("bpf: '%s' is not a valid config string\n", 137 115 config_str); 138 - err = -EINVAL; 116 + err = 
-BPF_LOADER_ERRNO__CONFIG; 139 117 goto errout; 140 118 } 141 119 142 120 if (pev->group && strcmp(pev->group, PERF_BPF_PROBE_GROUP)) { 143 121 pr_debug("bpf: '%s': group for event is set and not '%s'.\n", 144 122 config_str, PERF_BPF_PROBE_GROUP); 145 - err = -EINVAL; 123 + err = -BPF_LOADER_ERRNO__GROUP; 146 124 goto errout; 147 125 } else if (!pev->group) 148 126 pev->group = strdup(PERF_BPF_PROBE_GROUP); ··· 154 132 } 155 133 156 134 if (!pev->event) { 157 - pr_debug("bpf: '%s': event name is missing\n", 135 + pr_debug("bpf: '%s': event name is missing. Section name should be 'key=value'\n", 158 136 config_str); 159 - err = -EINVAL; 137 + err = -BPF_LOADER_ERRNO__EVENTNAME; 160 138 goto errout; 161 139 } 162 140 pr_debug("bpf: config '%s' is ok\n", config_str); ··· 307 285 (void **)&priv); 308 286 if (err || !priv) { 309 287 pr_debug("bpf: failed to get private field\n"); 310 - return -EINVAL; 288 + return -BPF_LOADER_ERRNO__INTERNAL; 311 289 } 312 290 313 291 pev = &priv->pev; ··· 330 308 return 0; 331 309 } 332 310 311 + #define ERRNO_OFFSET(e) ((e) - __BPF_LOADER_ERRNO__START) 312 + #define ERRCODE_OFFSET(c) ERRNO_OFFSET(BPF_LOADER_ERRNO__##c) 313 + #define NR_ERRNO (__BPF_LOADER_ERRNO__END - __BPF_LOADER_ERRNO__START) 314 + 315 + static const char *bpf_loader_strerror_table[NR_ERRNO] = { 316 + [ERRCODE_OFFSET(CONFIG)] = "Invalid config string", 317 + [ERRCODE_OFFSET(GROUP)] = "Invalid group name", 318 + [ERRCODE_OFFSET(EVENTNAME)] = "No event name found in config string", 319 + [ERRCODE_OFFSET(INTERNAL)] = "BPF loader internal error", 320 + [ERRCODE_OFFSET(COMPILE)] = "Error when compiling BPF scriptlet", 321 + }; 322 + 323 + static int 324 + bpf_loader_strerror(int err, char *buf, size_t size) 325 + { 326 + char sbuf[STRERR_BUFSIZE]; 327 + const char *msg; 328 + 329 + if (!buf || !size) 330 + return -1; 331 + 332 + err = err > 0 ? 
err : -err; 333 + 334 + if (err >= __LIBBPF_ERRNO__START) 335 + return libbpf_strerror(err, buf, size); 336 + 337 + if (err >= __BPF_LOADER_ERRNO__START && err < __BPF_LOADER_ERRNO__END) { 338 + msg = bpf_loader_strerror_table[ERRNO_OFFSET(err)]; 339 + snprintf(buf, size, "%s", msg); 340 + buf[size - 1] = '\0'; 341 + return 0; 342 + } 343 + 344 + if (err >= __BPF_LOADER_ERRNO__END) 345 + snprintf(buf, size, "Unknown bpf loader error %d", err); 346 + else 347 + snprintf(buf, size, "%s", 348 + strerror_r(err, sbuf, sizeof(sbuf))); 349 + 350 + buf[size - 1] = '\0'; 351 + return -1; 352 + } 353 + 333 354 #define bpf__strerror_head(err, buf, size) \ 334 355 char sbuf[STRERR_BUFSIZE], *emsg;\ 335 356 if (!size)\ 336 357 return 0;\ 337 358 if (err < 0)\ 338 359 err = -err;\ 339 - emsg = strerror_r(err, sbuf, sizeof(sbuf));\ 360 + bpf_loader_strerror(err, sbuf, sizeof(sbuf));\ 361 + emsg = sbuf;\ 340 362 switch (err) {\ 341 363 default:\ 342 364 scnprintf(buf, size, "%s", emsg);\ ··· 396 330 }\ 397 331 buf[size - 1] = '\0'; 398 332 333 + int bpf__strerror_prepare_load(const char *filename, bool source, 334 + int err, char *buf, size_t size) 335 + { 336 + size_t n; 337 + int ret; 338 + 339 + n = snprintf(buf, size, "Failed to load %s%s: ", 340 + filename, source ? " from source" : ""); 341 + if (n >= size) { 342 + buf[size - 1] = '\0'; 343 + return 0; 344 + } 345 + buf += n; 346 + size -= n; 347 + 348 + ret = bpf_loader_strerror(err, buf, size); 349 + buf[size - 1] = '\0'; 350 + return ret; 351 + } 352 + 399 353 int bpf__strerror_probe(struct bpf_object *obj __maybe_unused, 400 354 int err, char *buf, size_t size) 401 355 { 402 356 bpf__strerror_head(err, buf, size); 403 357 bpf__strerror_entry(EEXIST, "Probe point exist. 
Try use 'perf probe -d \"*\"'"); 404 - bpf__strerror_entry(EPERM, "You need to be root, and /proc/sys/kernel/kptr_restrict should be 0\n"); 405 - bpf__strerror_entry(ENOENT, "You need to check probing points in BPF file\n"); 358 + bpf__strerror_entry(EACCES, "You need to be root"); 359 + bpf__strerror_entry(EPERM, "You need to be root, and /proc/sys/kernel/kptr_restrict should be 0"); 360 + bpf__strerror_entry(ENOENT, "You need to check probing points in BPF file"); 406 361 bpf__strerror_end(buf, size); 407 362 return 0; 408 363 } 409 364 410 - int bpf__strerror_load(struct bpf_object *obj __maybe_unused, 365 + int bpf__strerror_load(struct bpf_object *obj, 411 366 int err, char *buf, size_t size) 412 367 { 413 368 bpf__strerror_head(err, buf, size); 414 - bpf__strerror_entry(EINVAL, "%s: Are you root and runing a CONFIG_BPF_SYSCALL kernel?", 415 - emsg) 369 + case LIBBPF_ERRNO__KVER: { 370 + unsigned int obj_kver = bpf_object__get_kversion(obj); 371 + unsigned int real_kver; 372 + 373 + if (fetch_kernel_version(&real_kver, NULL, 0)) { 374 + scnprintf(buf, size, "Unable to fetch kernel version"); 375 + break; 376 + } 377 + 378 + if (obj_kver != real_kver) { 379 + scnprintf(buf, size, 380 + "'version' ("KVER_FMT") doesn't match running kernel ("KVER_FMT")", 381 + KVER_PARAM(obj_kver), 382 + KVER_PARAM(real_kver)); 383 + break; 384 + } 385 + 386 + scnprintf(buf, size, "Failed to load program for unknown reason"); 387 + break; 388 + } 416 389 bpf__strerror_end(buf, size); 417 390 return 0; 418 391 }
+33
tools/perf/util/bpf-loader.h
··· 8 8 #include <linux/compiler.h> 9 9 #include <linux/err.h> 10 10 #include <string.h> 11 + #include <bpf/libbpf.h> 11 12 #include "probe-event.h" 12 13 #include "debug.h" 14 + 15 + enum bpf_loader_errno { 16 + __BPF_LOADER_ERRNO__START = __LIBBPF_ERRNO__START - 100, 17 + /* Invalid config string */ 18 + BPF_LOADER_ERRNO__CONFIG = __BPF_LOADER_ERRNO__START, 19 + BPF_LOADER_ERRNO__GROUP, /* Invalid group name */ 20 + BPF_LOADER_ERRNO__EVENTNAME, /* Event name is missing */ 21 + BPF_LOADER_ERRNO__INTERNAL, /* BPF loader internal error */ 22 + BPF_LOADER_ERRNO__COMPILE, /* Error when compiling BPF scriptlet */ 23 + __BPF_LOADER_ERRNO__END, 24 + }; 13 25 14 26 struct bpf_object; 15 27 #define PERF_BPF_PROBE_GROUP "perf_bpf_probe" ··· 31 19 32 20 #ifdef HAVE_LIBBPF_SUPPORT 33 21 struct bpf_object *bpf__prepare_load(const char *filename, bool source); 22 + int bpf__strerror_prepare_load(const char *filename, bool source, 23 + int err, char *buf, size_t size); 24 + 25 + struct bpf_object *bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, 26 + const char *name); 34 27 35 28 void bpf__clear(void); 36 29 ··· 55 38 bool source __maybe_unused) 56 39 { 57 40 pr_debug("ERROR: eBPF object loading is disabled during compiling.\n"); 41 + return ERR_PTR(-ENOTSUP); 42 + } 43 + 44 + static inline struct bpf_object * 45 + bpf__prepare_load_buffer(void *obj_buf __maybe_unused, 46 + size_t obj_buf_sz __maybe_unused) 47 + { 58 48 return ERR_PTR(-ENOTSUP); 59 49 } 60 50 ··· 89 65 size); 90 66 buf[size - 1] = '\0'; 91 67 return 0; 68 + } 69 + 70 + static inline 71 + int bpf__strerror_prepare_load(const char *filename __maybe_unused, 72 + bool source __maybe_unused, 73 + int err __maybe_unused, 74 + char *buf, size_t size) 75 + { 76 + return __bpf_strerror(buf, size); 92 77 } 93 78 94 79 static inline int
+40 -18
tools/perf/util/llvm-utils.c
··· 4 4 */ 5 5 6 6 #include <stdio.h> 7 - #include <sys/utsname.h> 8 7 #include "util.h" 9 8 #include "debug.h" 10 9 #include "llvm-utils.h" 11 10 #include "cache.h" 12 11 13 12 #define CLANG_BPF_CMD_DEFAULT_TEMPLATE \ 14 - "$CLANG_EXEC -D__KERNEL__ $CLANG_OPTIONS " \ 15 - "$KERNEL_INC_OPTIONS -Wno-unused-value " \ 16 - "-Wno-pointer-sign -working-directory " \ 17 - "$WORKING_DIR -c \"$CLANG_SOURCE\" -target bpf -O2 -o -" 13 + "$CLANG_EXEC -D__KERNEL__ -D__NR_CPUS__=$NR_CPUS "\ 14 + "-DLINUX_VERSION_CODE=$LINUX_VERSION_CODE " \ 15 + "$CLANG_OPTIONS $KERNEL_INC_OPTIONS " \ 16 + "-Wno-unused-value -Wno-pointer-sign " \ 17 + "-working-directory $WORKING_DIR " \ 18 + "-c \"$CLANG_SOURCE\" -target bpf -O2 -o -" 18 19 19 20 struct llvm_param llvm_param = { 20 21 .clang_path = "clang", ··· 215 214 const char *suffix_dir = ""; 216 215 217 216 char *autoconf_path; 218 - struct utsname utsname; 219 217 220 218 int err; 221 219 222 220 if (!test_dir) { 223 - err = uname(&utsname); 224 - if (err) { 225 - pr_warning("uname failed: %s\n", strerror(errno)); 226 - return -EINVAL; 227 - } 221 + /* _UTSNAME_LENGTH is 65 */ 222 + char release[128]; 228 223 229 - test_dir = utsname.release; 224 + err = fetch_kernel_version(NULL, release, 225 + sizeof(release)); 226 + if (err) 227 + return -EINVAL; 228 + 229 + test_dir = release; 230 230 prefix_dir = "/lib/modules/"; 231 231 suffix_dir = "/build"; 232 232 } ··· 328 326 int llvm__compile_bpf(const char *path, void **p_obj_buf, 329 327 size_t *p_obj_buf_sz) 330 328 { 331 - int err; 332 - char clang_path[PATH_MAX]; 333 - const char *clang_opt = llvm_param.clang_opt; 334 - const char *template = llvm_param.clang_bpf_cmd_template; 335 - char *kbuild_dir = NULL, *kbuild_include_opts = NULL; 336 - void *obj_buf = NULL; 337 329 size_t obj_buf_sz; 330 + void *obj_buf = NULL; 331 + int err, nr_cpus_avail; 332 + unsigned int kernel_version; 333 + char linux_version_code_str[64]; 334 + const char *clang_opt = llvm_param.clang_opt; 335 + char 
clang_path[PATH_MAX], nr_cpus_avail_str[64]; 336 + char *kbuild_dir = NULL, *kbuild_include_opts = NULL; 337 + const char *template = llvm_param.clang_bpf_cmd_template; 338 338 339 339 if (!template) 340 340 template = CLANG_BPF_CMD_DEFAULT_TEMPLATE; ··· 358 354 */ 359 355 get_kbuild_opts(&kbuild_dir, &kbuild_include_opts); 360 356 357 + nr_cpus_avail = sysconf(_SC_NPROCESSORS_CONF); 358 + if (nr_cpus_avail <= 0) { 359 + pr_err( 360 + "WARNING:\tunable to get available CPUs in this system: %s\n" 361 + " \tUse 128 instead.\n", strerror(errno)); 362 + nr_cpus_avail = 128; 363 + } 364 + snprintf(nr_cpus_avail_str, sizeof(nr_cpus_avail_str), "%d", 365 + nr_cpus_avail); 366 + 367 + if (fetch_kernel_version(&kernel_version, NULL, 0)) 368 + kernel_version = 0; 369 + 370 + snprintf(linux_version_code_str, sizeof(linux_version_code_str), 371 + "0x%x", kernel_version); 372 + 373 + force_set_env("NR_CPUS", nr_cpus_avail_str); 374 + force_set_env("LINUX_VERSION_CODE", linux_version_code_str); 361 375 force_set_env("CLANG_EXEC", clang_path); 362 376 force_set_env("CLANG_OPTIONS", clang_opt); 363 377 force_set_env("KERNEL_INC_OPTIONS", kbuild_include_opts);
+8 -2
tools/perf/util/map.c
··· 644 644 return printed; 645 645 } 646 646 647 + static void __map_groups__insert(struct map_groups *mg, struct map *map) 648 + { 649 + __maps__insert(&mg->maps[map->type], map); 650 + map->groups = mg; 651 + } 652 + 647 653 static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp) 648 654 { 649 655 struct rb_root *root; ··· 688 682 } 689 683 690 684 before->end = map->start; 691 - __maps__insert(maps, before); 685 + __map_groups__insert(pos->groups, before); 692 686 if (verbose >= 2) 693 687 map__fprintf(before, fp); 694 688 } ··· 702 696 } 703 697 704 698 after->start = map->end; 705 - __maps__insert(maps, after); 699 + __map_groups__insert(pos->groups, after); 706 700 if (verbose >= 2) 707 701 map__fprintf(after, fp); 708 702 }
+6 -5
tools/perf/util/parse-events.c
··· 632 632 struct bpf_object *obj; 633 633 634 634 obj = bpf__prepare_load(bpf_file_name, source); 635 - if (IS_ERR(obj) || !obj) { 635 + if (IS_ERR(obj)) { 636 636 char errbuf[BUFSIZ]; 637 637 int err; 638 638 639 - err = obj ? PTR_ERR(obj) : -EINVAL; 639 + err = PTR_ERR(obj); 640 640 641 641 if (err == -ENOTSUP) 642 642 snprintf(errbuf, sizeof(errbuf), 643 643 "BPF support is not compiled"); 644 644 else 645 - snprintf(errbuf, sizeof(errbuf), 646 - "BPF object file '%s' is invalid", 647 - bpf_file_name); 645 + bpf__strerror_prepare_load(bpf_file_name, 646 + source, 647 + -err, errbuf, 648 + sizeof(errbuf)); 648 649 649 650 data->error->help = strdup("(add -v to see detail)"); 650 651 data->error->str = strdup(errbuf);
+3 -3
tools/perf/util/probe-event.c
··· 1895 1895 sym = map__find_symbol(map, addr, NULL); 1896 1896 } else { 1897 1897 if (tp->symbol && !addr) { 1898 - ret = kernel_get_symbol_address_by_name(tp->symbol, 1899 - &addr, true, false); 1900 - if (ret < 0) 1898 + if (kernel_get_symbol_address_by_name(tp->symbol, 1899 + &addr, true, false) < 0) 1901 1900 goto out; 1902 1901 } 1903 1902 if (addr) { ··· 1904 1905 sym = __find_kernel_function(addr, &map); 1905 1906 } 1906 1907 } 1908 + 1907 1909 if (!sym) 1908 1910 goto out; 1909 1911
+6
tools/perf/util/probe-file.c
··· 138 138 char *p; 139 139 struct strlist *sl; 140 140 141 + if (fd < 0) 142 + return NULL; 143 + 141 144 sl = strlist__new(NULL, NULL); 142 145 143 146 fp = fdopen(dup(fd), "r"); ··· 273 270 struct str_node *ent; 274 271 const char *p; 275 272 int ret = -ENOENT; 273 + 274 + if (!plist) 275 + return -EINVAL; 276 276 277 277 namelist = __probe_file__get_namelist(fd, true); 278 278 if (!namelist)
+4 -4
tools/perf/util/session.c
··· 29 29 struct perf_data_file *file = session->file; 30 30 31 31 if (perf_session__read_header(session) < 0) { 32 - pr_err("incompatible file format (rerun with -v to learn more)"); 32 + pr_err("incompatible file format (rerun with -v to learn more)\n"); 33 33 return -1; 34 34 } 35 35 ··· 37 37 return 0; 38 38 39 39 if (!perf_evlist__valid_sample_type(session->evlist)) { 40 - pr_err("non matching sample_type"); 40 + pr_err("non matching sample_type\n"); 41 41 return -1; 42 42 } 43 43 44 44 if (!perf_evlist__valid_sample_id_all(session->evlist)) { 45 - pr_err("non matching sample_id_all"); 45 + pr_err("non matching sample_id_all\n"); 46 46 return -1; 47 47 } 48 48 49 49 if (!perf_evlist__valid_read_format(session->evlist)) { 50 - pr_err("non matching read_format"); 50 + pr_err("non matching read_format\n"); 51 51 return -1; 52 52 } 53 53
+5
tools/perf/util/stat-shadow.c
··· 413 413 ratio = total / avg; 414 414 415 415 fprintf(out, " # %8.0f cycles / elision ", ratio); 416 + } else if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK)) { 417 + if ((ratio = avg_stats(&walltime_nsecs_stats)) != 0) 418 + fprintf(out, " # %8.3f CPUs utilized ", avg / ratio); 419 + else 420 + fprintf(out, " "); 416 421 } else if (runtime_nsecs_stats[cpu].n != 0) { 417 422 char unit = 'M'; 418 423
+30
tools/perf/util/util.c
··· 3 3 #include "debug.h" 4 4 #include <api/fs/fs.h> 5 5 #include <sys/mman.h> 6 + #include <sys/utsname.h> 6 7 #ifdef HAVE_BACKTRACE_SUPPORT 7 8 #include <execinfo.h> 8 9 #endif ··· 665 664 666 665 closedir(dir); 667 666 return ret ? false : true; 667 + } 668 + 669 + int 670 + fetch_kernel_version(unsigned int *puint, char *str, 671 + size_t str_size) 672 + { 673 + struct utsname utsname; 674 + int version, patchlevel, sublevel, err; 675 + 676 + if (uname(&utsname)) 677 + return -1; 678 + 679 + if (str && str_size) { 680 + strncpy(str, utsname.release, str_size); 681 + str[str_size - 1] = '\0'; 682 + } 683 + 684 + err = sscanf(utsname.release, "%d.%d.%d", 685 + &version, &patchlevel, &sublevel); 686 + 687 + if (err != 3) { 688 + pr_debug("Unablt to get kernel version from uname '%s'\n", 689 + utsname.release); 690 + return -1; 691 + } 692 + 693 + if (puint) 694 + *puint = (version << 16) + (patchlevel << 8) + sublevel; 695 + return 0; 668 696 }
+8
tools/perf/util/util.h
··· 350 350 351 351 int get_stack_size(const char *str, unsigned long *_size); 352 352 353 + int fetch_kernel_version(unsigned int *puint, 354 + char *str, size_t str_sz); 355 + #define KVER_VERSION(x) (((x) >> 16) & 0xff) 356 + #define KVER_PATCHLEVEL(x) (((x) >> 8) & 0xff) 357 + #define KVER_SUBLEVEL(x) ((x) & 0xff) 358 + #define KVER_FMT "%d.%d.%d" 359 + #define KVER_PARAM(x) KVER_VERSION(x), KVER_PATCHLEVEL(x), KVER_SUBLEVEL(x) 360 + 353 361 #endif /* GIT_COMPAT_UTIL_H */