Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
3#include <ctype.h>
4#include <stdio.h>
5#include <stdlib.h>
6#include <string.h>
7#include <libelf.h>
8#include <gelf.h>
9#include <unistd.h>
10#include <linux/ptrace.h>
11#include <linux/kernel.h>
12
13/* s8 will be marked as poison while it's a reg of riscv */
14#if defined(__riscv)
15#define rv_s8 s8
16#endif
17
18#include "bpf.h"
19#include "libbpf.h"
20#include "libbpf_common.h"
21#include "libbpf_internal.h"
22#include "hashmap.h"
23
24/* libbpf's USDT support consists of BPF-side state/code and user-space
25 * state/code working together in concert. BPF-side parts are defined in
26 * usdt.bpf.h header library. User-space state is encapsulated by struct
27 * usdt_manager and all the supporting code centered around usdt_manager.
28 *
29 * usdt.bpf.h defines two BPF maps that usdt_manager expects: USDT spec map
30 * and IP-to-spec-ID map, which is auxiliary map necessary for kernels that
31 * don't support BPF cookie (see below). These two maps are implicitly
32 * embedded into user's end BPF object file when user's code included
33 * usdt.bpf.h. This means that libbpf doesn't do anything special to create
34 * these USDT support maps. They are created by normal libbpf logic of
35 * instantiating BPF maps when opening and loading BPF object.
36 *
37 * As such, libbpf is basically unaware of the need to do anything
38 * USDT-related until the very first call to bpf_program__attach_usdt(), which
39 * can be called by user explicitly or happen automatically during skeleton
40 * attach (or, equivalently, through generic bpf_program__attach() call). At
41 * this point, libbpf will instantiate and initialize struct usdt_manager and
42 * store it in bpf_object. USDT manager is per-BPF object construct, as each
43 * independent BPF object might or might not have USDT programs, and thus all
44 * the expected USDT-related state. There is no coordination between two
45 * bpf_object in parts of USDT attachment, they are oblivious of each other's
46 * existence and libbpf is just oblivious, dealing with bpf_object-specific
47 * USDT state.
48 *
49 * Quick crash course on USDTs.
50 *
51 * From user-space application's point of view, USDT is essentially just
52 * a slightly special function call that normally has zero overhead, unless it
53 * is being traced by some external entity (e.g, BPF-based tool). Here's how
54 * a typical application can trigger USDT probe:
55 *
56 * #include <sys/sdt.h> // provided by systemtap-sdt-devel package
57 * // folly also provide similar functionality in folly/tracing/StaticTracepoint.h
58 *
59 * STAP_PROBE3(my_usdt_provider, my_usdt_probe_name, 123, x, &y);
60 *
61 * USDT is identified by its <provider-name>:<probe-name> pair of names. Each
62 * individual USDT has a fixed number of arguments (3 in the above example)
63 * and specifies values of each argument as if it was a function call.
64 *
65 * USDT call is actually not a function call, but is instead replaced by
66 * a single NOP instruction (thus zero overhead, effectively). But in addition
67 * to that, those USDT macros generate special SHT_NOTE ELF records in
68 * .note.stapsdt ELF section. Here's an example USDT definition as emitted by
69 * `readelf -n <binary>`:
70 *
71 * stapsdt 0x00000089 NT_STAPSDT (SystemTap probe descriptors)
72 * Provider: test
73 * Name: usdt12
74 * Location: 0x0000000000549df3, Base: 0x00000000008effa4, Semaphore: 0x0000000000a4606e
75 * Arguments: -4@-1204(%rbp) -4@%edi -8@-1216(%rbp) -8@%r8 -4@$5 -8@%r9 8@%rdx 8@%r10 -4@$-9 -2@%cx -2@%ax -1@%sil
76 *
77 * In this case we have USDT test:usdt12 with 12 arguments.
78 *
79 * Location and base are offsets used to calculate absolute IP address of that
80 * NOP instruction that kernel can replace with an interrupt instruction to
81 * trigger instrumentation code (BPF program for all that we care about).
82 *
83 * Semaphore above is an optional feature. It records an address of a 2-byte
84 * refcount variable (normally in '.probes' ELF section) used for signaling if
85 * there is anything that is attached to USDT. This is useful for user
86 * applications if, for example, they need to prepare some arguments that are
87 * passed only to USDTs and preparation is expensive. By checking if USDT is
88 * "activated", an application can avoid paying those costs unnecessarily.
89 * Recent enough kernel has built-in support for automatically managing this
90 * refcount, which libbpf expects and relies on. If USDT is defined without
91 * associated semaphore, this value will be zero. See selftests for semaphore
92 * examples.
93 *
94 * Arguments is the most interesting part. This USDT specification string is
95 * providing information about all the USDT arguments and their locations. The
96 * part before @ sign defined byte size of the argument (1, 2, 4, or 8) and
97 * whether the argument is signed or unsigned (negative size means signed).
98 * The part after @ sign is assembly-like definition of argument location
99 * (see [0] for more details). Technically, assembler can provide some pretty
100 * advanced definitions, but libbpf is currently supporting three most common
101 * cases:
102 * 1) immediate constant, see 5th and 9th args above (-4@$5 and -4@-9);
103 * 2) register value, e.g., 8@%rdx, which means "unsigned 8-byte integer
104 * whose value is in register %rdx";
105 * 3) memory dereference addressed by register, e.g., -4@-1204(%rbp), which
106 * specifies signed 32-bit integer stored at offset -1204 bytes from
107 * memory address stored in %rbp.
108 *
109 * [0] https://sourceware.org/systemtap/wiki/UserSpaceProbeImplementation
110 *
111 * During attachment, libbpf parses all the relevant USDT specifications and
112 * prepares `struct usdt_spec` (USDT spec), which is then provided to BPF-side
113 * code through spec map. This allows BPF applications to quickly fetch the
114 * actual value at runtime using a simple BPF-side code.
115 *
116 * With basics out of the way, let's go over less immediately obvious aspects
117 * of supporting USDTs.
118 *
119 * First, there is no special USDT BPF program type. It is actually just
120 * a uprobe BPF program (which for kernel, at least currently, is just a kprobe
121 * program, so BPF_PROG_TYPE_KPROBE program type). With the only difference
122 * that uprobe is usually attached at the function entry, while USDT will
123 * normally be somewhere inside the function. But it should always be
124 * pointing to NOP instruction, which makes such uprobes the fastest uprobe
125 * kind.
126 *
127 * Second, it's important to realize that such STAP_PROBEn(provider, name, ...)
128 * macro invocations can end up being inlined many-many times, depending on
129 * specifics of each individual user application. So single conceptual USDT
130 * (identified by provider:name pair of identifiers) is, generally speaking,
131 * multiple uprobe locations (USDT call sites) in different places in user
132 * application. Further, again due to inlining, each USDT call site might end
133 * up having the same argument #N be located in a different place. In one call
134 * site it could be a constant, in another will end up in a register, and in
135 * yet another could be some other register or even somewhere on the stack.
136 *
137 * As such, "attaching to USDT" means (in general case) attaching the same
138 * uprobe BPF program to multiple target locations in user application, each
139 * potentially having a completely different USDT spec associated with it.
140 * To wire all this up together libbpf allocates a unique integer spec ID for
141 * each unique USDT spec. Spec IDs are allocated as sequential small integers
142 * so that they can be used as keys in array BPF map (for performance reasons).
143 * Spec ID allocation and accounting is big part of what usdt_manager is
144 * about. This state has to be maintained per-BPF object and coordinate
145 * between different USDT attachments within the same BPF object.
146 *
147 * Spec ID is the key in spec BPF map, value is the actual USDT spec layed out
148 * as struct usdt_spec. Each invocation of BPF program at runtime needs to
149 * know its associated spec ID. It gets it either through BPF cookie, which
150 * libbpf sets to spec ID during attach time, or, if kernel is too old to
151 * support BPF cookie, through IP-to-spec-ID map that libbpf maintains in such
152 * case. The latter means that some modes of operation can't be supported
153 * without BPF cookie. Such a mode is attaching to shared library "generically",
154 * without specifying target process. In such case, it's impossible to
155 * calculate absolute IP addresses for IP-to-spec-ID map, and thus such mode
156 * is not supported without BPF cookie support.
157 *
158 * Note that libbpf is using BPF cookie functionality for its own internal
159 * needs, so user itself can't rely on BPF cookie feature. To that end, libbpf
160 * provides conceptually equivalent USDT cookie support. It's still u64
161 * user-provided value that can be associated with USDT attachment. Note that
162 * this will be the same value for all USDT call sites within the same single
163 * *logical* USDT attachment. This makes sense because to user attaching to
164 * USDT is a single BPF program triggered for singular USDT probe. The fact
165 * that this is done at multiple actual locations is a mostly hidden
166 * implementation details. This USDT cookie value can be fetched with
167 * bpf_usdt_cookie(ctx) API provided by usdt.bpf.h
168 *
169 * Lastly, while single USDT can have tons of USDT call sites, it doesn't
170 * necessarily have that many different USDT specs. It very well might be
171 * that 1000 USDT call sites only need 5 different USDT specs, because all the
172 * arguments are typically contained in a small set of registers or stack
173 * locations. As such, it's wasteful to allocate as many USDT spec IDs as
174 * there are USDT call sites. So libbpf tries to be frugal and performs
175 * on-the-fly deduplication during a single USDT attachment to only allocate
176 * the minimal required amount of unique USDT specs (and thus spec IDs). This
177 * is trivially achieved by using USDT spec string (Arguments string from USDT
178 * note) as a lookup key in a hashmap. USDT spec string uniquely defines
179 * everything about how to fetch USDT arguments, so two USDT call sites
180 * sharing USDT spec string can safely share the same USDT spec and spec ID.
181 * Note, this spec string deduplication is happening only during the same USDT
182 * attachment, so each USDT spec shares the same USDT cookie value. This is
183 * not generally true for other USDT attachments within the same BPF object,
184 * as even if USDT spec string is the same, USDT cookie value can be
185 * different. It was deemed excessive to try to deduplicate across independent
186 * USDT attachments by taking into account USDT spec string *and* USDT cookie
187 * value, which would complicate spec ID accounting significantly for little
188 * gain.
189 */
190
191#define USDT_BASE_SEC ".stapsdt.base"
192#define USDT_SEMA_SEC ".probes"
193#define USDT_NOTE_SEC ".note.stapsdt"
194#define USDT_NOTE_TYPE 3
195#define USDT_NOTE_NAME "stapsdt"
196
/* should match exactly enum __bpf_usdt_arg_type from usdt.bpf.h */
enum usdt_arg_type {
	USDT_ARG_CONST = 0,	/* immediate constant, e.g., -4@$5 */
	USDT_ARG_REG = 1,	/* value resides in a register, e.g., 8@%rdx */
	USDT_ARG_REG_DEREF = 2,	/* memory dereference off a register, e.g., -4@-1204(%rbp) */
	USDT_ARG_SIB = 3,	/* scale-index-base addressing mode */
};
204
/* should match exactly struct __bpf_usdt_arg_spec from usdt.bpf.h */
struct usdt_arg_spec {
	/* constant value or dereference offset; exact meaning depends on
	 * arg_type (presumably mirrors __bpf_usdt_arg_spec — confirm
	 * against usdt.bpf.h)
	 */
	__u64 val_off;
	/* bit-fields are laid out in opposite order on LE vs BE hosts so
	 * that reg_off below lands at the same byte offset either way
	 */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	enum usdt_arg_type arg_type: 8;
	__u16 idx_reg_off: 12;
	__u16 scale_bitshift: 4;
	__u8 __reserved: 8; /* keep reg_off offset stable */
#else
	__u8 __reserved: 8; /* keep reg_off offset stable */
	__u16 idx_reg_off: 12;
	__u16 scale_bitshift: 4;
	enum usdt_arg_type arg_type: 8;
#endif
	/* base register offset; BPF side uses it to locate the register
	 * value (presumably within struct pt_regs — see usdt.bpf.h)
	 */
	short reg_off;
	bool arg_signed;	/* whether the argument is a signed integer */
	/* bit shift amount used to truncate/sign-extend fetched value to
	 * the argument's declared byte size
	 */
	char arg_bitshift;
};
223
/* should match BPF_USDT_MAX_ARG_CNT in usdt.bpf.h */
#define USDT_MAX_ARG_CNT 12

/* should match struct __bpf_usdt_spec from usdt.bpf.h;
 * this is the value stored in the spec BPF map, keyed by spec ID
 */
struct usdt_spec {
	struct usdt_arg_spec args[USDT_MAX_ARG_CNT];	/* per-argument fetch specs */
	/* user-provided cookie, shared by all call sites of a single
	 * logical USDT attachment (see big comment at the top)
	 */
	__u64 usdt_cookie;
	short arg_cnt;	/* number of valid entries in args[] */
};
233
/* Parsed contents of a single SHT_NOTE record from the .note.stapsdt
 * ELF section; all strings point into ELF-owned memory.
 */
struct usdt_note {
	const char *provider;	/* USDT provider name */
	const char *name;	/* USDT probe name */
	/* USDT args specification string, e.g.:
	 * "-4@%esi -4@-24(%rbp) -4@%ecx 2@%ax 8@%rdx"
	 */
	const char *args;
	long loc_addr;	/* link-time address of the probe's NOP instruction */
	long base_addr;	/* link-time address of .stapsdt.base, for prelink compensation */
	long sema_addr;	/* address of 2-byte semaphore refcount variable, zero if none */
};
245
/* A single resolved USDT attachment point (call site) */
struct usdt_target {
	long abs_ip;	/* absolute in-process virtual address of the call site */
	long rel_ip;	/* file offset used for uprobe attachment */
	long sema_off;	/* file offset of the semaphore variable, zero if none */
	struct usdt_spec spec;	/* parsed USDT spec for this call site */
	/* raw Arguments string; references ELF-owned memory, so it can be
	 * used safely only until elf_end() is called
	 */
	const char *spec_str;
};
253
/* Per-bpf_object state coordinating all USDT attachments within that
 * object (see big comment at the top of the file)
 */
struct usdt_manager {
	struct bpf_map *specs_map;		/* spec ID -> struct usdt_spec */
	struct bpf_map *ip_to_spec_id_map;	/* abs IP -> spec ID, for pre-BPF-cookie kernels */

	/* pool of spec IDs returned by detached links, reused by later attaches */
	int *free_spec_ids;
	size_t free_spec_cnt;
	size_t next_free_spec_id;

	/* detected kernel features, probed once in usdt_manager_new() */
	bool has_bpf_cookie;
	bool has_sema_refcnt;
	bool has_uprobe_multi;
	bool has_uprobe_syscall;
};
267
268struct usdt_manager *usdt_manager_new(struct bpf_object *obj)
269{
270 static const char *ref_ctr_sysfs_path = "/sys/bus/event_source/devices/uprobe/format/ref_ctr_offset";
271 struct usdt_manager *man;
272 struct bpf_map *specs_map, *ip_to_spec_id_map;
273
274 specs_map = bpf_object__find_map_by_name(obj, "__bpf_usdt_specs");
275 ip_to_spec_id_map = bpf_object__find_map_by_name(obj, "__bpf_usdt_ip_to_spec_id");
276 if (!specs_map || !ip_to_spec_id_map) {
277 pr_warn("usdt: failed to find USDT support BPF maps, did you forget to include bpf/usdt.bpf.h?\n");
278 return ERR_PTR(-ESRCH);
279 }
280
281 man = calloc(1, sizeof(*man));
282 if (!man)
283 return ERR_PTR(-ENOMEM);
284
285 man->specs_map = specs_map;
286 man->ip_to_spec_id_map = ip_to_spec_id_map;
287
288 /* Detect if BPF cookie is supported for kprobes.
289 * We don't need IP-to-ID mapping if we can use BPF cookies.
290 * Added in: 7adfc6c9b315 ("bpf: Add bpf_get_attach_cookie() BPF helper to access bpf_cookie value")
291 */
292 man->has_bpf_cookie = kernel_supports(obj, FEAT_BPF_COOKIE);
293
294 /* Detect kernel support for automatic refcounting of USDT semaphore.
295 * If this is not supported, USDTs with semaphores will not be supported.
296 * Added in: a6ca88b241d5 ("trace_uprobe: support reference counter in fd-based uprobe")
297 */
298 man->has_sema_refcnt = faccessat(AT_FDCWD, ref_ctr_sysfs_path, F_OK, AT_EACCESS) == 0;
299
300 /*
301 * Detect kernel support for uprobe multi link to be used for attaching
302 * usdt probes.
303 */
304 man->has_uprobe_multi = kernel_supports(obj, FEAT_UPROBE_MULTI_LINK);
305
306 /*
307 * Detect kernel support for uprobe() syscall, it's presence means we can
308 * take advantage of faster nop5 uprobe handling.
309 * Added in: 56101b69c919 ("uprobes/x86: Add uprobe syscall to speed up uprobe")
310 */
311 man->has_uprobe_syscall = kernel_supports(obj, FEAT_UPROBE_SYSCALL);
312 return man;
313}
314
315void usdt_manager_free(struct usdt_manager *man)
316{
317 if (IS_ERR_OR_NULL(man))
318 return;
319
320 free(man->free_spec_ids);
321 free(man);
322}
323
324static int sanity_check_usdt_elf(Elf *elf, const char *path)
325{
326 GElf_Ehdr ehdr;
327 int endianness;
328
329 if (elf_kind(elf) != ELF_K_ELF) {
330 pr_warn("usdt: unrecognized ELF kind %d for '%s'\n", elf_kind(elf), path);
331 return -EBADF;
332 }
333
334 switch (gelf_getclass(elf)) {
335 case ELFCLASS64:
336 if (sizeof(void *) != 8) {
337 pr_warn("usdt: attaching to 64-bit ELF binary '%s' is not supported\n", path);
338 return -EBADF;
339 }
340 break;
341 case ELFCLASS32:
342 if (sizeof(void *) != 4) {
343 pr_warn("usdt: attaching to 32-bit ELF binary '%s' is not supported\n", path);
344 return -EBADF;
345 }
346 break;
347 default:
348 pr_warn("usdt: unsupported ELF class for '%s'\n", path);
349 return -EBADF;
350 }
351
352 if (!gelf_getehdr(elf, &ehdr))
353 return -EINVAL;
354
355 if (ehdr.e_type != ET_EXEC && ehdr.e_type != ET_DYN) {
356 pr_warn("usdt: unsupported type of ELF binary '%s' (%d), only ET_EXEC and ET_DYN are supported\n",
357 path, ehdr.e_type);
358 return -EBADF;
359 }
360
361#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
362 endianness = ELFDATA2LSB;
363#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
364 endianness = ELFDATA2MSB;
365#else
366# error "Unrecognized __BYTE_ORDER__"
367#endif
368 if (endianness != ehdr.e_ident[EI_DATA]) {
369 pr_warn("usdt: ELF endianness mismatch for '%s'\n", path);
370 return -EBADF;
371 }
372
373 return 0;
374}
375
376static int find_elf_sec_by_name(Elf *elf, const char *sec_name, GElf_Shdr *shdr, Elf_Scn **scn)
377{
378 Elf_Scn *sec = NULL;
379 size_t shstrndx;
380
381 if (elf_getshdrstrndx(elf, &shstrndx))
382 return -EINVAL;
383
384 /* check if ELF is corrupted and avoid calling elf_strptr if yes */
385 if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL))
386 return -EINVAL;
387
388 while ((sec = elf_nextscn(elf, sec)) != NULL) {
389 char *name;
390
391 if (!gelf_getshdr(sec, shdr))
392 return -EINVAL;
393
394 name = elf_strptr(elf, shstrndx, shdr->sh_name);
395 if (name && strcmp(sec_name, name) == 0) {
396 *scn = sec;
397 return 0;
398 }
399 }
400
401 return -ENOENT;
402}
403
/* One mapped segment: either a PT_LOAD ELF program header or a VMA
 * parsed from /proc/<pid>/maps. Covers address range [start, end).
 */
struct elf_seg {
	long start;
	long end;
	long offset;	/* file offset corresponding to 'start' */
	bool is_exec;	/* segment is mapped executable */
};

/* qsort() comparator ordering segments by start address.
 * Fixed to return 0 for equal keys: the previous 'a < b ? -1 : 1' form
 * returned 1 for equal elements in either argument order, which is an
 * inconsistent comparison function and undefined behavior for qsort().
 */
static int cmp_elf_segs(const void *_a, const void *_b)
{
	const struct elf_seg *a = _a;
	const struct elf_seg *b = _b;

	return (a->start > b->start) - (a->start < b->start);
}
418
419static int parse_elf_segs(Elf *elf, const char *path, struct elf_seg **segs, size_t *seg_cnt)
420{
421 GElf_Phdr phdr;
422 size_t n;
423 int i, err;
424 struct elf_seg *seg;
425 void *tmp;
426
427 *seg_cnt = 0;
428
429 if (elf_getphdrnum(elf, &n)) {
430 err = -errno;
431 return err;
432 }
433
434 for (i = 0; i < n; i++) {
435 if (!gelf_getphdr(elf, i, &phdr)) {
436 err = -errno;
437 return err;
438 }
439
440 pr_debug("usdt: discovered PHDR #%d in '%s': vaddr 0x%lx memsz 0x%lx offset 0x%lx type 0x%lx flags 0x%lx\n",
441 i, path, (long)phdr.p_vaddr, (long)phdr.p_memsz, (long)phdr.p_offset,
442 (long)phdr.p_type, (long)phdr.p_flags);
443 if (phdr.p_type != PT_LOAD)
444 continue;
445
446 tmp = libbpf_reallocarray(*segs, *seg_cnt + 1, sizeof(**segs));
447 if (!tmp)
448 return -ENOMEM;
449
450 *segs = tmp;
451 seg = *segs + *seg_cnt;
452 (*seg_cnt)++;
453
454 seg->start = phdr.p_vaddr;
455 seg->end = phdr.p_vaddr + phdr.p_memsz;
456 seg->offset = phdr.p_offset;
457 seg->is_exec = phdr.p_flags & PF_X;
458 }
459
460 if (*seg_cnt == 0) {
461 pr_warn("usdt: failed to find PT_LOAD program headers in '%s'\n", path);
462 return -ESRCH;
463 }
464
465 qsort(*segs, *seg_cnt, sizeof(**segs), cmp_elf_segs);
466 return 0;
467}
468
/* Parse /proc/<pid>/maps and collect all executable memory mappings of
 * lib_path within process pid into a start-address-sorted array of
 * elf_segs. Caller owns returned *segs memory.
 * Returns 0 on success, negative error code otherwise.
 */
static int parse_vma_segs(int pid, const char *lib_path, struct elf_seg **segs, size_t *seg_cnt)
{
	char path[PATH_MAX], line[PATH_MAX], mode[16];
	size_t seg_start, seg_end, seg_off;
	struct elf_seg *seg;
	int tmp_pid, i, err;
	FILE *f;

	*seg_cnt = 0;

	/* Handle containerized binaries only accessible from
	 * /proc/<pid>/root/<path>. They will be reported as just /<path> in
	 * /proc/<pid>/maps.
	 */
	if (sscanf(lib_path, "/proc/%d/root%s", &tmp_pid, path) == 2 && pid == tmp_pid)
		goto proceed;

	/* canonicalize the path so it matches what /proc/<pid>/maps reports;
	 * on failure fall back to the caller-provided path as-is
	 */
	if (!realpath(lib_path, path)) {
		pr_warn("usdt: failed to get absolute path of '%s' (err %s), using path as is...\n",
			lib_path, errstr(-errno));
		libbpf_strlcpy(path, lib_path, sizeof(path));
	}

proceed:
	sprintf(line, "/proc/%d/maps", pid);
	f = fopen(line, "re");	/* "e" requests O_CLOEXEC */
	if (!f) {
		err = -errno;
		pr_warn("usdt: failed to open '%s' to get base addr of '%s': %s\n",
			line, lib_path, errstr(err));
		return err;
	}

	/* We need to handle lines with no path at the end:
	 *
	 * 7f5c6f5d1000-7f5c6f5d3000 rw-p 001c7000 08:04 21238613      /usr/lib64/libc-2.17.so
	 * 7f5c6f5d3000-7f5c6f5d8000 rw-p 00000000 00:00 0
	 * 7f5c6f5d8000-7f5c6f5d9000 r-xp 00000000 103:01 362990598    /data/users/andriin/linux/tools/bpf/usdt/libhello_usdt.so
	 */
	while (fscanf(f, "%zx-%zx %s %zx %*s %*d%[^\n]\n",
		      &seg_start, &seg_end, mode, &seg_off, line) == 5) {
		void *tmp;

		/* to handle no path case (see above) we need to capture line
		 * without skipping any whitespaces. So we need to strip
		 * leading whitespaces manually here
		 */
		i = 0;
		while (isblank(line[i]))
			i++;
		if (strcmp(line + i, path) != 0)
			continue;

		pr_debug("usdt: discovered segment for lib '%s': addrs %zx-%zx mode %s offset %zx\n",
			 path, seg_start, seg_end, mode, seg_off);

		/* ignore non-executable sections for shared libs;
		 * mode is "rwxp"-style, so index 2 is the 'x' flag
		 */
		if (mode[2] != 'x')
			continue;

		tmp = libbpf_reallocarray(*segs, *seg_cnt + 1, sizeof(**segs));
		if (!tmp) {
			err = -ENOMEM;
			goto err_out;
		}

		*segs = tmp;
		seg = *segs + *seg_cnt;
		*seg_cnt += 1;

		seg->start = seg_start;
		seg->end = seg_end;
		seg->offset = seg_off;
		seg->is_exec = true;	/* non-executable mappings were skipped above */
	}

	if (*seg_cnt == 0) {
		pr_warn("usdt: failed to find '%s' (resolved to '%s') within PID %d memory mappings\n",
			lib_path, path, pid);
		err = -ESRCH;
		goto err_out;
	}

	/* sort by start address so lookups behave like find_elf_seg()'s */
	qsort(*segs, *seg_cnt, sizeof(**segs), cmp_elf_segs);
	err = 0;
err_out:
	fclose(f);
	return err;
}
558
559static struct elf_seg *find_elf_seg(struct elf_seg *segs, size_t seg_cnt, long virtaddr)
560{
561 struct elf_seg *seg;
562 int i;
563
564 /* for ELF binaries (both executables and shared libraries), we are
565 * given virtual address (absolute for executables, relative for
566 * libraries) which should match address range of [seg_start, seg_end)
567 */
568 for (i = 0, seg = segs; i < seg_cnt; i++, seg++) {
569 if (seg->start <= virtaddr && virtaddr < seg->end)
570 return seg;
571 }
572 return NULL;
573}
574
575static struct elf_seg *find_vma_seg(struct elf_seg *segs, size_t seg_cnt, long offset)
576{
577 struct elf_seg *seg;
578 int i;
579
580 /* for VMA segments from /proc/<pid>/maps file, provided "address" is
581 * actually a file offset, so should be fall within logical
582 * offset-based range of [offset_start, offset_end)
583 */
584 for (i = 0, seg = segs; i < seg_cnt; i++, seg++) {
585 if (seg->offset <= offset && offset < seg->offset + (seg->end - seg->start))
586 return seg;
587 }
588 return NULL;
589}
590
591static int parse_usdt_note(GElf_Nhdr *nhdr, const char *data, size_t name_off,
592 size_t desc_off, struct usdt_note *usdt_note);
593
594static int parse_usdt_spec(struct usdt_spec *spec, const struct usdt_note *note, __u64 usdt_cookie);
595
#if defined(__x86_64__)
/* Check whether the bytes at file offset 'off' are the nop,nop5
 * instruction combo; if so, the uprobe can be placed on the nop5 for
 * optimized handling (x86-64 only).
 */
static bool has_nop_combo(int fd, long off)
{
	static const unsigned char nop_combo[6] = {
		0x90, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nop,nop5 */
	};
	unsigned char buf[sizeof(nop_combo)];

	if (pread(fd, buf, sizeof(buf), off) != sizeof(buf))
		return false;
	return memcmp(buf, nop_combo, sizeof(buf)) == 0;
}
#else
/* no nop,nop5 combo recognition on non-x86-64 architectures */
static bool has_nop_combo(int fd, long off)
{
	return false;
}
#endif
614
/* Scan the ELF's .note.stapsdt section and collect every call site of
 * USDT <usdt_provider>:<usdt_name> into a usdt_target array, resolving
 * note-recorded virtual addresses into file offsets (rel_ip) and, when
 * required, absolute in-process addresses (abs_ip).
 * On success returns the number of targets found and sets
 * *out_targets/*out_target_cnt (caller frees the array); returns
 * a negative error code on failure.
 */
static int collect_usdt_targets(struct usdt_manager *man, struct elf_fd *elf_fd, const char *path,
				pid_t pid, const char *usdt_provider, const char *usdt_name,
				__u64 usdt_cookie, struct usdt_target **out_targets,
				size_t *out_target_cnt)
{
	size_t off, name_off, desc_off, seg_cnt = 0, vma_seg_cnt = 0, target_cnt = 0;
	struct elf_seg *segs = NULL, *vma_segs = NULL;
	struct usdt_target *targets = NULL, *target;
	Elf *elf = elf_fd->elf;
	long base_addr = 0;
	Elf_Scn *notes_scn, *base_scn;
	GElf_Shdr base_shdr, notes_shdr;
	GElf_Ehdr ehdr;
	GElf_Nhdr nhdr;
	Elf_Data *data;
	int err;

	*out_targets = NULL;
	*out_target_cnt = 0;

	err = find_elf_sec_by_name(elf, USDT_NOTE_SEC, &notes_shdr, &notes_scn);
	if (err) {
		pr_warn("usdt: no USDT notes section (%s) found in '%s'\n", USDT_NOTE_SEC, path);
		return err;
	}

	if (notes_shdr.sh_type != SHT_NOTE || !gelf_getehdr(elf, &ehdr)) {
		pr_warn("usdt: invalid USDT notes section (%s) in '%s'\n", USDT_NOTE_SEC, path);
		return -EINVAL;
	}

	err = parse_elf_segs(elf, path, &segs, &seg_cnt);
	if (err) {
		pr_warn("usdt: failed to process ELF program segments for '%s': %s\n",
			path, errstr(err));
		goto err_out;
	}

	/* .stapsdt.base ELF section is optional, but is used for prelink
	 * offset compensation (see a big comment further below)
	 */
	if (find_elf_sec_by_name(elf, USDT_BASE_SEC, &base_shdr, &base_scn) == 0)
		base_addr = base_shdr.sh_addr;

	/* iterate over all notes in the section; gelf_getnote() returns the
	 * offset of the next note, or 0 when done/invalid
	 */
	data = elf_getdata(notes_scn, 0);
	off = 0;
	while ((off = gelf_getnote(data, off, &nhdr, &name_off, &desc_off)) > 0) {
		long usdt_abs_ip, usdt_rel_ip, usdt_sema_off = 0;
		struct usdt_note note;
		struct elf_seg *seg = NULL;
		void *tmp;

		err = parse_usdt_note(&nhdr, data->d_buf, name_off, desc_off, &note);
		if (err)
			goto err_out;

		/* skip notes for other provider:name USDTs */
		if (strcmp(note.provider, usdt_provider) != 0 || strcmp(note.name, usdt_name) != 0)
			continue;

		/* We need to compensate "prelink effect". See [0] for details,
		 * relevant parts quoted here:
		 *
		 * Each SDT probe also expands into a non-allocated ELF note. You can
		 * find this by looking at SHT_NOTE sections and decoding the format;
		 * see below for details. Because the note is non-allocated, it means
		 * there is no runtime cost, and also preserved in both stripped files
		 * and .debug files.
		 *
		 * However, this means that prelink won't adjust the note's contents
		 * for address offsets. Instead, this is done via the .stapsdt.base
		 * section. This is a special section that is added to the text. We
		 * will only ever have one of these sections in a final link and it
		 * will only ever be one byte long. Nothing about this section itself
		 * matters, we just use it as a marker to detect prelink address
		 * adjustments.
		 *
		 * Each probe note records the link-time address of the .stapsdt.base
		 * section alongside the probe PC address. The decoder compares the
		 * base address stored in the note with the .stapsdt.base section's
		 * sh_addr. Initially these are the same, but the section header will
		 * be adjusted by prelink. So the decoder applies the difference to
		 * the probe PC address to get the correct prelinked PC address; the
		 * same adjustment is applied to the semaphore address, if any.
		 *
		 * [0] https://sourceware.org/systemtap/wiki/UserSpaceProbeImplementation
		 */
		usdt_abs_ip = note.loc_addr;
		if (base_addr && note.base_addr)
			usdt_abs_ip += base_addr - note.base_addr;

		/* When attaching uprobes (which is what USDTs basically are)
		 * kernel expects file offset to be specified, not a relative
		 * virtual address, so we need to translate virtual address to
		 * file offset, for both ET_EXEC and ET_DYN binaries.
		 */
		seg = find_elf_seg(segs, seg_cnt, usdt_abs_ip);
		if (!seg) {
			err = -ESRCH;
			pr_warn("usdt: failed to find ELF program segment for '%s:%s' in '%s' at IP 0x%lx\n",
				usdt_provider, usdt_name, path, usdt_abs_ip);
			goto err_out;
		}
		if (!seg->is_exec) {
			err = -ESRCH;
			pr_warn("usdt: matched ELF binary '%s' segment [0x%lx, 0x%lx) for '%s:%s' at IP 0x%lx is not executable\n",
				path, seg->start, seg->end, usdt_provider, usdt_name,
				usdt_abs_ip);
			goto err_out;
		}
		/* translate from virtual address to file offset */
		usdt_rel_ip = usdt_abs_ip - seg->start + seg->offset;

		if (ehdr.e_type == ET_DYN && !man->has_bpf_cookie) {
			/* If we don't have BPF cookie support but need to
			 * attach to a shared library, we'll need to know and
			 * record absolute addresses of attach points due to
			 * the need to lookup USDT spec by absolute IP of
			 * triggered uprobe. Doing this resolution is only
			 * possible when we have a specific PID of the process
			 * that's using specified shared library. BPF cookie
			 * removes the absolute address limitation as we don't
			 * need to do this lookup (we just use BPF cookie as
			 * an index of USDT spec), so for newer kernels with
			 * BPF cookie support libbpf supports USDT attachment
			 * to shared libraries with no PID filter.
			 */
			if (pid < 0) {
				pr_warn("usdt: attaching to shared libraries without specific PID is not supported on current kernel\n");
				err = -ENOTSUP;
				goto err_out;
			}

			/* vma_segs are lazily initialized only if necessary */
			if (vma_seg_cnt == 0) {
				err = parse_vma_segs(pid, path, &vma_segs, &vma_seg_cnt);
				if (err) {
					pr_warn("usdt: failed to get memory segments in PID %d for shared library '%s': %s\n",
						pid, path, errstr(err));
					goto err_out;
				}
			}

			seg = find_vma_seg(vma_segs, vma_seg_cnt, usdt_rel_ip);
			if (!seg) {
				err = -ESRCH;
				pr_warn("usdt: failed to find shared lib memory segment for '%s:%s' in '%s' at relative IP 0x%lx\n",
					usdt_provider, usdt_name, path, usdt_rel_ip);
				goto err_out;
			}

			/* map file offset back to this process' absolute address */
			usdt_abs_ip = seg->start - seg->offset + usdt_rel_ip;
		}

		pr_debug("usdt: probe for '%s:%s' in %s '%s': addr 0x%lx base 0x%lx (resolved abs_ip 0x%lx rel_ip 0x%lx) args '%s' in segment [0x%lx, 0x%lx) at offset 0x%lx\n",
			 usdt_provider, usdt_name, ehdr.e_type == ET_EXEC ? "exec" : "lib ", path,
			 note.loc_addr, note.base_addr, usdt_abs_ip, usdt_rel_ip, note.args,
			 seg ? seg->start : 0, seg ? seg->end : 0, seg ? seg->offset : 0);

		/* Adjust semaphore address to be a file offset */
		if (note.sema_addr) {
			if (!man->has_sema_refcnt) {
				pr_warn("usdt: kernel doesn't support USDT semaphore refcounting for '%s:%s' in '%s'\n",
					usdt_provider, usdt_name, path);
				err = -ENOTSUP;
				goto err_out;
			}

			seg = find_elf_seg(segs, seg_cnt, note.sema_addr);
			if (!seg) {
				err = -ESRCH;
				pr_warn("usdt: failed to find ELF loadable segment with semaphore of '%s:%s' in '%s' at 0x%lx\n",
					usdt_provider, usdt_name, path, note.sema_addr);
				goto err_out;
			}
			/* semaphore is data, so it must live in a non-exec segment */
			if (seg->is_exec) {
				err = -ESRCH;
				pr_warn("usdt: matched ELF binary '%s' segment [0x%lx, 0x%lx] for semaphore of '%s:%s' at 0x%lx is executable\n",
					path, seg->start, seg->end, usdt_provider, usdt_name,
					note.sema_addr);
				goto err_out;
			}

			usdt_sema_off = note.sema_addr - seg->start + seg->offset;

			pr_debug("usdt: sema for '%s:%s' in %s '%s': addr 0x%lx base 0x%lx (resolved 0x%lx) in segment [0x%lx, 0x%lx] at offset 0x%lx\n",
				 usdt_provider, usdt_name, ehdr.e_type == ET_EXEC ? "exec" : "lib ",
				 path, note.sema_addr, note.base_addr, usdt_sema_off,
				 seg->start, seg->end, seg->offset);
		}

		/* Record adjusted addresses and offsets and parse USDT spec */
		tmp = libbpf_reallocarray(targets, target_cnt + 1, sizeof(*targets));
		if (!tmp) {
			err = -ENOMEM;
			goto err_out;
		}
		targets = tmp;

		target = &targets[target_cnt];
		memset(target, 0, sizeof(*target));

		/*
		 * We have uprobe syscall and usdt with nop,nop5 instructions combo,
		 * so we can place the uprobe directly on nop5 (+1) and get this probe
		 * optimized.
		 */
		if (man->has_uprobe_syscall && has_nop_combo(elf_fd->fd, usdt_rel_ip)) {
			usdt_abs_ip++;
			usdt_rel_ip++;
		}

		target->abs_ip = usdt_abs_ip;
		target->rel_ip = usdt_rel_ip;
		target->sema_off = usdt_sema_off;

		/* notes.args references strings from ELF itself, so they can
		 * be referenced safely until elf_end() call
		 */
		target->spec_str = note.args;

		err = parse_usdt_spec(&target->spec, &note, usdt_cookie);
		if (err)
			goto err_out;

		target_cnt++;
	}

	*out_targets = targets;
	*out_target_cnt = target_cnt;
	/* success path reports target count through positive return value */
	err = target_cnt;

err_out:
	free(segs);
	free(vma_segs);
	if (err < 0)
		free(targets);
	return err;
}
853
/* USDT-flavored bpf_link, covering all uprobes created for one logical
 * USDT attachment.
 */
struct bpf_link_usdt {
	struct bpf_link link;

	struct usdt_manager *usdt_man;	/* owning manager, used on detach */

	/* spec IDs allocated for this attachment; handed back to
	 * usdt_manager's free list on detach for reuse
	 */
	size_t spec_cnt;
	int *spec_ids;

	/* per-call-site uprobe links; uprobe_cnt is 0 when multi_link is
	 * used instead
	 */
	size_t uprobe_cnt;
	struct {
		long abs_ip;	/* absolute IP, key into ip_to_spec_id map */
		struct bpf_link *link;
	} *uprobes;

	struct bpf_link *multi_link;	/* uprobe-multi link, if supported */
};
870
/* bpf_link detach callback for USDT links: tear down underlying uprobe
 * link(s), remove auxiliary IP-to-spec-ID map entries (only needed when BPF
 * cookies are unsupported), and hand this link's spec IDs back to
 * usdt_manager's free list so subsequent attachments can reuse them.
 */
static int bpf_link_usdt_detach(struct bpf_link *link)
{
	struct bpf_link_usdt *usdt_link = container_of(link, struct bpf_link_usdt, link);
	struct usdt_manager *man = usdt_link->usdt_man;
	int i;

	bpf_link__destroy(usdt_link->multi_link);

	/* When having multi_link, uprobe_cnt is 0 */
	for (i = 0; i < usdt_link->uprobe_cnt; i++) {
		/* detach underlying uprobe link */
		bpf_link__destroy(usdt_link->uprobes[i].link);
		/* there is no need to update specs map because it will be
		 * unconditionally overwritten on subsequent USDT attaches,
		 * but if BPF cookies are not used we need to remove entry
		 * from ip_to_spec_id map, otherwise we'll run into false
		 * conflicting IP errors
		 */
		if (!man->has_bpf_cookie) {
			/* not much we can do about errors here */
			(void)bpf_map_delete_elem(bpf_map__fd(man->ip_to_spec_id_map),
						  &usdt_link->uprobes[i].abs_ip);
		}
	}

	/* try to return the list of previously used spec IDs to usdt_manager
	 * for future reuse for subsequent USDT attaches
	 */
	if (!man->free_spec_ids) {
		/* if there were no free spec IDs yet, just transfer our IDs */
		man->free_spec_ids = usdt_link->spec_ids;
		man->free_spec_cnt = usdt_link->spec_cnt;
		usdt_link->spec_ids = NULL;
	} else {
		/* otherwise concat IDs */
		size_t new_cnt = man->free_spec_cnt + usdt_link->spec_cnt;
		int *new_free_ids;

		new_free_ids = libbpf_reallocarray(man->free_spec_ids, new_cnt,
						   sizeof(*new_free_ids));
		/* If we couldn't resize free_spec_ids, we'll just leak
		 * a bunch of free IDs; this is very unlikely to happen and if
		 * system is so exhausted on memory, it's the least of user's
		 * concerns, probably.
		 * So just do our best here to return those IDs to usdt_manager.
		 * Another edge case when we can legitimately get NULL is when
		 * new_cnt is zero, which can happen in some edge cases, so we
		 * need to be careful about that.
		 */
		if (new_free_ids || new_cnt == 0) {
			memcpy(new_free_ids + man->free_spec_cnt, usdt_link->spec_ids,
			       usdt_link->spec_cnt * sizeof(*usdt_link->spec_ids));
			man->free_spec_ids = new_free_ids;
			man->free_spec_cnt = new_cnt;
		}
	}

	return 0;
}
930
931static void bpf_link_usdt_dealloc(struct bpf_link *link)
932{
933 struct bpf_link_usdt *usdt_link = container_of(link, struct bpf_link_usdt, link);
934
935 free(usdt_link->spec_ids);
936 free(usdt_link->uprobes);
937 free(usdt_link);
938}
939
/* Hash callback for specs_hash; keys are USDT spec strings cast to long */
static size_t specs_hash_fn(long key, void *ctx)
{
	const char *spec_str = (const char *)key;

	return str_hash(spec_str);
}
944
/* Equality callback for specs_hash; keys are USDT spec strings cast to long */
static bool specs_equal_fn(long key1, long key2, void *ctx)
{
	const char *s1 = (const char *)key1;
	const char *s2 = (const char *)key2;

	return strcmp(s1, s2) == 0;
}
949
950static int allocate_spec_id(struct usdt_manager *man, struct hashmap *specs_hash,
951 struct bpf_link_usdt *link, struct usdt_target *target,
952 int *spec_id, bool *is_new)
953{
954 long tmp;
955 void *new_ids;
956 int err;
957
958 /* check if we already allocated spec ID for this spec string */
959 if (hashmap__find(specs_hash, target->spec_str, &tmp)) {
960 *spec_id = tmp;
961 *is_new = false;
962 return 0;
963 }
964
965 /* otherwise it's a new ID that needs to be set up in specs map and
966 * returned back to usdt_manager when USDT link is detached
967 */
968 new_ids = libbpf_reallocarray(link->spec_ids, link->spec_cnt + 1, sizeof(*link->spec_ids));
969 if (!new_ids)
970 return -ENOMEM;
971 link->spec_ids = new_ids;
972
973 /* get next free spec ID, giving preference to free list, if not empty */
974 if (man->free_spec_cnt) {
975 *spec_id = man->free_spec_ids[man->free_spec_cnt - 1];
976
977 /* cache spec ID for current spec string for future lookups */
978 err = hashmap__add(specs_hash, target->spec_str, *spec_id);
979 if (err)
980 return err;
981
982 man->free_spec_cnt--;
983 } else {
984 /* don't allocate spec ID bigger than what fits in specs map */
985 if (man->next_free_spec_id >= bpf_map__max_entries(man->specs_map))
986 return -E2BIG;
987
988 *spec_id = man->next_free_spec_id;
989
990 /* cache spec ID for current spec string for future lookups */
991 err = hashmap__add(specs_hash, target->spec_str, *spec_id);
992 if (err)
993 return err;
994
995 man->next_free_spec_id++;
996 }
997
998 /* remember new spec ID in the link for later return back to free list on detach */
999 link->spec_ids[link->spec_cnt] = *spec_id;
1000 link->spec_cnt++;
1001 *is_new = true;
1002 return 0;
1003}
1004
/* Attach BPF program to every USDT call site matching
 * 'usdt_provider:usdt_name' in the ELF binary/library at 'path', optionally
 * restricted to process 'pid' (pid < 0 means any process, pid == 0 means
 * current process). On success returns the USDT link embedding either
 * per-site uprobe links or a single multi-uprobe link; on failure returns
 * an error pointer. Caller owns the returned link.
 */
struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct bpf_program *prog,
					  pid_t pid, const char *path,
					  const char *usdt_provider, const char *usdt_name,
					  __u64 usdt_cookie)
{
	unsigned long *offsets = NULL, *ref_ctr_offsets = NULL;
	int i, err, spec_map_fd, ip_map_fd;
	LIBBPF_OPTS(bpf_uprobe_opts, opts);
	struct hashmap *specs_hash = NULL;
	struct bpf_link_usdt *link = NULL;
	struct usdt_target *targets = NULL;
	__u64 *cookies = NULL;
	struct elf_fd elf_fd;
	size_t target_cnt;

	spec_map_fd = bpf_map__fd(man->specs_map);
	ip_map_fd = bpf_map__fd(man->ip_to_spec_id_map);

	err = elf_open(path, &elf_fd);
	if (err)
		return libbpf_err_ptr(err);

	err = sanity_check_usdt_elf(elf_fd.elf, path);
	if (err)
		goto err_out;

	/* normalize PID filter */
	if (pid < 0)
		pid = -1;
	else if (pid == 0)
		pid = getpid();

	/* discover USDT in given binary, optionally limiting
	 * activations to a given PID, if pid > 0
	 */
	err = collect_usdt_targets(man, &elf_fd, path, pid, usdt_provider, usdt_name,
				   usdt_cookie, &targets, &target_cnt);
	if (err <= 0) {
		/* zero matching targets is reported to the caller as -ENOENT */
		err = (err == 0) ? -ENOENT : err;
		goto err_out;
	}

	specs_hash = hashmap__new(specs_hash_fn, specs_equal_fn, NULL);
	if (IS_ERR(specs_hash)) {
		err = PTR_ERR(specs_hash);
		goto err_out;
	}

	link = calloc(1, sizeof(*link));
	if (!link) {
		err = -ENOMEM;
		goto err_out;
	}

	link->usdt_man = man;
	link->link.detach = &bpf_link_usdt_detach;
	link->link.dealloc = &bpf_link_usdt_dealloc;

	/* multi-uprobe mode batches all offsets/cookies into temporary arrays
	 * for a single attach call; otherwise one uprobe link per call site
	 */
	if (man->has_uprobe_multi) {
		offsets = calloc(target_cnt, sizeof(*offsets));
		cookies = calloc(target_cnt, sizeof(*cookies));
		ref_ctr_offsets = calloc(target_cnt, sizeof(*ref_ctr_offsets));

		if (!offsets || !ref_ctr_offsets || !cookies) {
			err = -ENOMEM;
			goto err_out;
		}
	} else {
		link->uprobes = calloc(target_cnt, sizeof(*link->uprobes));
		if (!link->uprobes) {
			err = -ENOMEM;
			goto err_out;
		}
	}

	for (i = 0; i < target_cnt; i++) {
		struct usdt_target *target = &targets[i];
		struct bpf_link *uprobe_link;
		bool is_new;
		int spec_id;

		/* Spec ID can be either reused or newly allocated. If it is
		 * newly allocated, we'll need to fill out spec map, otherwise
		 * entire spec should be valid and can be just used by a new
		 * uprobe. We reuse spec when USDT arg spec is identical. We
		 * also never share specs between two different USDT
		 * attachments ("links"), so all the reused specs already
		 * share USDT cookie value implicitly.
		 */
		err = allocate_spec_id(man, specs_hash, link, target, &spec_id, &is_new);
		if (err)
			goto err_out;

		if (is_new && bpf_map_update_elem(spec_map_fd, &spec_id, &target->spec, BPF_ANY)) {
			err = -errno;
			pr_warn("usdt: failed to set USDT spec #%d for '%s:%s' in '%s': %s\n",
				spec_id, usdt_provider, usdt_name, path, errstr(err));
			goto err_out;
		}
		/* without BPF cookie support, BPF side resolves spec ID by
		 * absolute IP; BPF_NOEXIST detects conflicting entries
		 */
		if (!man->has_bpf_cookie &&
		    bpf_map_update_elem(ip_map_fd, &target->abs_ip, &spec_id, BPF_NOEXIST)) {
			err = -errno;
			if (err == -EEXIST) {
				pr_warn("usdt: IP collision detected for spec #%d for '%s:%s' in '%s'\n",
					spec_id, usdt_provider, usdt_name, path);
			} else {
				pr_warn("usdt: failed to map IP 0x%lx to spec #%d for '%s:%s' in '%s': %s\n",
					target->abs_ip, spec_id, usdt_provider, usdt_name,
					path, errstr(err));
			}
			goto err_out;
		}

		if (man->has_uprobe_multi) {
			offsets[i] = target->rel_ip;
			ref_ctr_offsets[i] = target->sema_off;
			cookies[i] = spec_id;
		} else {
			opts.ref_ctr_offset = target->sema_off;
			opts.bpf_cookie = man->has_bpf_cookie ? spec_id : 0;
			uprobe_link = bpf_program__attach_uprobe_opts(prog, pid, path,
								      target->rel_ip, &opts);
			err = libbpf_get_error(uprobe_link);
			if (err) {
				pr_warn("usdt: failed to attach uprobe #%d for '%s:%s' in '%s': %s\n",
					i, usdt_provider, usdt_name, path, errstr(err));
				goto err_out;
			}

			link->uprobes[i].link = uprobe_link;
			link->uprobes[i].abs_ip = target->abs_ip;
			link->uprobe_cnt++;
		}
	}

	if (man->has_uprobe_multi) {
		LIBBPF_OPTS(bpf_uprobe_multi_opts, opts_multi,
			.ref_ctr_offsets = ref_ctr_offsets,
			.offsets = offsets,
			.cookies = cookies,
			.cnt = target_cnt,
		);

		link->multi_link = bpf_program__attach_uprobe_multi(prog, pid, path,
								    NULL, &opts_multi);
		if (!link->multi_link) {
			err = -errno;
			pr_warn("usdt: failed to attach uprobe multi for '%s:%s' in '%s': %s\n",
				usdt_provider, usdt_name, path, errstr(err));
			goto err_out;
		}

		/* batch arrays were only needed for the attach call itself */
		free(offsets);
		free(ref_ctr_offsets);
		free(cookies);
	}

	free(targets);
	hashmap__free(specs_hash);
	elf_close(&elf_fd);
	return &link->link;

err_out:
	free(offsets);
	free(ref_ctr_offsets);
	free(cookies);

	/* destroying the link runs detach + dealloc callbacks, undoing any
	 * partially completed attachment state
	 */
	if (link)
		bpf_link__destroy(&link->link);
	free(targets);
	hashmap__free(specs_hash);
	elf_close(&elf_fd);
	return libbpf_err_ptr(err);
}
1179
/* Parse out USDT ELF note from '.note.stapsdt' section.
 * Logic inspired by perf's code.
 *
 * The note's description ("desc") layout, as emitted by the code in this
 * function's checks:
 *   - three native-word addresses: location, base, and semaphore;
 *   - three zero-terminated strings: provider, name, and args spec.
 * Returned note fields point into 'data', i.e., into ELF's own memory.
 */
static int parse_usdt_note(GElf_Nhdr *nhdr, const char *data, size_t name_off, size_t desc_off,
			   struct usdt_note *note)
{
	const char *provider, *name, *args;
	long addrs[3];
	size_t len;

	/* sanity check USDT note name and type first */
	if (strncmp(data + name_off, USDT_NOTE_NAME, nhdr->n_namesz) != 0)
		return -EINVAL;
	if (nhdr->n_type != USDT_NOTE_TYPE)
		return -EINVAL;

	/* sanity check USDT note contents ("description" in ELF terminology) */
	len = nhdr->n_descsz;
	data = data + desc_off;

	/* +3 is the very minimum required to store three empty strings */
	if (len < sizeof(addrs) + 3)
		return -EINVAL;

	/* get location, base, and semaphore addrs */
	memcpy(&addrs, data, sizeof(addrs));

	/* parse string fields: provider, name, args */
	provider = data + sizeof(addrs);

	name = (const char *)memchr(provider, '\0', data + len - provider);
	if (!name) /* non-zero-terminated provider */
		return -EINVAL;
	name++;
	if (name >= data + len || *name == '\0') /* missing or empty name */
		return -EINVAL;

	args = memchr(name, '\0', data + len - name);
	if (!args) /* non-zero-terminated name */
		return -EINVAL;
	++args;
	if (args >= data + len) /* missing arguments spec */
		return -EINVAL;

	note->provider = provider;
	note->name = name;
	/* empty or ':'-prefixed args spec is normalized to "" (no arguments) */
	if (*args == '\0' || *args == ':')
		note->args = "";
	else
		note->args = args;
	note->loc_addr = addrs[0];
	note->base_addr = addrs[1];
	note->sema_addr = addrs[2];

	return 0;
}
1236
1237static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz);
1238
1239static int parse_usdt_spec(struct usdt_spec *spec, const struct usdt_note *note, __u64 usdt_cookie)
1240{
1241 struct usdt_arg_spec *arg;
1242 const char *s;
1243 int arg_sz, len;
1244
1245 spec->usdt_cookie = usdt_cookie;
1246 spec->arg_cnt = 0;
1247
1248 s = note->args;
1249 while (s[0]) {
1250 if (spec->arg_cnt >= USDT_MAX_ARG_CNT) {
1251 pr_warn("usdt: too many USDT arguments (> %d) for '%s:%s' with args spec '%s'\n",
1252 USDT_MAX_ARG_CNT, note->provider, note->name, note->args);
1253 return -E2BIG;
1254 }
1255
1256 arg = &spec->args[spec->arg_cnt];
1257 len = parse_usdt_arg(s, spec->arg_cnt, arg, &arg_sz);
1258 if (len < 0)
1259 return len;
1260
1261 arg->arg_signed = arg_sz < 0;
1262 if (arg_sz < 0)
1263 arg_sz = -arg_sz;
1264
1265 switch (arg_sz) {
1266 case 1: case 2: case 4: case 8:
1267 arg->arg_bitshift = 64 - arg_sz * 8;
1268 break;
1269 default:
1270 pr_warn("usdt: unsupported arg #%d (spec '%s') size: %d\n",
1271 spec->arg_cnt, s, arg_sz);
1272 return -EINVAL;
1273 }
1274
1275 s += len;
1276 spec->arg_cnt++;
1277 }
1278
1279 return 0;
1280}
1281
1282/* Architecture-specific logic for parsing USDT argument location specs */
1283
1284#if defined(__x86_64__) || defined(__i386__)
1285
/* Translate an x86/x86-64 register name, including its 32/16/8-bit aliases,
 * into the corresponding field offset within struct pt_regs.
 * Returns -ENOENT for unrecognized register names.
 */
static int calc_pt_regs_off(const char *reg_name)
{
	static struct {
		const char *names[4];
		size_t pt_regs_off;
	} reg_map[] = {
#ifdef __x86_64__
#define reg_off(reg64, reg32) offsetof(struct pt_regs, reg64)
#else
#define reg_off(reg64, reg32) offsetof(struct pt_regs, reg32)
#endif
		{ {"rip", "eip", "", ""}, reg_off(rip, eip) },
		{ {"rax", "eax", "ax", "al"}, reg_off(rax, eax) },
		{ {"rbx", "ebx", "bx", "bl"}, reg_off(rbx, ebx) },
		{ {"rcx", "ecx", "cx", "cl"}, reg_off(rcx, ecx) },
		{ {"rdx", "edx", "dx", "dl"}, reg_off(rdx, edx) },
		{ {"rsi", "esi", "si", "sil"}, reg_off(rsi, esi) },
		{ {"rdi", "edi", "di", "dil"}, reg_off(rdi, edi) },
		{ {"rbp", "ebp", "bp", "bpl"}, reg_off(rbp, ebp) },
		{ {"rsp", "esp", "sp", "spl"}, reg_off(rsp, esp) },
#undef reg_off
#ifdef __x86_64__
		{ {"r8", "r8d", "r8w", "r8b"}, offsetof(struct pt_regs, r8) },
		{ {"r9", "r9d", "r9w", "r9b"}, offsetof(struct pt_regs, r9) },
		{ {"r10", "r10d", "r10w", "r10b"}, offsetof(struct pt_regs, r10) },
		{ {"r11", "r11d", "r11w", "r11b"}, offsetof(struct pt_regs, r11) },
		{ {"r12", "r12d", "r12w", "r12b"}, offsetof(struct pt_regs, r12) },
		{ {"r13", "r13d", "r13w", "r13b"}, offsetof(struct pt_regs, r13) },
		{ {"r14", "r14d", "r14w", "r14b"}, offsetof(struct pt_regs, r14) },
		{ {"r15", "r15d", "r15w", "r15b"}, offsetof(struct pt_regs, r15) },
#endif
	};
	size_t r, a;

	/* linear scan over all registers and each register's name aliases */
	for (r = 0; r < ARRAY_SIZE(reg_map); r++) {
		for (a = 0; a < ARRAY_SIZE(reg_map[r].names); a++) {
			if (strcmp(reg_map[r].names[a], reg_name) == 0)
				return reg_map[r].pt_regs_off;
		}
	}

	pr_warn("usdt: unrecognized register '%s'\n", reg_name);
	return -ENOENT;
}
1330
/* Parse one x86/x86-64 USDT argument location spec into *arg.
 * Returns the number of characters of arg_str consumed, or negative error.
 * *arg_sz receives the argument size in bytes; negative size means signed.
 *
 * Branch order matters: SIB (base + index*scale) patterns must be tried
 * before the plain register-dereference pattern, because the latter's
 * %15[^)] scanset would otherwise swallow ",%idx,scale" as part of the
 * register name.
 */
static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
{
	char reg_name[16] = {0}, idx_reg_name[16] = {0};
	int len, reg_off, idx_reg_off, scale = 1;
	long off = 0;

	if (sscanf(arg_str, " %d @ %ld ( %%%15[^,] , %%%15[^,] , %d ) %n",
		   arg_sz, &off, reg_name, idx_reg_name, &scale, &len) == 5 ||
	    sscanf(arg_str, " %d @ ( %%%15[^,] , %%%15[^,] , %d ) %n",
		   arg_sz, reg_name, idx_reg_name, &scale, &len) == 4 ||
	    sscanf(arg_str, " %d @ %ld ( %%%15[^,] , %%%15[^)] ) %n",
		   arg_sz, &off, reg_name, idx_reg_name, &len) == 4 ||
	    sscanf(arg_str, " %d @ ( %%%15[^,] , %%%15[^)] ) %n",
		   arg_sz, reg_name, idx_reg_name, &len) == 3
	   ) {
		/*
		 * Scale Index Base case:
		 * 1@-96(%rbp,%rax,8)
		 * 1@(%rbp,%rax,8)
		 * 1@-96(%rbp,%rax)
		 * 1@(%rbp,%rax)
		 */
		arg->arg_type = USDT_ARG_SIB;
		arg->val_off = off;

		reg_off = calc_pt_regs_off(reg_name);
		if (reg_off < 0)
			return reg_off;
		arg->reg_off = reg_off;

		idx_reg_off = calc_pt_regs_off(idx_reg_name);
		if (idx_reg_off < 0)
			return idx_reg_off;
		arg->idx_reg_off = idx_reg_off;

		/* validate scale factor and set fields directly */
		switch (scale) {
		case 1: arg->scale_bitshift = 0; break;
		case 2: arg->scale_bitshift = 1; break;
		case 4: arg->scale_bitshift = 2; break;
		case 8: arg->scale_bitshift = 3; break;
		default:
			pr_warn("usdt: invalid SIB scale %d, expected 1, 2, 4, 8\n", scale);
			return -EINVAL;
		}
	} else if (sscanf(arg_str, " %d @ %ld ( %%%15[^)] ) %n",
			  arg_sz, &off, reg_name, &len) == 3) {
		/* Memory dereference case, e.g., -4@-20(%rbp) */
		arg->arg_type = USDT_ARG_REG_DEREF;
		arg->val_off = off;
		reg_off = calc_pt_regs_off(reg_name);
		if (reg_off < 0)
			return reg_off;
		arg->reg_off = reg_off;
	} else if (sscanf(arg_str, " %d @ ( %%%15[^)] ) %n", arg_sz, reg_name, &len) == 2) {
		/* Memory dereference case without offset, e.g., 8@(%rsp) */
		arg->arg_type = USDT_ARG_REG_DEREF;
		arg->val_off = 0;
		reg_off = calc_pt_regs_off(reg_name);
		if (reg_off < 0)
			return reg_off;
		arg->reg_off = reg_off;
	} else if (sscanf(arg_str, " %d @ %%%15s %n", arg_sz, reg_name, &len) == 2) {
		/* Register read case, e.g., -4@%eax */
		arg->arg_type = USDT_ARG_REG;
		/* register read has no memory offset */
		arg->val_off = 0;

		reg_off = calc_pt_regs_off(reg_name);
		if (reg_off < 0)
			return reg_off;
		arg->reg_off = reg_off;
	} else if (sscanf(arg_str, " %d @ $%ld %n", arg_sz, &off, &len) == 2) {
		/* Constant value case, e.g., 4@$71 */
		arg->arg_type = USDT_ARG_CONST;
		arg->val_off = off;
		arg->reg_off = 0;
	} else {
		pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
		return -EINVAL;
	}

	return len;
}
1415
1416#elif defined(__s390x__)
1417
1418static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
1419{
1420 unsigned int reg;
1421 int len;
1422 long off;
1423
1424 if (sscanf(arg_str, " %d @ %ld ( %%r%u ) %n", arg_sz, &off, ®, &len) == 3) {
1425 /* Memory dereference case, e.g., -2@-28(%r15) */
1426 arg->arg_type = USDT_ARG_REG_DEREF;
1427 arg->val_off = off;
1428 if (reg > 15) {
1429 pr_warn("usdt: unrecognized register '%%r%u'\n", reg);
1430 return -EINVAL;
1431 }
1432 arg->reg_off = offsetof(user_pt_regs, gprs[reg]);
1433 } else if (sscanf(arg_str, " %d @ %%r%u %n", arg_sz, ®, &len) == 2) {
1434 /* Register read case, e.g., -8@%r0 */
1435 arg->arg_type = USDT_ARG_REG;
1436 arg->val_off = 0;
1437 if (reg > 15) {
1438 pr_warn("usdt: unrecognized register '%%r%u'\n", reg);
1439 return -EINVAL;
1440 }
1441 arg->reg_off = offsetof(user_pt_regs, gprs[reg]);
1442 } else if (sscanf(arg_str, " %d @ %ld %n", arg_sz, &off, &len) == 2) {
1443 /* Constant value case, e.g., 4@71 */
1444 arg->arg_type = USDT_ARG_CONST;
1445 arg->val_off = off;
1446 arg->reg_off = 0;
1447 } else {
1448 pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
1449 return -EINVAL;
1450 }
1451
1452 return len;
1453}
1454
1455#elif defined(__aarch64__)
1456
1457static int calc_pt_regs_off(const char *reg_name)
1458{
1459 int reg_num;
1460
1461 if (sscanf(reg_name, "x%d", ®_num) == 1) {
1462 if (reg_num >= 0 && reg_num < 31)
1463 return offsetof(struct user_pt_regs, regs[reg_num]);
1464 } else if (strcmp(reg_name, "sp") == 0) {
1465 return offsetof(struct user_pt_regs, sp);
1466 }
1467 pr_warn("usdt: unrecognized register '%s'\n", reg_name);
1468 return -ENOENT;
1469}
1470
/* Parse one aarch64 USDT argument location spec into *arg.
 * Returns the number of characters of arg_str consumed, or negative error.
 * *arg_sz receives the argument size in bytes; negative size means signed.
 * NOTE(review): "\[" in the sscanf formats is an unknown C escape sequence;
 * compilers appear to treat it as a literal '[' — confirm this is intended.
 */
static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
{
	char reg_name[16];
	int len, reg_off;
	long off;

	if (sscanf(arg_str, " %d @ \[ %15[a-z0-9] , %ld ] %n", arg_sz, reg_name, &off, &len) == 3) {
		/* Memory dereference case, e.g., -4@[sp, 96] */
		arg->arg_type = USDT_ARG_REG_DEREF;
		arg->val_off = off;
		reg_off = calc_pt_regs_off(reg_name);
		if (reg_off < 0)
			return reg_off;
		arg->reg_off = reg_off;
	} else if (sscanf(arg_str, " %d @ \[ %15[a-z0-9] ] %n", arg_sz, reg_name, &len) == 2) {
		/* Memory dereference case, e.g., -4@[sp] */
		arg->arg_type = USDT_ARG_REG_DEREF;
		arg->val_off = 0;
		reg_off = calc_pt_regs_off(reg_name);
		if (reg_off < 0)
			return reg_off;
		arg->reg_off = reg_off;
	} else if (sscanf(arg_str, " %d @ %ld %n", arg_sz, &off, &len) == 2) {
		/* Constant value case, e.g., 4@5; must be tried before the
		 * register-read pattern, whose [a-z0-9] scanset would also
		 * match bare digits
		 */
		arg->arg_type = USDT_ARG_CONST;
		arg->val_off = off;
		arg->reg_off = 0;
	} else if (sscanf(arg_str, " %d @ %15[a-z0-9] %n", arg_sz, reg_name, &len) == 2) {
		/* Register read case, e.g., -8@x4 */
		arg->arg_type = USDT_ARG_REG;
		arg->val_off = 0;
		reg_off = calc_pt_regs_off(reg_name);
		if (reg_off < 0)
			return reg_off;
		arg->reg_off = reg_off;
	} else {
		pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
		return -EINVAL;
	}

	return len;
}
1513
1514#elif defined(__riscv)
1515
/* Map a RISC-V register name to its offset within struct user_regs_struct.
 * Returns -ENOENT for unrecognized names.
 */
static int calc_pt_regs_off(const char *reg_name)
{
	static struct {
		const char *name;
		size_t pt_regs_off;
	} reg_map[] = {
		{ "ra", offsetof(struct user_regs_struct, ra) },
		{ "sp", offsetof(struct user_regs_struct, sp) },
		{ "gp", offsetof(struct user_regs_struct, gp) },
		{ "tp", offsetof(struct user_regs_struct, tp) },
		{ "a0", offsetof(struct user_regs_struct, a0) },
		{ "a1", offsetof(struct user_regs_struct, a1) },
		{ "a2", offsetof(struct user_regs_struct, a2) },
		{ "a3", offsetof(struct user_regs_struct, a3) },
		{ "a4", offsetof(struct user_regs_struct, a4) },
		{ "a5", offsetof(struct user_regs_struct, a5) },
		{ "a6", offsetof(struct user_regs_struct, a6) },
		{ "a7", offsetof(struct user_regs_struct, a7) },
		{ "s0", offsetof(struct user_regs_struct, s0) },
		{ "s1", offsetof(struct user_regs_struct, s1) },
		{ "s2", offsetof(struct user_regs_struct, s2) },
		{ "s3", offsetof(struct user_regs_struct, s3) },
		{ "s4", offsetof(struct user_regs_struct, s4) },
		{ "s5", offsetof(struct user_regs_struct, s5) },
		{ "s6", offsetof(struct user_regs_struct, s6) },
		{ "s7", offsetof(struct user_regs_struct, s7) },
		/* rv_s8 aliases the s8 field: "s8" is a poisoned identifier
		 * on riscv (see the rv_s8 #define at the top of this file)
		 */
		{ "s8", offsetof(struct user_regs_struct, rv_s8) },
		{ "s9", offsetof(struct user_regs_struct, s9) },
		{ "s10", offsetof(struct user_regs_struct, s10) },
		{ "s11", offsetof(struct user_regs_struct, s11) },
		{ "t0", offsetof(struct user_regs_struct, t0) },
		{ "t1", offsetof(struct user_regs_struct, t1) },
		{ "t2", offsetof(struct user_regs_struct, t2) },
		{ "t3", offsetof(struct user_regs_struct, t3) },
		{ "t4", offsetof(struct user_regs_struct, t4) },
		{ "t5", offsetof(struct user_regs_struct, t5) },
		{ "t6", offsetof(struct user_regs_struct, t6) },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(reg_map); i++) {
		if (strcmp(reg_name, reg_map[i].name) == 0)
			return reg_map[i].pt_regs_off;
	}

	pr_warn("usdt: unrecognized register '%s'\n", reg_name);
	return -ENOENT;
}
1564
1565static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
1566{
1567 char reg_name[16];
1568 int len, reg_off;
1569 long off;
1570
1571 if (sscanf(arg_str, " %d @ %ld ( %15[a-z0-9] ) %n", arg_sz, &off, reg_name, &len) == 3) {
1572 /* Memory dereference case, e.g., -8@-88(s0) */
1573 arg->arg_type = USDT_ARG_REG_DEREF;
1574 arg->val_off = off;
1575 reg_off = calc_pt_regs_off(reg_name);
1576 if (reg_off < 0)
1577 return reg_off;
1578 arg->reg_off = reg_off;
1579 } else if (sscanf(arg_str, " %d @ %ld %n", arg_sz, &off, &len) == 2) {
1580 /* Constant value case, e.g., 4@5 */
1581 arg->arg_type = USDT_ARG_CONST;
1582 arg->val_off = off;
1583 arg->reg_off = 0;
1584 } else if (sscanf(arg_str, " %d @ %15[a-z0-9] %n", arg_sz, reg_name, &len) == 2) {
1585 /* Register read case, e.g., -8@a1 */
1586 arg->arg_type = USDT_ARG_REG;
1587 arg->val_off = 0;
1588 reg_off = calc_pt_regs_off(reg_name);
1589 if (reg_off < 0)
1590 return reg_off;
1591 arg->reg_off = reg_off;
1592 } else {
1593 pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
1594 return -EINVAL;
1595 }
1596
1597 return len;
1598}
1599
1600#elif defined(__arm__)
1601
1602static int calc_pt_regs_off(const char *reg_name)
1603{
1604 static struct {
1605 const char *name;
1606 size_t pt_regs_off;
1607 } reg_map[] = {
1608 { "r0", offsetof(struct pt_regs, uregs[0]) },
1609 { "r1", offsetof(struct pt_regs, uregs[1]) },
1610 { "r2", offsetof(struct pt_regs, uregs[2]) },
1611 { "r3", offsetof(struct pt_regs, uregs[3]) },
1612 { "r4", offsetof(struct pt_regs, uregs[4]) },
1613 { "r5", offsetof(struct pt_regs, uregs[5]) },
1614 { "r6", offsetof(struct pt_regs, uregs[6]) },
1615 { "r7", offsetof(struct pt_regs, uregs[7]) },
1616 { "r8", offsetof(struct pt_regs, uregs[8]) },
1617 { "r9", offsetof(struct pt_regs, uregs[9]) },
1618 { "r10", offsetof(struct pt_regs, uregs[10]) },
1619 { "fp", offsetof(struct pt_regs, uregs[11]) },
1620 { "ip", offsetof(struct pt_regs, uregs[12]) },
1621 { "sp", offsetof(struct pt_regs, uregs[13]) },
1622 { "lr", offsetof(struct pt_regs, uregs[14]) },
1623 { "pc", offsetof(struct pt_regs, uregs[15]) },
1624 };
1625 int i;
1626
1627 for (i = 0; i < ARRAY_SIZE(reg_map); i++) {
1628 if (strcmp(reg_name, reg_map[i].name) == 0)
1629 return reg_map[i].pt_regs_off;
1630 }
1631
1632 pr_warn("usdt: unrecognized register '%s'\n", reg_name);
1633 return -ENOENT;
1634}
1635
/* Parse one 32-bit ARM USDT argument location spec into *arg. ARM assembly
 * syntax prefixes immediates with '#'.
 * Returns the number of characters of arg_str consumed, or negative error.
 * *arg_sz receives the argument size in bytes; negative size means signed.
 * NOTE(review): "\[" in the sscanf formats is an unknown C escape sequence;
 * compilers appear to treat it as a literal '[' — confirm this is intended.
 */
static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
{
	char reg_name[16];
	int len, reg_off;
	long off;

	if (sscanf(arg_str, " %d @ \[ %15[a-z0-9] , #%ld ] %n",
		   arg_sz, reg_name, &off, &len) == 3) {
		/* Memory dereference case, e.g., -4@[fp, #96] */
		arg->arg_type = USDT_ARG_REG_DEREF;
		arg->val_off = off;
		reg_off = calc_pt_regs_off(reg_name);
		if (reg_off < 0)
			return reg_off;
		arg->reg_off = reg_off;
	} else if (sscanf(arg_str, " %d @ \[ %15[a-z0-9] ] %n", arg_sz, reg_name, &len) == 2) {
		/* Memory dereference case, e.g., -4@[sp] */
		arg->arg_type = USDT_ARG_REG_DEREF;
		arg->val_off = 0;
		reg_off = calc_pt_regs_off(reg_name);
		if (reg_off < 0)
			return reg_off;
		arg->reg_off = reg_off;
	} else if (sscanf(arg_str, " %d @ #%ld %n", arg_sz, &off, &len) == 2) {
		/* Constant value case, e.g., 4@#5 */
		arg->arg_type = USDT_ARG_CONST;
		arg->val_off = off;
		arg->reg_off = 0;
	} else if (sscanf(arg_str, " %d @ %15[a-z0-9] %n", arg_sz, reg_name, &len) == 2) {
		/* Register read case, e.g., -8@r4 */
		arg->arg_type = USDT_ARG_REG;
		arg->val_off = 0;
		reg_off = calc_pt_regs_off(reg_name);
		if (reg_off < 0)
			return reg_off;
		arg->reg_off = reg_off;
	} else {
		pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
		return -EINVAL;
	}

	return len;
}
1679
1680#else
1681
/* Fallback stub for architectures without USDT argument-spec parsing
 * support; always fails with -ENOTSUP.
 */
static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
{
	pr_warn("usdt: libbpf doesn't support USDTs on current architecture\n");
	return -ENOTSUP;
}
1687
1688#endif