Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
3#include <test_progs.h>
4
5#define _SDT_HAS_SEMAPHORES 1
6#include "../sdt.h"
7
8#include "test_usdt.skel.h"
9#include "test_urandom_usdt.skel.h"
10
/* NOTE(review): prototype appears unused in this file — presumably kept so
 * the symbol stays visible to other tests; confirm before removing.
 */
int lets_test_this(int);

/* volatile keeps the compiler from constant-folding USDT argument values */
static volatile int idx = 2;
static volatile __u64 bla = 0xFEDCBA9876543210ULL;
static volatile short nums[] = {-1, -2, -3, -4};

/* t1.y (signed char field) exercises 1-byte signed USDT argument handling */
static volatile struct {
	int x;
	signed char y;
} t1 = { 1, -127 };

#define SEC(name) __attribute__((section(name), used))

/* USDT semaphores live in the .probes section; they become non-zero while a
 * tracer is attached, gating the probe sites in trigger_func() below.
 */
unsigned short test_usdt0_semaphore SEC(".probes");
unsigned short test_usdt3_semaphore SEC(".probes");
unsigned short test_usdt12_semaphore SEC(".probes");
27
/* Fire the usdt0/usdt3/usdt12 probes with input x. Marked __always_inline so
 * every caller gets its own copy of each probe site (the tests rely on
 * multiple attach points per USDT). Semaphore checks skip un-attached probes.
 */
static void __always_inline trigger_func(int x) {
	long y = 42;

	if (test_usdt0_semaphore)
		STAP_PROBE(test, usdt0);
	if (test_usdt3_semaphore)
		STAP_PROBE3(test, usdt3, x, y, &bla);
	if (test_usdt12_semaphore) {
		/* mix of constants, locals, globals, addresses, and array/
		 * struct-field accesses to cover all arg spec forms
		 */
		STAP_PROBE12(test, usdt12,
			     x, x + 1, y, x + y, 5,
			     y / 7, bla, &bla, -9, nums[x],
			     nums[idx], t1.y);
	}
}
42
43#if defined(__x86_64__) || defined(__i386__)
44/*
45 * SIB (Scale-Index-Base) addressing format: "size@(base_reg, index_reg, scale)"
46 * - 'size' is the size in bytes of the array element, and its sign indicates
47 * whether the type is signed (negative) or unsigned (positive).
48 * - 'base_reg' is the register holding the base address, normally rdx or edx
49 * - 'index_reg' is the register holding the index, normally rax or eax
50 * - 'scale' is the scaling factor (typically 1, 2, 4, or 8), which matches the
51 * size of the element type.
52 *
53 * For example, for an array of 'short' (signed 2-byte elements), the SIB spec would be:
54 * - size: -2 (negative because 'short' is signed)
55 * - scale: 2 (since sizeof(short) == 2)
56 *
57 * The resulting SIB format: "-2@(%%rdx,%%rax,2)" for x86_64, "-2@(%%edx,%%eax,2)" for i386
58 */
/* base array for the SIB-addressed argument; volatile so the access survives */
static volatile short array[] = {-1, -2, -3, -4};

/* arg spec per the SIB format documented above: signed 2-byte element,
 * base in rdx/edx, index in rax/eax, scale 2
 */
#if defined(__x86_64__)
#define USDT_SIB_ARG_SPEC -2@(%%rdx,%%rax,2)
#else
#define USDT_SIB_ARG_SPEC -2@(%%edx,%%eax,2)
#endif

unsigned short test_usdt_sib_semaphore SEC(".probes");
68
/* Emit a hand-built usdt_sib probe whose single argument uses SIB addressing. */
static void trigger_sib_spec(void)
{
	/*
	 * Force SIB addressing with inline assembly.
	 *
	 * You must compile with -std=gnu99 or -std=c99 to use the
	 * STAP_PROBE_ASM macro.
	 *
	 * The STAP_PROBE_ASM macro generates a quoted string that gets
	 * inserted between the surrounding assembly instructions. In this
	 * case, USDT_SIB_ARG_SPEC is embedded directly into the instruction
	 * stream, creating a probe point between the asm statement boundaries.
	 * It works fine with gcc/clang.
	 *
	 * Register constraints:
	 * - "d"(array): Binds the 'array' variable to %rdx or %edx register
	 * - "a"(0): Binds the constant 0 to %rax or %eax register
	 * These ensure that when USDT_SIB_ARG_SPEC references %%rdx(%edx) and
	 * %%rax(%eax), they contain the expected values for SIB addressing.
	 *
	 * The "memory" clobber prevents the compiler from reordering memory
	 * accesses around the probe point, ensuring that the probe behavior
	 * is predictable and consistent.
	 */
	asm volatile(
		STAP_PROBE_ASM(test, usdt_sib, USDT_SIB_ARG_SPEC)
		:
		: "d"(array), "a"(0)
		: "memory"
	);
}
100#endif
101
/*
 * Core USDT test: auto-attach + manual attach (with cookie), argument
 * count/value/size extraction for 0-, 3- and 12-arg probes, detach and
 * re-attach, and (on x86) SIB-addressed arguments. With optimized=true each
 * trigger fires twice, so all call counters double accordingly.
 */
static void subtest_basic_usdt(bool optimized)
{
	LIBBPF_OPTS(bpf_usdt_opts, opts);
	struct test_usdt *skel;
	struct test_usdt__bss *bss;
	int err, i, called;
	const __u64 expected_cookie = 0xcafedeadbeeffeed;

/* trigger probes and evaluate to the number of trigger_func() invocations */
#define TRIGGER(x) ({ \
	trigger_func(x); \
	if (optimized) \
		trigger_func(x); \
	optimized ? 2 : 1; \
	})

	skel = test_usdt__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	bss = skel->bss;
	bss->my_pid = getpid();	/* BPF side filters events by our pid */

	err = test_usdt__attach(skel);
	if (!ASSERT_OK(err, "skel_attach"))
		goto cleanup;

	/* usdt0 won't be auto-attached */
	opts.usdt_cookie = expected_cookie;
	skel->links.usdt0 = bpf_program__attach_usdt(skel->progs.usdt0,
						     0 /*self*/, "/proc/self/exe",
						     "test", "usdt0", &opts);
	if (!ASSERT_OK_PTR(skel->links.usdt0, "usdt0_link"))
		goto cleanup;

#if defined(__x86_64__) || defined(__i386__)
	/* x86-only SIB probe is also attached manually, with the same cookie */
	opts.usdt_cookie = expected_cookie;
	skel->links.usdt_sib = bpf_program__attach_usdt(skel->progs.usdt_sib,
							0 /*self*/, "/proc/self/exe",
							"test", "usdt_sib", &opts);
	if (!ASSERT_OK_PTR(skel->links.usdt_sib, "usdt_sib_link"))
		goto cleanup;
#endif

	called = TRIGGER(1);

	ASSERT_EQ(bss->usdt0_called, called, "usdt0_called");
	ASSERT_EQ(bss->usdt3_called, called, "usdt3_called");
	ASSERT_EQ(bss->usdt12_called, called, "usdt12_called");

	/* usdt0 has zero args, so arg value/size queries report -ENOENT */
	ASSERT_EQ(bss->usdt0_cookie, expected_cookie, "usdt0_cookie");
	ASSERT_EQ(bss->usdt0_arg_cnt, 0, "usdt0_arg_cnt");
	ASSERT_EQ(bss->usdt0_arg_ret, -ENOENT, "usdt0_arg_ret");
	ASSERT_EQ(bss->usdt0_arg_size, -ENOENT, "usdt0_arg_size");

	/* auto-attached usdt3 gets default zero cookie value */
	ASSERT_EQ(bss->usdt3_cookie, 0, "usdt3_cookie");
	ASSERT_EQ(bss->usdt3_arg_cnt, 3, "usdt3_arg_cnt");

	ASSERT_EQ(bss->usdt3_arg_rets[0], 0, "usdt3_arg1_ret");
	ASSERT_EQ(bss->usdt3_arg_rets[1], 0, "usdt3_arg2_ret");
	ASSERT_EQ(bss->usdt3_arg_rets[2], 0, "usdt3_arg3_ret");
	ASSERT_EQ(bss->usdt3_args[0], 1, "usdt3_arg1");
	ASSERT_EQ(bss->usdt3_args[1], 42, "usdt3_arg2");
	ASSERT_EQ(bss->usdt3_args[2], (uintptr_t)&bla, "usdt3_arg3");
	ASSERT_EQ(bss->usdt3_arg_sizes[0], 4, "usdt3_arg1_size");
	ASSERT_EQ(bss->usdt3_arg_sizes[1], 8, "usdt3_arg2_size");
	ASSERT_EQ(bss->usdt3_arg_sizes[2], 8, "usdt3_arg3_size");

	/* auto-attached usdt12 gets default zero cookie value */
	ASSERT_EQ(bss->usdt12_cookie, 0, "usdt12_cookie");
	ASSERT_EQ(bss->usdt12_arg_cnt, 12, "usdt12_arg_cnt");

	/* expected values mirror the STAP_PROBE12() argument list (x=1, y=42) */
	ASSERT_EQ(bss->usdt12_args[0], 1, "usdt12_arg1");
	ASSERT_EQ(bss->usdt12_args[1], 1 + 1, "usdt12_arg2");
	ASSERT_EQ(bss->usdt12_args[2], 42, "usdt12_arg3");
	ASSERT_EQ(bss->usdt12_args[3], 42 + 1, "usdt12_arg4");
	ASSERT_EQ(bss->usdt12_args[4], 5, "usdt12_arg5");
	ASSERT_EQ(bss->usdt12_args[5], 42 / 7, "usdt12_arg6");
	ASSERT_EQ(bss->usdt12_args[6], bla, "usdt12_arg7");
	ASSERT_EQ(bss->usdt12_args[7], (uintptr_t)&bla, "usdt12_arg8");
	ASSERT_EQ(bss->usdt12_args[8], -9, "usdt12_arg9");
	ASSERT_EQ(bss->usdt12_args[9], nums[1], "usdt12_arg10");
	ASSERT_EQ(bss->usdt12_args[10], nums[idx], "usdt12_arg11");
	ASSERT_EQ(bss->usdt12_args[11], t1.y, "usdt12_arg12");

	int usdt12_expected_arg_sizes[12] = { 4, 4, 8, 8, 4, 8, 8, 8, 4, 2, 2, 1 };

	for (i = 0; i < 12; i++)
		ASSERT_EQ(bss->usdt12_arg_sizes[i], usdt12_expected_arg_sizes[i], "usdt12_arg_size");

	/* trigger_func() is marked __always_inline, so USDT invocations will be
	 * inlined in two different places, meaning that each USDT will have
	 * at least 2 different places to be attached to. This verifies that
	 * bpf_program__attach_usdt() handles this properly and attaches to
	 * all possible places of USDT invocation.
	 */
	called += TRIGGER(2);

	ASSERT_EQ(bss->usdt0_called, called, "usdt0_called");
	ASSERT_EQ(bss->usdt3_called, called, "usdt3_called");
	ASSERT_EQ(bss->usdt12_called, called, "usdt12_called");

	/* only check values that depend on trigger_func()'s input value */
	ASSERT_EQ(bss->usdt3_args[0], 2, "usdt3_arg1");

	ASSERT_EQ(bss->usdt12_args[0], 2, "usdt12_arg1");
	ASSERT_EQ(bss->usdt12_args[1], 2 + 1, "usdt12_arg2");
	ASSERT_EQ(bss->usdt12_args[3], 42 + 2, "usdt12_arg4");
	ASSERT_EQ(bss->usdt12_args[9], nums[2], "usdt12_arg10");

	/* detach and re-attach usdt3 */
	bpf_link__destroy(skel->links.usdt3);

	opts.usdt_cookie = 0xBADC00C51E;
	skel->links.usdt3 = bpf_program__attach_usdt(skel->progs.usdt3, -1 /* any pid */,
						     "/proc/self/exe", "test", "usdt3", &opts);
	if (!ASSERT_OK_PTR(skel->links.usdt3, "usdt3_reattach"))
		goto cleanup;

	called += TRIGGER(3);

	ASSERT_EQ(bss->usdt3_called, called, "usdt3_called");
	/* this time usdt3 has custom cookie */
	ASSERT_EQ(bss->usdt3_cookie, 0xBADC00C51E, "usdt3_cookie");
	ASSERT_EQ(bss->usdt3_arg_cnt, 3, "usdt3_arg_cnt");

	ASSERT_EQ(bss->usdt3_arg_rets[0], 0, "usdt3_arg1_ret");
	ASSERT_EQ(bss->usdt3_arg_rets[1], 0, "usdt3_arg2_ret");
	ASSERT_EQ(bss->usdt3_arg_rets[2], 0, "usdt3_arg3_ret");
	ASSERT_EQ(bss->usdt3_args[0], 3, "usdt3_arg1");
	ASSERT_EQ(bss->usdt3_args[1], 42, "usdt3_arg2");
	ASSERT_EQ(bss->usdt3_args[2], (uintptr_t)&bla, "usdt3_arg3");

#if defined(__x86_64__) || defined(__i386__)
	/* single asm-level call site, so exactly one hit regardless of
	 * 'optimized'; the SIB arg resolves to array[0]
	 */
	trigger_sib_spec();
	ASSERT_EQ(bss->usdt_sib_called, 1, "usdt_sib_called");
	ASSERT_EQ(bss->usdt_sib_cookie, expected_cookie, "usdt_sib_cookie");
	ASSERT_EQ(bss->usdt_sib_arg_cnt, 1, "usdt_sib_arg_cnt");
	ASSERT_EQ(bss->usdt_sib_arg, nums[0], "usdt_sib_arg");
	ASSERT_EQ(bss->usdt_sib_arg_ret, 0, "usdt_sib_arg_ret");
	ASSERT_EQ(bss->usdt_sib_arg_size, sizeof(nums[0]), "usdt_sib_arg_size");
#endif

cleanup:
	test_usdt__destroy(skel);
#undef TRIGGER
}
249
250#ifdef __x86_64__
/* probe-site functions defined elsewhere (assembly/another TU) */
extern void usdt_1(void);
extern void usdt_2(void);

/* instruction byte patterns: single 1-byte nop vs. 1-byte nop + 5-byte nop */
static unsigned char nop1[1] = { 0x90 };
static unsigned char nop1_nop5_combo[6] = { 0x90, 0x0f, 0x1f, 0x44, 0x00, 0x00 };
256
/*
 * Scan the first 10 bytes starting at fn for the cnt-byte instruction
 * pattern instr. Returns the address of the first match, or NULL if the
 * pattern does not occur within that window.
 */
static void *find_instr(void *fn, unsigned char *instr, size_t cnt)
{
	unsigned char *scan = fn;
	unsigned char *limit = scan + 10;

	for (; scan < limit; scan++) {
		if (memcmp(instr, scan, cnt) == 0)
			return scan;
	}

	return NULL;
}
267
/*
 * Verify uprobe attachment behavior on both probe layouts: usdt_1 (single
 * 1-byte nop, cannot be optimized — patched with int3) and usdt_2
 * (nop1+nop5 combo — optimized into a call on the nop5). Checks both the
 * patched instruction bytes and the BPF-side execution counter.
 */
static void subtest_optimized_attach(void)
{
	struct test_usdt *skel;
	__u8 *addr_1, *addr_2;

	/* usdt_1 USDT probe has single nop instruction */
	addr_1 = find_instr(usdt_1, nop1_nop5_combo, 6);
	if (!ASSERT_NULL(addr_1, "usdt_1_find_nop1_nop5_combo"))
		return;

	addr_1 = find_instr(usdt_1, nop1, 1);
	if (!ASSERT_OK_PTR(addr_1, "usdt_1_find_nop1"))
		return;

	/* usdt_2 USDT probe has nop,nop5 instructions combo */
	addr_2 = find_instr(usdt_2, nop1_nop5_combo, 6);
	if (!ASSERT_OK_PTR(addr_2, "usdt_2_find_nop1_nop5_combo"))
		return;

	skel = test_usdt__open_and_load();
	if (!ASSERT_OK_PTR(skel, "test_usdt__open_and_load"))
		return;

	skel->bss->expected_ip = (unsigned long) addr_1;

	/*
	 * Attach program on top of usdt_1 which is single nop probe,
	 * so the probe won't get optimized.
	 */
	skel->links.usdt_executed = bpf_program__attach_usdt(skel->progs.usdt_executed,
							     0 /*self*/, "/proc/self/exe",
							     "optimized_attach", "usdt_1", NULL);
	if (!ASSERT_OK_PTR(skel->links.usdt_executed, "bpf_program__attach_usdt"))
		goto cleanup;

	usdt_1();
	usdt_1();

	/* int3 is on addr_1 address */
	ASSERT_EQ(*addr_1, 0xcc, "int3");
	ASSERT_EQ(skel->bss->executed, 2, "executed");

	bpf_link__destroy(skel->links.usdt_executed);

	/* we expect the nop5 ip */
	skel->bss->expected_ip = (unsigned long) addr_2 + 1;

	/*
	 * Attach program on top of usdt_2 which is probe defined on top
	 * of nop1,nop5 combo, so the probe gets optimized on top of nop5.
	 */
	skel->links.usdt_executed = bpf_program__attach_usdt(skel->progs.usdt_executed,
							     0 /*self*/, "/proc/self/exe",
							     "optimized_attach", "usdt_2", NULL);
	if (!ASSERT_OK_PTR(skel->links.usdt_executed, "bpf_program__attach_usdt"))
		goto cleanup;

	usdt_2();
	usdt_2();

	/* nop stays on addr_2 address */
	ASSERT_EQ(*addr_2, 0x90, "nop");

	/* call is on addr_2 + 1 address */
	ASSERT_EQ(*(addr_2 + 1), 0xe8, "call");
	/* counter accumulates: 2 from usdt_1 above + 2 from usdt_2 */
	ASSERT_EQ(skel->bss->executed, 4, "executed");

cleanup:
	test_usdt__destroy(skel);
}
338#endif
339
unsigned short test_usdt_100_semaphore SEC(".probes");
unsigned short test_usdt_300_semaphore SEC(".probes");
unsigned short test_usdt_400_semaphore SEC(".probes");

/* R10(F, X) expands F(X+0)..F(X+9); R100 composes it for 100 expansions —
 * used below to stamp out large numbers of distinct USDT call sites
 */
#define R10(F, X) F(X+0); F(X+1);F(X+2); F(X+3); F(X+4); \
		  F(X+5); F(X+6); F(X+7); F(X+8); F(X+9);
#define R100(F, X) R10(F,X+ 0);R10(F,X+10);R10(F,X+20);R10(F,X+30);R10(F,X+40); \
		   R10(F,X+50);R10(F,X+60);R10(F,X+70);R10(F,X+80);R10(F,X+90);
348
/* carefully control that we get exactly 100 inlines by preventing inlining */
/* each inlined copy of f100() becomes a distinct usdt_100 call site */
static void __always_inline f100(int x)
{
	STAP_PROBE1(test, usdt_100, x);
}
354
/* __weak keeps this function out-of-line, so R100 yields exactly 100
 * inlined usdt_100 call sites within this single function body
 */
__weak void trigger_100_usdts(void)
{
	R100(f100, 0);
}
359
/* we shouldn't be able to attach to test:usdt_300 USDT as we don't have as
 * many slots for specs. It's important that each STAP_PROBE1() invocation
 * (after unrolling) gets a different arg spec due to the compiler inlining x
 * as a constant
 */
/* same pattern as f100(): one inlined copy == one distinct usdt_300 site */
static void __always_inline f300(int x)
{
	STAP_PROBE1(test, usdt_300, x);
}
369
/* 300 inlined usdt_300 call sites, each with a different constant argument */
__weak void trigger_300_usdts(void)
{
	R100(f300, 0);
	R100(f300, 100);
	R100(f300, 200);
}
376
/* x is intentionally unused: the probe always passes the constant 400, so
 * every inlined call site shares an identical arg spec (dedup-friendly)
 */
static void __always_inline f400(int x __attribute__((unused)))
{
	STAP_PROBE1(test, usdt_400, 400);
}
381
382/* this time we have 400 different USDT call sites, but they have uniform
383 * argument location, so libbpf's spec string deduplication logic should keep
384 * spec count use very small and so we should be able to attach to all 400
385 * call sites
386 */
/* 400 inlined usdt_400 call sites with one shared (deduplicated) arg spec */
__weak void trigger_400_usdts(void)
{
	R100(f400, 0);
	R100(f400, 100);
	R100(f400, 200);
	R100(f400, 300);
}
394
/*
 * Exercise libbpf's USDT spec ID management: all 100 usdt_100 sites are
 * attachable, freed spec IDs are reusable across re-attachments, attaching
 * to 300 distinct-spec sites overflows the spec table (-E2BIG), and 400
 * sites with a shared spec attach fine thanks to spec deduplication.
 */
static void subtest_multispec_usdt(void)
{
	LIBBPF_OPTS(bpf_usdt_opts, opts);
	struct test_usdt *skel;
	struct test_usdt__bss *bss;
	int err, i;

	skel = test_usdt__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	bss = skel->bss;
	bss->my_pid = getpid();

	err = test_usdt__attach(skel);
	if (!ASSERT_OK(err, "skel_attach"))
		goto cleanup;

	/* usdt_100 is auto-attached and there are 100 inlined call sites,
	 * let's validate that all of them are properly attached to and
	 * handled from BPF side
	 */
	trigger_100_usdts();

	ASSERT_EQ(bss->usdt_100_called, 100, "usdt_100_called");
	ASSERT_EQ(bss->usdt_100_sum, 99 * 100 / 2, "usdt_100_sum");

	/* Stress test free spec ID tracking. By default libbpf allows up to
	 * 256 specs to be used, so if we don't return free spec IDs back
	 * after few detachments and re-attachments we should run out of
	 * available spec IDs.
	 */
	for (i = 0; i < 2; i++) {
		bpf_link__destroy(skel->links.usdt_100);

		skel->links.usdt_100 = bpf_program__attach_usdt(skel->progs.usdt_100, -1,
								"/proc/self/exe",
								"test", "usdt_100", NULL);
		if (!ASSERT_OK_PTR(skel->links.usdt_100, "usdt_100_reattach"))
			goto cleanup;

		bss->usdt_100_sum = 0;
		trigger_100_usdts();

		/* called counter is cumulative, sum was reset above */
		ASSERT_EQ(bss->usdt_100_called, (i + 1) * 100 + 100, "usdt_100_called");
		ASSERT_EQ(bss->usdt_100_sum, 99 * 100 / 2, "usdt_100_sum");
	}

	/* Now let's step it up and try to attach USDT that requires more than
	 * 256 attach points with different specs for each.
	 * Note that we need trigger_300_usdts() only to actually have 300
	 * USDT call sites, we are not going to actually trace them.
	 */
	trigger_300_usdts();

	bpf_link__destroy(skel->links.usdt_100);

	bss->usdt_100_called = 0;
	bss->usdt_100_sum = 0;

	/* If built with arm64/clang, there will be much less number of specs
	 * for usdt_300 call sites.
	 */
#if !defined(__aarch64__) || !defined(__clang__)
	/* we'll reuse usdt_100 BPF program for usdt_300 test */
	skel->links.usdt_100 = bpf_program__attach_usdt(skel->progs.usdt_100, -1, "/proc/self/exe",
							"test", "usdt_300", NULL);
	err = -errno;
	if (!ASSERT_ERR_PTR(skel->links.usdt_100, "usdt_300_bad_attach"))
		goto cleanup;
	ASSERT_EQ(err, -E2BIG, "usdt_300_attach_err");

	/* let's check that there are no "dangling" BPF programs attached due
	 * to partial success of the above test:usdt_300 attachment
	 */
	f300(777); /* this is 301st instance of usdt_300 */

	ASSERT_EQ(bss->usdt_100_called, 0, "usdt_301_called");
	ASSERT_EQ(bss->usdt_100_sum, 0, "usdt_301_sum");
#endif

	/* This time we have USDT with 400 inlined invocations, but arg specs
	 * should be the same across all sites, so libbpf will only need to
	 * use one spec and thus we'll be able to attach 400 uprobes
	 * successfully.
	 *
	 * Again, we are reusing usdt_100 BPF program.
	 */
	skel->links.usdt_100 = bpf_program__attach_usdt(skel->progs.usdt_100, -1,
							"/proc/self/exe",
							"test", "usdt_400", NULL);
	if (!ASSERT_OK_PTR(skel->links.usdt_100, "usdt_400_attach"))
		goto cleanup;

	trigger_400_usdts();

	ASSERT_EQ(bss->usdt_100_called, 400, "usdt_400_called");
	ASSERT_EQ(bss->usdt_100_sum, 400 * 400, "usdt_400_sum");

cleanup:
	test_usdt__destroy(skel);
}
497
/*
 * Spawn the urandom_read helper binary and read its self-reported pid from
 * the pipe. Returns the still-open pipe stream on success; returns NULL
 * (with errno set to EINVAL on parse failure) otherwise.
 */
static FILE *urand_spawn(int *pid)
{
	/* urandom_read's stdout is wired into the returned stream */
	FILE *child_out = popen("./urandom_read 1 report-pid", "r");

	if (!child_out)
		return NULL;

	/* the first token the child prints is its pid */
	if (fscanf(child_out, "%d", pid) == 1)
		return child_out;

	pclose(child_out);
	errno = EINVAL;
	return NULL;
}
515
516static int urand_trigger(FILE **urand_pipe)
517{
518 int exit_code;
519
520 /* pclose() waits for child process to exit and returns their exit code */
521 exit_code = pclose(*urand_pipe);
522 *urand_pipe = NULL;
523
524 return exit_code;
525}
526
/*
 * Test USDT probes in a separate process (urandom_read) and its shared
 * library, with and without semaphores. With auto_attach the skeleton
 * attaches everything; otherwise each program is attached manually by pid.
 */
static void subtest_urandom_usdt(bool auto_attach)
{
	struct test_urandom_usdt *skel;
	struct test_urandom_usdt__bss *bss;
	struct bpf_link *l;
	FILE *urand_pipe = NULL;
	int err, urand_pid = 0;

	skel = test_urandom_usdt__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	/* child reports its pid over the pipe; we need it for pid-attach */
	urand_pipe = urand_spawn(&urand_pid);
	if (!ASSERT_OK_PTR(urand_pipe, "urand_spawn"))
		goto cleanup;

	bss = skel->bss;
	bss->urand_pid = urand_pid;

	if (auto_attach) {
		err = test_urandom_usdt__attach(skel);
		if (!ASSERT_OK(err, "skel_auto_attach"))
			goto cleanup;
	} else {
		/* attach each of the four programs by pid: binary and shared
		 * library, each with a sema-gated and a sema-less probe
		 */
		l = bpf_program__attach_usdt(skel->progs.urand_read_without_sema,
					     urand_pid, "./urandom_read",
					     "urand", "read_without_sema", NULL);
		if (!ASSERT_OK_PTR(l, "urand_without_sema_attach"))
			goto cleanup;
		skel->links.urand_read_without_sema = l;

		l = bpf_program__attach_usdt(skel->progs.urand_read_with_sema,
					     urand_pid, "./urandom_read",
					     "urand", "read_with_sema", NULL);
		if (!ASSERT_OK_PTR(l, "urand_with_sema_attach"))
			goto cleanup;
		skel->links.urand_read_with_sema = l;

		l = bpf_program__attach_usdt(skel->progs.urandlib_read_without_sema,
					     urand_pid, "./liburandom_read.so",
					     "urandlib", "read_without_sema", NULL);
		if (!ASSERT_OK_PTR(l, "urandlib_without_sema_attach"))
			goto cleanup;
		skel->links.urandlib_read_without_sema = l;

		l = bpf_program__attach_usdt(skel->progs.urandlib_read_with_sema,
					     urand_pid, "./liburandom_read.so",
					     "urandlib", "read_with_sema", NULL);
		if (!ASSERT_OK_PTR(l, "urandlib_with_sema_attach"))
			goto cleanup;
		skel->links.urandlib_read_with_sema = l;

	}

	/* trigger urandom_read USDTs */
	ASSERT_OK(urand_trigger(&urand_pipe), "urand_exit_code");

	/* one 256-byte read per probe site is expected on the BPF side */
	ASSERT_EQ(bss->urand_read_without_sema_call_cnt, 1, "urand_wo_sema_cnt");
	ASSERT_EQ(bss->urand_read_without_sema_buf_sz_sum, 256, "urand_wo_sema_sum");

	ASSERT_EQ(bss->urand_read_with_sema_call_cnt, 1, "urand_w_sema_cnt");
	ASSERT_EQ(bss->urand_read_with_sema_buf_sz_sum, 256, "urand_w_sema_sum");

	ASSERT_EQ(bss->urandlib_read_without_sema_call_cnt, 1, "urandlib_wo_sema_cnt");
	ASSERT_EQ(bss->urandlib_read_without_sema_buf_sz_sum, 256, "urandlib_wo_sema_sum");

	ASSERT_EQ(bss->urandlib_read_with_sema_call_cnt, 1, "urandlib_w_sema_cnt");
	ASSERT_EQ(bss->urandlib_read_with_sema_buf_sz_sum, 256, "urandlib_w_sema_sum");

cleanup:
	if (urand_pipe)
		pclose(urand_pipe);
	test_urandom_usdt__destroy(skel);
}
601
/* Entry point: register all USDT subtests with the test_progs harness. */
void test_usdt(void)
{
	if (test__start_subtest("basic"))
		subtest_basic_usdt(false);
#ifdef __x86_64__
	/* optimized-uprobe subtests are gated to x86_64 */
	if (test__start_subtest("basic_optimized"))
		subtest_basic_usdt(true);
	if (test__start_subtest("optimized_attach"))
		subtest_optimized_attach();
#endif
	if (test__start_subtest("multispec"))
		subtest_multispec_usdt();
	if (test__start_subtest("urand_auto_attach"))
		subtest_urandom_usdt(true /* auto_attach */);
	if (test__start_subtest("urand_pid_attach"))
		subtest_urandom_usdt(false /* auto_attach */);
}