Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2#include <test_progs.h>
3#include "test_attach_kprobe_sleepable.skel.h"
4#include "test_attach_probe_manual.skel.h"
5#include "test_attach_probe.skel.h"
6#include "kprobe_write_ctx.skel.h"
7
/* this is how USDT semaphore is actually defined, except volatile modifier;
 * when a ref_ctr-based u[ret]probe is attached the kernel bumps this counter,
 * and it must read 0 again once all such probes are detached (verified at the
 * end of test_attach_probe())
 */
volatile unsigned short uprobe_ref_ctr __attribute__((unused)) __attribute((section(".probes")));
10
/* uprobe attach point */
static noinline void trigger_func(void)
{
	/* empty volatile asm so the body cannot be optimized away and the
	 * function keeps a distinct, attachable address
	 */
	asm volatile ("");
}
16
/* attach point for byname uprobe */
static noinline void trigger_func2(void)
{
	/* empty volatile asm keeps this a distinct, attachable symbol */
	asm volatile ("");
}
22
/* attach point for byname sleepable uprobe */
static noinline void trigger_func3(void)
{
	/* empty volatile asm keeps this a distinct, attachable symbol */
	asm volatile ("");
}
28
/* attach point for ref_ctr */
static noinline void trigger_func4(void)
{
	/* empty volatile asm keeps this a distinct, attachable symbol */
	asm volatile ("");
}
34
/* user-space buffer handed to sleepable uprobe progs via skel->bss->user_ptr */
static char test_data[] = "test_data";
36
/* manual attach kprobe/kretprobe/uprobe/uretprobe testings */
static void test_attach_probe_manual(enum probe_attach_mode attach_mode)
{
	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
	struct bpf_link *kprobe_link, *kretprobe_link;
	struct bpf_link *uprobe_link, *uretprobe_link;
	struct test_attach_probe_manual *skel;
	ssize_t uprobe_offset;

	skel = test_attach_probe_manual__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load"))
		return;

	/* offset of trigger_func() inside our own executable */
	uprobe_offset = get_uprobe_offset(&trigger_func);
	if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset"))
		goto cleanup;

	/* manual-attach kprobe/kretprobe */
	kprobe_opts.attach_mode = attach_mode;
	kprobe_opts.retprobe = false;
	kprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
						      SYS_NANOSLEEP_KPROBE_NAME,
						      &kprobe_opts);
	if (!ASSERT_OK_PTR(kprobe_link, "attach_kprobe"))
		goto cleanup;
	/* hand the link to the skeleton so __destroy() detaches it */
	skel->links.handle_kprobe = kprobe_link;

	kprobe_opts.retprobe = true;
	kretprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
							 SYS_NANOSLEEP_KPROBE_NAME,
							 &kprobe_opts);
	if (!ASSERT_OK_PTR(kretprobe_link, "attach_kretprobe"))
		goto cleanup;
	skel->links.handle_kretprobe = kretprobe_link;

	/* manual-attach uprobe/uretprobe */
	uprobe_opts.attach_mode = attach_mode;
	uprobe_opts.ref_ctr_offset = 0;
	uprobe_opts.retprobe = false;
	uprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe,
						      0 /* self pid */,
						      "/proc/self/exe",
						      uprobe_offset,
						      &uprobe_opts);
	if (!ASSERT_OK_PTR(uprobe_link, "attach_uprobe"))
		goto cleanup;
	skel->links.handle_uprobe = uprobe_link;

	uprobe_opts.retprobe = true;
	uretprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe,
							 -1 /* any pid */,
							 "/proc/self/exe",
							 uprobe_offset, &uprobe_opts);
	if (!ASSERT_OK_PTR(uretprobe_link, "attach_uretprobe"))
		goto cleanup;
	skel->links.handle_uretprobe = uretprobe_link;

	/* attach uprobe by function name manually; offset 0 because libbpf
	 * resolves func_name to an offset itself
	 */
	uprobe_opts.func_name = "trigger_func2";
	uprobe_opts.retprobe = false;
	uprobe_opts.ref_ctr_offset = 0;
	skel->links.handle_uprobe_byname =
			bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe_byname,
							0 /* this pid */,
							"/proc/self/exe",
							0, &uprobe_opts);
	if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname, "attach_uprobe_byname"))
		goto cleanup;

	/* trigger & validate kprobe && kretprobe */
	usleep(1);

	/* trigger & validate uprobe & uretprobe */
	trigger_func();

	/* trigger & validate uprobe attached by name */
	trigger_func2();

	/* each prog writes a unique value into its bss slot when it fires */
	ASSERT_EQ(skel->bss->kprobe_res, 1, "check_kprobe_res");
	ASSERT_EQ(skel->bss->kretprobe_res, 2, "check_kretprobe_res");
	ASSERT_EQ(skel->bss->uprobe_res, 3, "check_uprobe_res");
	ASSERT_EQ(skel->bss->uretprobe_res, 4, "check_uretprobe_res");
	ASSERT_EQ(skel->bss->uprobe_byname_res, 5, "check_uprobe_byname_res");

cleanup:
	test_attach_probe_manual__destroy(skel);
}
125
126/* manual attach address-based kprobe/kretprobe testings */
127static void test_attach_kprobe_by_addr(enum probe_attach_mode attach_mode)
128{
129 LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
130 struct test_attach_probe_manual *skel;
131 unsigned long func_addr;
132
133 if (!ASSERT_OK(load_kallsyms(), "load_kallsyms"))
134 return;
135
136 func_addr = ksym_get_addr(SYS_NANOSLEEP_KPROBE_NAME);
137 if (!ASSERT_NEQ(func_addr, 0UL, "func_addr"))
138 return;
139
140 skel = test_attach_probe_manual__open_and_load();
141 if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load"))
142 return;
143
144 kprobe_opts.attach_mode = attach_mode;
145 kprobe_opts.retprobe = false;
146 kprobe_opts.offset = func_addr;
147 skel->links.handle_kprobe =
148 bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
149 NULL, &kprobe_opts);
150 if (!ASSERT_OK_PTR(skel->links.handle_kprobe, "attach_kprobe_by_addr"))
151 goto cleanup;
152
153 kprobe_opts.retprobe = true;
154 skel->links.handle_kretprobe =
155 bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
156 NULL, &kprobe_opts);
157 if (!ASSERT_OK_PTR(skel->links.handle_kretprobe, "attach_kretprobe_by_addr"))
158 goto cleanup;
159
160 /* trigger & validate kprobe && kretprobe */
161 usleep(1);
162
163 ASSERT_EQ(skel->bss->kprobe_res, 1, "check_kprobe_res");
164 ASSERT_EQ(skel->bss->kretprobe_res, 2, "check_kretprobe_res");
165
166cleanup:
167 test_attach_probe_manual__destroy(skel);
168}
169
170/* reject legacy address-based kprobe attach */
171static void test_attach_kprobe_legacy_by_addr_reject(void)
172{
173 LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
174 struct test_attach_probe_manual *skel;
175 unsigned long func_addr;
176
177 if (!ASSERT_OK(load_kallsyms(), "load_kallsyms"))
178 return;
179
180 func_addr = ksym_get_addr(SYS_NANOSLEEP_KPROBE_NAME);
181 if (!ASSERT_NEQ(func_addr, 0UL, "func_addr"))
182 return;
183
184 skel = test_attach_probe_manual__open_and_load();
185 if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load"))
186 return;
187
188 kprobe_opts.attach_mode = PROBE_ATTACH_MODE_LEGACY;
189 kprobe_opts.offset = func_addr;
190 skel->links.handle_kprobe =
191 bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
192 NULL, &kprobe_opts);
193 ASSERT_ERR_PTR(skel->links.handle_kprobe, "attach_kprobe_legacy_by_addr");
194 ASSERT_EQ(libbpf_get_error(skel->links.handle_kprobe),
195 -EOPNOTSUPP, "attach_kprobe_legacy_by_addr_err");
196
197 test_attach_probe_manual__destroy(skel);
198}
199
/*
 * bpf_fentry_shadow_test exists in both vmlinux (net/bpf/test_run.c) and
 * bpf_testmod (bpf_testmod.c). When bpf_testmod is loaded the symbol is
 * duplicated. Test that kprobe attachment handles this correctly:
 * - Unqualified name ("bpf_fentry_shadow_test") attaches to vmlinux.
 * - MOD:SYM name ("bpf_testmod:bpf_fentry_shadow_test") attaches to module.
 *
 * Note: bpf_fentry_shadow_test is not invoked via test_run, so we only
 * verify that attach and detach succeed without triggering the probe.
 */
static void test_attach_probe_dup_sym(enum probe_attach_mode attach_mode)
{
	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
	struct bpf_link *kprobe_link, *kretprobe_link;
	struct test_attach_probe_manual *skel;

	skel = test_attach_probe_manual__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_dup_sym_open_and_load"))
		return;

	kprobe_opts.attach_mode = attach_mode;

	/* Unqualified: should attach to vmlinux symbol */
	kprobe_opts.retprobe = false;
	kprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
						      "bpf_fentry_shadow_test",
						      &kprobe_opts);
	if (!ASSERT_OK_PTR(kprobe_link, "attach_kprobe_vmlinux"))
		goto cleanup;
	/* links are destroyed immediately: only attach/detach is under test */
	bpf_link__destroy(kprobe_link);

	kprobe_opts.retprobe = true;
	kretprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
							 "bpf_fentry_shadow_test",
							 &kprobe_opts);
	if (!ASSERT_OK_PTR(kretprobe_link, "attach_kretprobe_vmlinux"))
		goto cleanup;
	bpf_link__destroy(kretprobe_link);

	/* MOD:SYM qualified: should attach to module symbol */
	kprobe_opts.retprobe = false;
	kprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
						      "bpf_testmod:bpf_fentry_shadow_test",
						      &kprobe_opts);
	if (!ASSERT_OK_PTR(kprobe_link, "attach_kprobe_module"))
		goto cleanup;
	bpf_link__destroy(kprobe_link);

	kprobe_opts.retprobe = true;
	kretprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
							 "bpf_testmod:bpf_fentry_shadow_test",
							 &kprobe_opts);
	if (!ASSERT_OK_PTR(kretprobe_link, "attach_kretprobe_module"))
		goto cleanup;
	bpf_link__destroy(kretprobe_link);

cleanup:
	test_attach_probe_manual__destroy(skel);
}
259
/* attach uprobe/uretprobe long event name testings */
static void test_attach_uprobe_long_event_name(void)
{
	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
	struct bpf_link *uprobe_link, *uretprobe_link;
	struct test_attach_probe_manual *skel;
	ssize_t uprobe_offset;
	char path[PATH_MAX] = {0};

	skel = test_attach_probe_manual__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load"))
		return;

	uprobe_offset = get_uprobe_offset(&trigger_func);
	if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset"))
		goto cleanup;

	/* use the full absolute path of our own binary instead of the
	 * short "/proc/self/exe"; NOTE(review): with legacy mode the
	 * tracefs event name is presumably derived from this path, which
	 * is what makes it "long" — confirm against libbpf internals
	 */
	if (!ASSERT_GT(readlink("/proc/self/exe", path, PATH_MAX - 1), 0, "readlink"))
		goto cleanup;

	/* manual-attach uprobe/uretprobe; legacy mode forces the tracefs path */
	uprobe_opts.attach_mode = PROBE_ATTACH_MODE_LEGACY;
	uprobe_opts.ref_ctr_offset = 0;
	uprobe_opts.retprobe = false;
	uprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe,
						      0 /* self pid */,
						      path,
						      uprobe_offset,
						      &uprobe_opts);
	if (!ASSERT_OK_PTR(uprobe_link, "attach_uprobe_long_event_name"))
		goto cleanup;
	skel->links.handle_uprobe = uprobe_link;

	uprobe_opts.retprobe = true;
	uretprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe,
							 -1 /* any pid */,
							 path,
							 uprobe_offset, &uprobe_opts);
	if (!ASSERT_OK_PTR(uretprobe_link, "attach_uretprobe_long_event_name"))
		goto cleanup;
	skel->links.handle_uretprobe = uretprobe_link;

cleanup:
	test_attach_probe_manual__destroy(skel);
}
305
/* attach kprobe/kretprobe long event name testings */
static void test_attach_kprobe_long_event_name(void)
{
	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
	struct bpf_link *kprobe_link, *kretprobe_link;
	struct test_attach_probe_manual *skel;

	skel = test_attach_probe_manual__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load"))
		return;

	/* manual-attach kprobe/kretprobe in legacy (tracefs) mode against a
	 * deliberately very long bpf_testmod symbol name
	 */
	kprobe_opts.attach_mode = PROBE_ATTACH_MODE_LEGACY;
	kprobe_opts.retprobe = false;
	kprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
						      "bpf_testmod_looooooooooooooooooooooooooooooong_name",
						      &kprobe_opts);
	if (!ASSERT_OK_PTR(kprobe_link, "attach_kprobe_long_event_name"))
		goto cleanup;
	skel->links.handle_kprobe = kprobe_link;

	kprobe_opts.retprobe = true;
	kretprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
							 "bpf_testmod_looooooooooooooooooooooooooooooong_name",
							 &kprobe_opts);
	if (!ASSERT_OK_PTR(kretprobe_link, "attach_kretprobe_long_event_name"))
		goto cleanup;
	skel->links.handle_kretprobe = kretprobe_link;

cleanup:
	test_attach_probe_manual__destroy(skel);
}
338
339#ifdef __x86_64__
/* attaching a kprobe program that writes to its context must be rejected */
static void test_attach_kprobe_write_ctx(void)
{
	struct kprobe_write_ctx *skel = NULL;
	struct bpf_link *link = NULL;

	skel = kprobe_write_ctx__open_and_load();
	if (!ASSERT_OK_PTR(skel, "kprobe_write_ctx__open_and_load"))
		return;

	/* attach is expected to fail (link must be an error pointer);
	 * if it unexpectedly succeeds, destroy the link so it doesn't leak
	 */
	link = bpf_program__attach_kprobe_opts(skel->progs.kprobe_write_ctx,
					       "bpf_fentry_test1", NULL);
	if (!ASSERT_ERR_PTR(link, "bpf_program__attach_kprobe_opts"))
		bpf_link__destroy(link);

	kprobe_write_ctx__destroy(skel);
}
357
/* Verify that an freplace (extension) program targeting a ctx-writing kprobe
 * program cannot be attached on top of a plain kprobe program.
 */
static void test_freplace_kprobe_write_ctx(void)
{
	struct bpf_program *prog_kprobe, *prog_ext, *prog_fentry;
	struct kprobe_write_ctx *skel_kprobe, *skel_ext = NULL;
	struct bpf_link *link_kprobe = NULL, *link_ext = NULL;
	int err, prog_fd;
	LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
	LIBBPF_OPTS(bpf_test_run_opts, topts);

	/* first skeleton instance: load the kprobe_dummy and fentry progs */
	skel_kprobe = kprobe_write_ctx__open();
	if (!ASSERT_OK_PTR(skel_kprobe, "kprobe_write_ctx__open kprobe"))
		return;

	prog_kprobe = skel_kprobe->progs.kprobe_dummy;
	bpf_program__set_autoload(prog_kprobe, true);

	prog_fentry = skel_kprobe->progs.fentry;
	bpf_program__set_autoload(prog_fentry, true);

	err = kprobe_write_ctx__load(skel_kprobe);
	if (!ASSERT_OK(err, "kprobe_write_ctx__load kprobe"))
		goto out;

	/* second skeleton instance: load only the freplace program */
	skel_ext = kprobe_write_ctx__open();
	if (!ASSERT_OK_PTR(skel_ext, "kprobe_write_ctx__open ext"))
		goto out;

	prog_ext = skel_ext->progs.freplace_kprobe;
	bpf_program__set_autoload(prog_ext, true);

	/* the extension declares the ctx-writing kprobe prog as its target */
	prog_fd = bpf_program__fd(skel_kprobe->progs.kprobe_write_ctx);
	bpf_program__set_attach_target(prog_ext, prog_fd, "kprobe_write_ctx");

	err = kprobe_write_ctx__load(skel_ext);
	if (!ASSERT_OK(err, "kprobe_write_ctx__load ext"))
		goto out;

	/* attaching it onto kprobe_dummy must be rejected with -EINVAL */
	prog_fd = bpf_program__fd(prog_kprobe);
	link_ext = bpf_program__attach_freplace(prog_ext, prog_fd, "kprobe_dummy");
	ASSERT_ERR_PTR(link_ext, "bpf_program__attach_freplace link");
	ASSERT_EQ(libbpf_get_error(link_ext), -EINVAL, "bpf_program__attach_freplace error");

	/* the plain kprobe program itself still attaches fine */
	link_kprobe = bpf_program__attach_kprobe_opts(prog_kprobe, "bpf_fentry_test1",
						      &kprobe_opts);
	if (!ASSERT_OK_PTR(link_kprobe, "bpf_program__attach_kprobe_opts"))
		goto out;

	/* run the fentry prog via test_run to exercise the attached kprobe */
	err = bpf_prog_test_run_opts(bpf_program__fd(prog_fentry), &topts);
	ASSERT_OK(err, "bpf_prog_test_run_opts");

out:
	bpf_link__destroy(link_ext);
	bpf_link__destroy(link_kprobe);
	kprobe_write_ctx__destroy(skel_ext);
	kprobe_write_ctx__destroy(skel_kprobe);
}
414#else
static void test_attach_kprobe_write_ctx(void)
{
	/* kprobe ctx-write tests are x86_64-only (see #ifdef above) */
	test__skip();
}
419
static void test_freplace_kprobe_write_ctx(void)
{
	/* kprobe ctx-write tests are x86_64-only (see #ifdef above) */
	test__skip();
}
424#endif
425
426static void test_attach_probe_auto(struct test_attach_probe *skel)
427{
428 struct bpf_link *uprobe_err_link;
429
430 /* auto-attachable kprobe and kretprobe */
431 skel->links.handle_kprobe_auto = bpf_program__attach(skel->progs.handle_kprobe_auto);
432 ASSERT_OK_PTR(skel->links.handle_kprobe_auto, "attach_kprobe_auto");
433
434 skel->links.handle_kretprobe_auto = bpf_program__attach(skel->progs.handle_kretprobe_auto);
435 ASSERT_OK_PTR(skel->links.handle_kretprobe_auto, "attach_kretprobe_auto");
436
437 /* verify auto-attach fails for old-style uprobe definition */
438 uprobe_err_link = bpf_program__attach(skel->progs.handle_uprobe_byname);
439 if (!ASSERT_EQ(libbpf_get_error(uprobe_err_link), -EOPNOTSUPP,
440 "auto-attach should fail for old-style name"))
441 return;
442
443 /* verify auto-attach works */
444 skel->links.handle_uretprobe_byname =
445 bpf_program__attach(skel->progs.handle_uretprobe_byname);
446 if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname, "attach_uretprobe_byname"))
447 return;
448
449 /* trigger & validate kprobe && kretprobe */
450 usleep(1);
451
452 /* trigger & validate uprobe attached by name */
453 trigger_func2();
454
455 ASSERT_EQ(skel->bss->kprobe2_res, 11, "check_kprobe_auto_res");
456 ASSERT_EQ(skel->bss->kretprobe2_res, 22, "check_kretprobe_auto_res");
457 ASSERT_EQ(skel->bss->uretprobe_byname_res, 6, "check_uretprobe_byname_res");
458}
459
460static void test_uprobe_lib(struct test_attach_probe *skel)
461{
462 DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
463 FILE *devnull;
464
465 /* test attach by name for a library function, using the library
466 * as the binary argument. libc.so.6 will be resolved via dlopen()/dlinfo().
467 */
468 uprobe_opts.func_name = "fopen";
469 uprobe_opts.retprobe = false;
470 skel->links.handle_uprobe_byname2 =
471 bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe_byname2,
472 0 /* this pid */,
473 "libc.so.6",
474 0, &uprobe_opts);
475 if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname2, "attach_uprobe_byname2"))
476 return;
477
478 uprobe_opts.func_name = "fclose";
479 uprobe_opts.retprobe = true;
480 skel->links.handle_uretprobe_byname2 =
481 bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe_byname2,
482 -1 /* any pid */,
483 "libc.so.6",
484 0, &uprobe_opts);
485 if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname2, "attach_uretprobe_byname2"))
486 return;
487
488 /* trigger & validate shared library u[ret]probes attached by name */
489 devnull = fopen("/dev/null", "r");
490 fclose(devnull);
491
492 ASSERT_EQ(skel->bss->uprobe_byname2_res, 7, "check_uprobe_byname2_res");
493 ASSERT_EQ(skel->bss->uretprobe_byname2_res, 8, "check_uretprobe_byname2_res");
494}
495
/* u[ret]probes with a USDT-style ref_ctr semaphore: the kernel must bump
 * uprobe_ref_ctr while the probes are attached
 */
static void test_uprobe_ref_ctr(struct test_attach_probe *skel)
{
	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
	struct bpf_link *uprobe_link, *uretprobe_link;
	ssize_t uprobe_offset, ref_ctr_offset;

	uprobe_offset = get_uprobe_offset(&trigger_func4);
	if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset_ref_ctr"))
		return;

	/* file-relative offset of the semaphore in our own binary */
	ref_ctr_offset = get_rel_offset((uintptr_t)&uprobe_ref_ctr);
	if (!ASSERT_GE(ref_ctr_offset, 0, "ref_ctr_offset"))
		return;

	/* semaphore starts at zero before anything is attached */
	ASSERT_EQ(uprobe_ref_ctr, 0, "uprobe_ref_ctr_before");

	uprobe_opts.retprobe = false;
	uprobe_opts.ref_ctr_offset = ref_ctr_offset;
	uprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe_ref_ctr,
						      0 /* self pid */,
						      "/proc/self/exe",
						      uprobe_offset,
						      &uprobe_opts);
	if (!ASSERT_OK_PTR(uprobe_link, "attach_uprobe_ref_ctr"))
		return;
	skel->links.handle_uprobe_ref_ctr = uprobe_link;

	/* kernel increments the semaphore once the uprobe is live */
	ASSERT_GT(uprobe_ref_ctr, 0, "uprobe_ref_ctr_after");

	/* if uprobe uses ref_ctr, uretprobe has to use ref_ctr as well */
	uprobe_opts.retprobe = true;
	uprobe_opts.ref_ctr_offset = ref_ctr_offset;
	uretprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe_ref_ctr,
							 -1 /* any pid */,
							 "/proc/self/exe",
							 uprobe_offset, &uprobe_opts);
	if (!ASSERT_OK_PTR(uretprobe_link, "attach_uretprobe_ref_ctr"))
		return;
	skel->links.handle_uretprobe_ref_ctr = uretprobe_link;
}
536
537static void test_kprobe_sleepable(void)
538{
539 struct test_attach_kprobe_sleepable *skel;
540
541 skel = test_attach_kprobe_sleepable__open();
542 if (!ASSERT_OK_PTR(skel, "skel_kprobe_sleepable_open"))
543 return;
544
545 /* sleepable kprobe test case needs flags set before loading */
546 if (!ASSERT_OK(bpf_program__set_flags(skel->progs.handle_kprobe_sleepable,
547 BPF_F_SLEEPABLE), "kprobe_sleepable_flags"))
548 goto cleanup;
549
550 if (!ASSERT_OK(test_attach_kprobe_sleepable__load(skel),
551 "skel_kprobe_sleepable_load"))
552 goto cleanup;
553
554 /* sleepable kprobes should not attach successfully */
555 skel->links.handle_kprobe_sleepable = bpf_program__attach(skel->progs.handle_kprobe_sleepable);
556 ASSERT_ERR_PTR(skel->links.handle_kprobe_sleepable, "attach_kprobe_sleepable");
557
558cleanup:
559 test_attach_kprobe_sleepable__destroy(skel);
560}
561
/* sleepable u[ret]probe variants auto-attached by name, triggered via
 * trigger_func3()
 */
static void test_uprobe_sleepable(struct test_attach_probe *skel)
{
	/* test sleepable uprobe and uretprobe variants */
	skel->links.handle_uprobe_byname3_sleepable = bpf_program__attach(skel->progs.handle_uprobe_byname3_sleepable);
	if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname3_sleepable, "attach_uprobe_byname3_sleepable"))
		return;

	skel->links.handle_uprobe_byname3 = bpf_program__attach(skel->progs.handle_uprobe_byname3);
	if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname3, "attach_uprobe_byname3"))
		return;

	skel->links.handle_uretprobe_byname3_sleepable = bpf_program__attach(skel->progs.handle_uretprobe_byname3_sleepable);
	if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname3_sleepable, "attach_uretprobe_byname3_sleepable"))
		return;

	skel->links.handle_uretprobe_byname3 = bpf_program__attach(skel->progs.handle_uretprobe_byname3);
	if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname3, "attach_uretprobe_byname3"))
		return;

	/* give the sleepable progs a user-space pointer to read from */
	skel->bss->user_ptr = test_data;

	/* trigger & validate sleepable uprobe attached by name */
	trigger_func3();

	ASSERT_EQ(skel->bss->uprobe_byname3_sleepable_res, 9, "check_uprobe_byname3_sleepable_res");
	ASSERT_EQ(skel->bss->uprobe_byname3_str_sleepable_res, 10, "check_uprobe_byname3_str_sleepable_res");
	ASSERT_EQ(skel->bss->uprobe_byname3_res, 11, "check_uprobe_byname3_res");
	ASSERT_EQ(skel->bss->uretprobe_byname3_sleepable_res, 12, "check_uretprobe_byname3_sleepable_res");
	ASSERT_EQ(skel->bss->uretprobe_byname3_str_sleepable_res, 13, "check_uretprobe_byname3_str_sleepable_res");
	ASSERT_EQ(skel->bss->uretprobe_byname3_res, 14, "check_uretprobe_byname3_res");
}
593
/* top-level entry point: opens the shared test_attach_probe skeleton once and
 * dispatches the subtests (several subtests open their own private skeletons)
 */
void test_attach_probe(void)
{
	struct test_attach_probe *skel;

	skel = test_attach_probe__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	if (!ASSERT_OK(test_attach_probe__load(skel), "skel_load"))
		goto cleanup;
	if (!ASSERT_OK_PTR(skel->bss, "check_bss"))
		goto cleanup;

	/* run the manual-attach matrix across all probe attach modes */
	if (test__start_subtest("manual-default"))
		test_attach_probe_manual(PROBE_ATTACH_MODE_DEFAULT);
	if (test__start_subtest("manual-legacy"))
		test_attach_probe_manual(PROBE_ATTACH_MODE_LEGACY);
	if (test__start_subtest("manual-perf"))
		test_attach_probe_manual(PROBE_ATTACH_MODE_PERF);
	if (test__start_subtest("manual-link"))
		test_attach_probe_manual(PROBE_ATTACH_MODE_LINK);
	if (test__start_subtest("kprobe-perf-by-addr"))
		test_attach_kprobe_by_addr(PROBE_ATTACH_MODE_PERF);
	if (test__start_subtest("kprobe-link-by-addr"))
		test_attach_kprobe_by_addr(PROBE_ATTACH_MODE_LINK);
	if (test__start_subtest("kprobe-legacy-by-addr-reject"))
		test_attach_kprobe_legacy_by_addr_reject();

	if (test__start_subtest("dup-sym-default"))
		test_attach_probe_dup_sym(PROBE_ATTACH_MODE_DEFAULT);
	if (test__start_subtest("dup-sym-legacy"))
		test_attach_probe_dup_sym(PROBE_ATTACH_MODE_LEGACY);
	if (test__start_subtest("dup-sym-perf"))
		test_attach_probe_dup_sym(PROBE_ATTACH_MODE_PERF);
	if (test__start_subtest("dup-sym-link"))
		test_attach_probe_dup_sym(PROBE_ATTACH_MODE_LINK);

	if (test__start_subtest("auto"))
		test_attach_probe_auto(skel);
	if (test__start_subtest("kprobe-sleepable"))
		test_kprobe_sleepable();
	if (test__start_subtest("uprobe-lib"))
		test_uprobe_lib(skel);
	if (test__start_subtest("uprobe-sleepable"))
		test_uprobe_sleepable(skel);
	if (test__start_subtest("uprobe-ref_ctr"))
		test_uprobe_ref_ctr(skel);

	if (test__start_subtest("uprobe-long_name"))
		test_attach_uprobe_long_event_name();
	if (test__start_subtest("kprobe-long_name"))
		test_attach_kprobe_long_event_name();
	if (test__start_subtest("kprobe-write-ctx"))
		test_attach_kprobe_write_ctx();
	if (test__start_subtest("freplace-kprobe-write-ctx"))
		test_freplace_kprobe_write_ctx();

cleanup:
	test_attach_probe__destroy(skel);
	/* after destroy detaches everything, the USDT-style semaphore must be 0 */
	ASSERT_EQ(uprobe_ref_ctr, 0, "uprobe_ref_ctr_cleanup");
}