Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2021 Facebook */
3#define _GNU_SOURCE
4#include <pthread.h>
5#include <sched.h>
6#include <sys/syscall.h>
7#include <sys/mman.h>
8#include <unistd.h>
9#include <linux/compiler.h>
10#include <test_progs.h>
11#include <network_helpers.h>
12#include <bpf/btf.h>
13#include "test_bpf_cookie.skel.h"
14#include "kprobe_multi.skel.h"
15#include "uprobe_multi.skel.h"
16
/* uprobe attach point: noinline plus the empty volatile asm keep the
 * function (and calls to it) from being optimized away, so the uprobe
 * and uretprobe below always have a real instruction to attach to
 */
static noinline void trigger_func(void)
{
	asm volatile ("");
}
22
23static void kprobe_subtest(struct test_bpf_cookie *skel)
24{
25 DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts);
26 struct bpf_link *link1 = NULL, *link2 = NULL;
27 struct bpf_link *retlink1 = NULL, *retlink2 = NULL;
28
29 /* attach two kprobes */
30 opts.bpf_cookie = 0x1;
31 opts.retprobe = false;
32 link1 = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
33 SYS_NANOSLEEP_KPROBE_NAME, &opts);
34 if (!ASSERT_OK_PTR(link1, "link1"))
35 goto cleanup;
36
37 opts.bpf_cookie = 0x2;
38 opts.retprobe = false;
39 link2 = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
40 SYS_NANOSLEEP_KPROBE_NAME, &opts);
41 if (!ASSERT_OK_PTR(link2, "link2"))
42 goto cleanup;
43
44 /* attach two kretprobes */
45 opts.bpf_cookie = 0x10;
46 opts.retprobe = true;
47 retlink1 = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
48 SYS_NANOSLEEP_KPROBE_NAME, &opts);
49 if (!ASSERT_OK_PTR(retlink1, "retlink1"))
50 goto cleanup;
51
52 opts.bpf_cookie = 0x20;
53 opts.retprobe = true;
54 retlink2 = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
55 SYS_NANOSLEEP_KPROBE_NAME, &opts);
56 if (!ASSERT_OK_PTR(retlink2, "retlink2"))
57 goto cleanup;
58
59 /* trigger kprobe && kretprobe */
60 usleep(1);
61
62 ASSERT_EQ(skel->bss->kprobe_res, 0x1 | 0x2, "kprobe_res");
63 ASSERT_EQ(skel->bss->kretprobe_res, 0x10 | 0x20, "kretprobe_res");
64
65cleanup:
66 bpf_link__destroy(link1);
67 bpf_link__destroy(link2);
68 bpf_link__destroy(retlink1);
69 bpf_link__destroy(retlink2);
70}
71
/* Run the skeleton's "trigger" program once via BPF_PROG_TEST_RUN and
 * check that every per-function result flag in BSS was set to 1, i.e.
 * each kprobe.multi/kretprobe.multi handler fired and matched its
 * expected cookie (the matching itself happens on the BPF side).
 */
static void kprobe_multi_test_run(struct kprobe_multi *skel)
{
	LIBBPF_OPTS(bpf_test_run_opts, topts);
	int err, prog_fd;

	/* test_run of "trigger" calls the bpf_fentry_test* kernel functions,
	 * which fires all attached multi-kprobes/kretprobes
	 */
	prog_fd = bpf_program__fd(skel->progs.trigger);
	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "test_run");
	ASSERT_EQ(topts.retval, 0, "test_run");

	ASSERT_EQ(skel->bss->kprobe_test1_result, 1, "kprobe_test1_result");
	ASSERT_EQ(skel->bss->kprobe_test2_result, 1, "kprobe_test2_result");
	ASSERT_EQ(skel->bss->kprobe_test3_result, 1, "kprobe_test3_result");
	ASSERT_EQ(skel->bss->kprobe_test4_result, 1, "kprobe_test4_result");
	ASSERT_EQ(skel->bss->kprobe_test5_result, 1, "kprobe_test5_result");
	ASSERT_EQ(skel->bss->kprobe_test6_result, 1, "kprobe_test6_result");
	ASSERT_EQ(skel->bss->kprobe_test7_result, 1, "kprobe_test7_result");
	ASSERT_EQ(skel->bss->kprobe_test8_result, 1, "kprobe_test8_result");

	ASSERT_EQ(skel->bss->kretprobe_test1_result, 1, "kretprobe_test1_result");
	ASSERT_EQ(skel->bss->kretprobe_test2_result, 1, "kretprobe_test2_result");
	ASSERT_EQ(skel->bss->kretprobe_test3_result, 1, "kretprobe_test3_result");
	ASSERT_EQ(skel->bss->kretprobe_test4_result, 1, "kretprobe_test4_result");
	ASSERT_EQ(skel->bss->kretprobe_test5_result, 1, "kretprobe_test5_result");
	ASSERT_EQ(skel->bss->kretprobe_test6_result, 1, "kretprobe_test6_result");
	ASSERT_EQ(skel->bss->kretprobe_test7_result, 1, "kretprobe_test7_result");
	ASSERT_EQ(skel->bss->kretprobe_test8_result, 1, "kretprobe_test8_result");
}
100
101static void kprobe_multi_link_api_subtest(void)
102{
103 int prog_fd, link1_fd = -1, link2_fd = -1;
104 struct kprobe_multi *skel = NULL;
105 LIBBPF_OPTS(bpf_link_create_opts, opts);
106 unsigned long long addrs[8];
107 __u64 cookies[8];
108
109 if (!env.has_testmod) {
110 test__skip();
111 return;
112 }
113
114 if (!ASSERT_OK(load_kallsyms(), "load_kallsyms"))
115 goto cleanup;
116
117 skel = kprobe_multi__open_and_load();
118 if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load"))
119 goto cleanup;
120
121 skel->bss->pid = getpid();
122 skel->bss->test_cookie = true;
123
124#define GET_ADDR(__sym, __addr) ({ \
125 __addr = ksym_get_addr(__sym); \
126 if (!ASSERT_NEQ(__addr, 0, "ksym_get_addr " #__sym)) \
127 goto cleanup; \
128})
129
130 GET_ADDR("bpf_fentry_test1", addrs[0]);
131 GET_ADDR("bpf_fentry_test3", addrs[1]);
132 GET_ADDR("bpf_fentry_test4", addrs[2]);
133 GET_ADDR("bpf_fentry_test5", addrs[3]);
134 GET_ADDR("bpf_fentry_test6", addrs[4]);
135 GET_ADDR("bpf_fentry_test7", addrs[5]);
136 GET_ADDR("bpf_fentry_test2", addrs[6]);
137 GET_ADDR("bpf_fentry_test8", addrs[7]);
138
139#undef GET_ADDR
140
141 cookies[0] = 1; /* bpf_fentry_test1 */
142 cookies[1] = 2; /* bpf_fentry_test3 */
143 cookies[2] = 3; /* bpf_fentry_test4 */
144 cookies[3] = 4; /* bpf_fentry_test5 */
145 cookies[4] = 5; /* bpf_fentry_test6 */
146 cookies[5] = 6; /* bpf_fentry_test7 */
147 cookies[6] = 7; /* bpf_fentry_test2 */
148 cookies[7] = 8; /* bpf_fentry_test8 */
149
150 opts.kprobe_multi.addrs = (const unsigned long *) &addrs;
151 opts.kprobe_multi.cnt = ARRAY_SIZE(addrs);
152 opts.kprobe_multi.cookies = (const __u64 *) &cookies;
153 prog_fd = bpf_program__fd(skel->progs.test_kprobe);
154
155 link1_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &opts);
156 if (!ASSERT_GE(link1_fd, 0, "link1_fd"))
157 goto cleanup;
158
159 cookies[0] = 8; /* bpf_fentry_test1 */
160 cookies[1] = 7; /* bpf_fentry_test3 */
161 cookies[2] = 6; /* bpf_fentry_test4 */
162 cookies[3] = 5; /* bpf_fentry_test5 */
163 cookies[4] = 4; /* bpf_fentry_test6 */
164 cookies[5] = 3; /* bpf_fentry_test7 */
165 cookies[6] = 2; /* bpf_fentry_test2 */
166 cookies[7] = 1; /* bpf_fentry_test8 */
167
168 opts.kprobe_multi.flags = BPF_F_KPROBE_MULTI_RETURN;
169 prog_fd = bpf_program__fd(skel->progs.test_kretprobe);
170
171 link2_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &opts);
172 if (!ASSERT_GE(link2_fd, 0, "link2_fd"))
173 goto cleanup;
174
175 kprobe_multi_test_run(skel);
176
177cleanup:
178 close(link1_fd);
179 close(link2_fd);
180 kprobe_multi__destroy(skel);
181}
182
183static void kprobe_multi_attach_api_subtest(void)
184{
185 struct bpf_link *link1 = NULL, *link2 = NULL;
186 LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
187 LIBBPF_OPTS(bpf_test_run_opts, topts);
188 struct kprobe_multi *skel = NULL;
189 const char *syms[8] = {
190 "bpf_fentry_test1",
191 "bpf_fentry_test3",
192 "bpf_fentry_test4",
193 "bpf_fentry_test5",
194 "bpf_fentry_test6",
195 "bpf_fentry_test7",
196 "bpf_fentry_test2",
197 "bpf_fentry_test8",
198 };
199 __u64 cookies[8];
200
201 if (!env.has_testmod) {
202 test__skip();
203 return;
204 }
205
206 skel = kprobe_multi__open_and_load();
207 if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load"))
208 goto cleanup;
209
210 skel->bss->pid = getpid();
211 skel->bss->test_cookie = true;
212
213 cookies[0] = 1; /* bpf_fentry_test1 */
214 cookies[1] = 2; /* bpf_fentry_test3 */
215 cookies[2] = 3; /* bpf_fentry_test4 */
216 cookies[3] = 4; /* bpf_fentry_test5 */
217 cookies[4] = 5; /* bpf_fentry_test6 */
218 cookies[5] = 6; /* bpf_fentry_test7 */
219 cookies[6] = 7; /* bpf_fentry_test2 */
220 cookies[7] = 8; /* bpf_fentry_test8 */
221
222 opts.syms = syms;
223 opts.cnt = ARRAY_SIZE(syms);
224 opts.cookies = cookies;
225
226 link1 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe,
227 NULL, &opts);
228 if (!ASSERT_OK_PTR(link1, "bpf_program__attach_kprobe_multi_opts"))
229 goto cleanup;
230
231 cookies[0] = 8; /* bpf_fentry_test1 */
232 cookies[1] = 7; /* bpf_fentry_test3 */
233 cookies[2] = 6; /* bpf_fentry_test4 */
234 cookies[3] = 5; /* bpf_fentry_test5 */
235 cookies[4] = 4; /* bpf_fentry_test6 */
236 cookies[5] = 3; /* bpf_fentry_test7 */
237 cookies[6] = 2; /* bpf_fentry_test2 */
238 cookies[7] = 1; /* bpf_fentry_test8 */
239
240 opts.retprobe = true;
241
242 link2 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kretprobe,
243 NULL, &opts);
244 if (!ASSERT_OK_PTR(link2, "bpf_program__attach_kprobe_multi_opts"))
245 goto cleanup;
246
247 kprobe_multi_test_run(skel);
248
249cleanup:
250 bpf_link__destroy(link2);
251 bpf_link__destroy(link1);
252 kprobe_multi__destroy(skel);
253}
254
255/* defined in prog_tests/uprobe_multi_test.c */
256void uprobe_multi_func_1(void);
257void uprobe_multi_func_2(void);
258void uprobe_multi_func_3(void);
259
/* Publish the userspace addresses of the three trigger functions to the
 * BPF side (so it can match probe hits against cookies), call each one
 * once, and verify every uprobe/uretprobe result flag was set to 1.
 */
static void uprobe_multi_test_run(struct uprobe_multi *skel)
{
	/* BPF program compares hit addresses against these */
	skel->bss->uprobe_multi_func_1_addr = (__u64) uprobe_multi_func_1;
	skel->bss->uprobe_multi_func_2_addr = (__u64) uprobe_multi_func_2;
	skel->bss->uprobe_multi_func_3_addr = (__u64) uprobe_multi_func_3;

	skel->bss->pid = getpid();
	skel->bss->test_cookie = true;

	/* trigger all attached uprobes/uretprobes */
	uprobe_multi_func_1();
	uprobe_multi_func_2();
	uprobe_multi_func_3();

	ASSERT_EQ(skel->bss->uprobe_multi_func_1_result, 1, "uprobe_multi_func_1_result");
	ASSERT_EQ(skel->bss->uprobe_multi_func_2_result, 1, "uprobe_multi_func_2_result");
	ASSERT_EQ(skel->bss->uprobe_multi_func_3_result, 1, "uprobe_multi_func_3_result");

	ASSERT_EQ(skel->bss->uretprobe_multi_func_1_result, 1, "uretprobe_multi_func_1_result");
	ASSERT_EQ(skel->bss->uretprobe_multi_func_2_result, 1, "uretprobe_multi_func_2_result");
	ASSERT_EQ(skel->bss->uretprobe_multi_func_3_result, 1, "uretprobe_multi_func_3_result");
}
281
282static void uprobe_multi_attach_api_subtest(void)
283{
284 struct bpf_link *link1 = NULL, *link2 = NULL;
285 struct uprobe_multi *skel = NULL;
286 LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
287 const char *syms[3] = {
288 "uprobe_multi_func_1",
289 "uprobe_multi_func_2",
290 "uprobe_multi_func_3",
291 };
292 __u64 cookies[3];
293
294 cookies[0] = 3; /* uprobe_multi_func_1 */
295 cookies[1] = 1; /* uprobe_multi_func_2 */
296 cookies[2] = 2; /* uprobe_multi_func_3 */
297
298 opts.syms = syms;
299 opts.cnt = ARRAY_SIZE(syms);
300 opts.cookies = &cookies[0];
301
302 skel = uprobe_multi__open_and_load();
303 if (!ASSERT_OK_PTR(skel, "uprobe_multi"))
304 goto cleanup;
305
306 link1 = bpf_program__attach_uprobe_multi(skel->progs.uprobe, -1,
307 "/proc/self/exe", NULL, &opts);
308 if (!ASSERT_OK_PTR(link1, "bpf_program__attach_uprobe_multi"))
309 goto cleanup;
310
311 cookies[0] = 2; /* uprobe_multi_func_1 */
312 cookies[1] = 3; /* uprobe_multi_func_2 */
313 cookies[2] = 1; /* uprobe_multi_func_3 */
314
315 opts.retprobe = true;
316 link2 = bpf_program__attach_uprobe_multi(skel->progs.uretprobe, -1,
317 "/proc/self/exe", NULL, &opts);
318 if (!ASSERT_OK_PTR(link2, "bpf_program__attach_uprobe_multi_retprobe"))
319 goto cleanup;
320
321 uprobe_multi_test_run(skel);
322
323cleanup:
324 bpf_link__destroy(link2);
325 bpf_link__destroy(link1);
326 uprobe_multi__destroy(skel);
327}
328
329static void uprobe_subtest(struct test_bpf_cookie *skel)
330{
331 DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts);
332 struct bpf_link *link1 = NULL, *link2 = NULL;
333 struct bpf_link *retlink1 = NULL, *retlink2 = NULL;
334 ssize_t uprobe_offset;
335
336 uprobe_offset = get_uprobe_offset(&trigger_func);
337 if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset"))
338 goto cleanup;
339
340 /* attach two uprobes */
341 opts.bpf_cookie = 0x100;
342 opts.retprobe = false;
343 link1 = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe, 0 /* self pid */,
344 "/proc/self/exe", uprobe_offset, &opts);
345 if (!ASSERT_OK_PTR(link1, "link1"))
346 goto cleanup;
347
348 opts.bpf_cookie = 0x200;
349 opts.retprobe = false;
350 link2 = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe, -1 /* any pid */,
351 "/proc/self/exe", uprobe_offset, &opts);
352 if (!ASSERT_OK_PTR(link2, "link2"))
353 goto cleanup;
354
355 /* attach two uretprobes */
356 opts.bpf_cookie = 0x1000;
357 opts.retprobe = true;
358 retlink1 = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe, -1 /* any pid */,
359 "/proc/self/exe", uprobe_offset, &opts);
360 if (!ASSERT_OK_PTR(retlink1, "retlink1"))
361 goto cleanup;
362
363 opts.bpf_cookie = 0x2000;
364 opts.retprobe = true;
365 retlink2 = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe, 0 /* self pid */,
366 "/proc/self/exe", uprobe_offset, &opts);
367 if (!ASSERT_OK_PTR(retlink2, "retlink2"))
368 goto cleanup;
369
370 /* trigger uprobe && uretprobe */
371 trigger_func();
372
373 ASSERT_EQ(skel->bss->uprobe_res, 0x100 | 0x200, "uprobe_res");
374 ASSERT_EQ(skel->bss->uretprobe_res, 0x1000 | 0x2000, "uretprobe_res");
375
376cleanup:
377 bpf_link__destroy(link1);
378 bpf_link__destroy(link2);
379 bpf_link__destroy(retlink1);
380 bpf_link__destroy(retlink2);
381}
382
383static void tp_subtest(struct test_bpf_cookie *skel)
384{
385 DECLARE_LIBBPF_OPTS(bpf_tracepoint_opts, opts);
386 struct bpf_link *link1 = NULL, *link2 = NULL, *link3 = NULL;
387
388 /* attach first tp prog */
389 opts.bpf_cookie = 0x10000;
390 link1 = bpf_program__attach_tracepoint_opts(skel->progs.handle_tp1,
391 "syscalls", "sys_enter_nanosleep", &opts);
392 if (!ASSERT_OK_PTR(link1, "link1"))
393 goto cleanup;
394
395 /* attach second tp prog */
396 opts.bpf_cookie = 0x20000;
397 link2 = bpf_program__attach_tracepoint_opts(skel->progs.handle_tp2,
398 "syscalls", "sys_enter_nanosleep", &opts);
399 if (!ASSERT_OK_PTR(link2, "link2"))
400 goto cleanup;
401
402 /* trigger tracepoints */
403 usleep(1);
404
405 ASSERT_EQ(skel->bss->tp_res, 0x10000 | 0x20000, "tp_res1");
406
407 /* now we detach first prog and will attach third one, which causes
408 * two internal calls to bpf_prog_array_copy(), shuffling
409 * bpf_prog_array_items around. We test here that we don't lose track
410 * of associated bpf_cookies.
411 */
412 bpf_link__destroy(link1);
413 link1 = NULL;
414 kern_sync_rcu();
415 skel->bss->tp_res = 0;
416
417 /* attach third tp prog */
418 opts.bpf_cookie = 0x40000;
419 link3 = bpf_program__attach_tracepoint_opts(skel->progs.handle_tp3,
420 "syscalls", "sys_enter_nanosleep", &opts);
421 if (!ASSERT_OK_PTR(link3, "link3"))
422 goto cleanup;
423
424 /* trigger tracepoints */
425 usleep(1);
426
427 ASSERT_EQ(skel->bss->tp_res, 0x20000 | 0x40000, "tp_res2");
428
429cleanup:
430 bpf_link__destroy(link1);
431 bpf_link__destroy(link2);
432 bpf_link__destroy(link3);
433}
434
/* Busy-spin for roughly `loops` iterations on CPU 0 so that the software
 * perf event (CPU clock) attached in pe_subtest() gets a chance to fire.
 */
static void burn_cpu(long loops)
{
	cpu_set_t mask;
	long counter = 0;
	long iter;
	int ret;

	/* pin this thread to CPU 0 so the generated work lands on one core */
	CPU_ZERO(&mask);
	CPU_SET(0, &mask);
	ret = pthread_setaffinity_np(pthread_self(), sizeof(mask), &mask);
	ASSERT_OK(ret, "set_thread_affinity");

	iter = 0;
	while (iter < loops) {
		counter++;
		iter++;
		/* barrier() keeps the compiler from eliding the loop */
		barrier();
	}
}
453
454static void pe_subtest(struct test_bpf_cookie *skel)
455{
456 DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, opts);
457 struct bpf_link *link = NULL;
458 struct perf_event_attr attr;
459 int pfd = -1;
460
461 /* create perf event */
462 memset(&attr, 0, sizeof(attr));
463 attr.size = sizeof(attr);
464 attr.type = PERF_TYPE_SOFTWARE;
465 attr.config = PERF_COUNT_SW_CPU_CLOCK;
466 attr.sample_period = 100000;
467 pfd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, PERF_FLAG_FD_CLOEXEC);
468 if (!ASSERT_GE(pfd, 0, "perf_fd"))
469 goto cleanup;
470
471 opts.bpf_cookie = 0x100000;
472 link = bpf_program__attach_perf_event_opts(skel->progs.handle_pe, pfd, &opts);
473 if (!ASSERT_OK_PTR(link, "link1"))
474 goto cleanup;
475
476 burn_cpu(100000000L); /* trigger BPF prog */
477
478 ASSERT_EQ(skel->bss->pe_res, 0x100000, "pe_res1");
479
480 /* prevent bpf_link__destroy() closing pfd itself */
481 bpf_link__disconnect(link);
482 /* close BPF link's FD explicitly */
483 close(bpf_link__fd(link));
484 /* free up memory used by struct bpf_link */
485 bpf_link__destroy(link);
486 link = NULL;
487 kern_sync_rcu();
488 skel->bss->pe_res = 0;
489
490 opts.bpf_cookie = 0x200000;
491 link = bpf_program__attach_perf_event_opts(skel->progs.handle_pe, pfd, &opts);
492 if (!ASSERT_OK_PTR(link, "link2"))
493 goto cleanup;
494
495 burn_cpu(100000000L); /* trigger BPF prog */
496
497 ASSERT_EQ(skel->bss->pe_res, 0x200000, "pe_res2");
498
499cleanup:
500 close(pfd);
501 bpf_link__destroy(link);
502}
503
504static int verify_tracing_link_info(int fd, u64 cookie)
505{
506 struct bpf_link_info info;
507 int err;
508 u32 len = sizeof(info);
509
510 err = bpf_link_get_info_by_fd(fd, &info, &len);
511 if (!ASSERT_OK(err, "get_link_info"))
512 return -1;
513
514 if (!ASSERT_EQ(info.type, BPF_LINK_TYPE_TRACING, "link_type"))
515 return -1;
516
517 ASSERT_EQ(info.tracing.cookie, cookie, "tracing_cookie");
518
519 return 0;
520}
521
/* Attach fentry, fexit and fmod_ret trampoline programs, each with a
 * distinct cookie passed via bpf_link_create() opts, trigger them with
 * BPF_PROG_TEST_RUN and verify each program saw its own cookie.
 */
static void tracing_subtest(struct test_bpf_cookie *skel)
{
	__u64 cookie;
	int prog_fd, err;
	int fentry_fd = -1, fexit_fd = -1, fmod_ret_fd = -1;
	LIBBPF_OPTS(bpf_test_run_opts, opts);
	LIBBPF_OPTS(bpf_link_create_opts, link_opts);

	/* reset results in case a previous subtest touched them */
	skel->bss->fentry_res = 0;
	skel->bss->fexit_res = 0;

	cookie = 0x10000000000000L;
	prog_fd = bpf_program__fd(skel->progs.fentry_test1);
	link_opts.tracing.cookie = cookie;
	fentry_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_FENTRY, &link_opts);
	if (!ASSERT_GE(fentry_fd, 0, "fentry.link_create"))
		goto cleanup;

	/* also check the cookie is reported back via link info */
	err = verify_tracing_link_info(fentry_fd, cookie);
	if (!ASSERT_OK(err, "verify_tracing_link_info"))
		goto cleanup;

	cookie = 0x20000000000000L;
	prog_fd = bpf_program__fd(skel->progs.fexit_test1);
	link_opts.tracing.cookie = cookie;
	fexit_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_FEXIT, &link_opts);
	if (!ASSERT_GE(fexit_fd, 0, "fexit.link_create"))
		goto cleanup;

	cookie = 0x30000000000000L;
	prog_fd = bpf_program__fd(skel->progs.fmod_ret_test);
	link_opts.tracing.cookie = cookie;
	fmod_ret_fd = bpf_link_create(prog_fd, 0, BPF_MODIFY_RETURN, &link_opts);
	if (!ASSERT_GE(fmod_ret_fd, 0, "fmod_ret.link_create"))
		goto cleanup;

	/* test_run of fentry_test1 fires both fentry and fexit programs */
	prog_fd = bpf_program__fd(skel->progs.fentry_test1);
	bpf_prog_test_run_opts(prog_fd, &opts);

	prog_fd = bpf_program__fd(skel->progs.fmod_ret_test);
	bpf_prog_test_run_opts(prog_fd, &opts);

	ASSERT_EQ(skel->bss->fentry_res, 0x10000000000000L, "fentry_res");
	ASSERT_EQ(skel->bss->fexit_res, 0x20000000000000L, "fexit_res");
	ASSERT_EQ(skel->bss->fmod_ret_res, 0x30000000000000L, "fmod_ret_res");

cleanup:
	if (fentry_fd >= 0)
		close(fentry_fd);
	if (fexit_fd >= 0)
		close(fexit_fd);
	if (fmod_ret_fd >= 0)
		close(fmod_ret_fd);
}
576
577int stack_mprotect(void);
578
579static void lsm_subtest(struct test_bpf_cookie *skel)
580{
581 __u64 cookie;
582 int prog_fd;
583 int lsm_fd = -1;
584 LIBBPF_OPTS(bpf_link_create_opts, link_opts);
585 int err;
586
587 skel->bss->lsm_res = 0;
588
589 cookie = 0x90000000000090L;
590 prog_fd = bpf_program__fd(skel->progs.test_int_hook);
591 link_opts.tracing.cookie = cookie;
592 lsm_fd = bpf_link_create(prog_fd, 0, BPF_LSM_MAC, &link_opts);
593 if (!ASSERT_GE(lsm_fd, 0, "lsm.link_create"))
594 goto cleanup;
595
596 err = stack_mprotect();
597 if (!ASSERT_EQ(err, -1, "stack_mprotect") ||
598 !ASSERT_EQ(errno, EPERM, "stack_mprotect"))
599 goto cleanup;
600
601 usleep(1);
602
603 ASSERT_EQ(skel->bss->lsm_res, 0x90000000000090L, "fentry_res");
604
605cleanup:
606 if (lsm_fd >= 0)
607 close(lsm_fd);
608}
609
/* Verify cookies work for tp_btf (BTF-aware raw tracepoint) programs via
 * all three attach paths: raw BPF_RAW_TRACEPOINT_OPEN, generic
 * bpf_link_create(), and the high-level attach_trace_opts() API.
 */
static void tp_btf_subtest(struct test_bpf_cookie *skel)
{
	__u64 cookie;
	int prog_fd, link_fd = -1;
	struct bpf_link *link = NULL;
	LIBBPF_OPTS(bpf_link_create_opts, link_opts);
	LIBBPF_OPTS(bpf_raw_tp_opts, raw_tp_opts);
	LIBBPF_OPTS(bpf_trace_opts, trace_opts);

	/* There are three different ways to attach tp_btf (BTF-aware raw
	 * tracepoint) programs. Let's test all of them.
	 */
	prog_fd = bpf_program__fd(skel->progs.handle_tp_btf);

	/* low-level BPF_RAW_TRACEPOINT_OPEN command wrapper */
	skel->bss->tp_btf_res = 0;

	raw_tp_opts.cookie = cookie = 0x11000000000000L;
	link_fd = bpf_raw_tracepoint_open_opts(prog_fd, &raw_tp_opts);
	if (!ASSERT_GE(link_fd, 0, "bpf_raw_tracepoint_open_opts"))
		goto cleanup;

	usleep(1); /* trigger */
	close(link_fd); /* detach */
	link_fd = -1; /* mark closed so cleanup doesn't close it again */

	ASSERT_EQ(skel->bss->tp_btf_res, cookie, "raw_tp_open_res");

	/* low-level generic bpf_link_create() API */
	skel->bss->tp_btf_res = 0;

	link_opts.tracing.cookie = cookie = 0x22000000000000L;
	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_RAW_TP, &link_opts);
	if (!ASSERT_GE(link_fd, 0, "bpf_link_create"))
		goto cleanup;

	usleep(1); /* trigger */
	close(link_fd); /* detach */
	link_fd = -1;

	ASSERT_EQ(skel->bss->tp_btf_res, cookie, "link_create_res");

	/* high-level bpf_link-based bpf_program__attach_trace_opts() API */
	skel->bss->tp_btf_res = 0;

	trace_opts.cookie = cookie = 0x33000000000000L;
	link = bpf_program__attach_trace_opts(skel->progs.handle_tp_btf, &trace_opts);
	if (!ASSERT_OK_PTR(link, "attach_trace_opts"))
		goto cleanup;

	usleep(1); /* trigger */
	bpf_link__destroy(link); /* detach */
	link = NULL; /* avoid double destroy in cleanup */

	ASSERT_EQ(skel->bss->tp_btf_res, cookie, "attach_trace_opts_res");

cleanup:
	if (link_fd >= 0)
		close(link_fd);
	bpf_link__destroy(link);
}
671
672static int verify_raw_tp_link_info(int fd, u64 cookie)
673{
674 struct bpf_link_info info;
675 int err;
676 u32 len = sizeof(info);
677
678 memset(&info, 0, sizeof(info));
679 err = bpf_link_get_info_by_fd(fd, &info, &len);
680 if (!ASSERT_OK(err, "get_link_info"))
681 return -1;
682
683 if (!ASSERT_EQ(info.type, BPF_LINK_TYPE_RAW_TRACEPOINT, "link_type"))
684 return -1;
685
686 ASSERT_EQ(info.raw_tracepoint.cookie, cookie, "raw_tp_cookie");
687
688 return 0;
689}
690
/* Verify cookies work for classic raw_tp programs via both attach paths:
 * the low-level BPF_RAW_TRACEPOINT_OPEN wrapper and the high-level
 * bpf_program__attach_raw_tracepoint_opts() API.
 */
static void raw_tp_subtest(struct test_bpf_cookie *skel)
{
	__u64 cookie;
	int err, prog_fd, link_fd = -1;
	struct bpf_link *link = NULL;
	LIBBPF_OPTS(bpf_raw_tp_opts, raw_tp_opts);
	LIBBPF_OPTS(bpf_raw_tracepoint_opts, opts);

	/* There are two different ways to attach raw_tp programs */
	prog_fd = bpf_program__fd(skel->progs.handle_raw_tp);

	/* low-level BPF_RAW_TRACEPOINT_OPEN command wrapper */
	skel->bss->raw_tp_res = 0;

	raw_tp_opts.tp_name = "sys_enter";
	raw_tp_opts.cookie = cookie = 0x55000000000000L;
	link_fd = bpf_raw_tracepoint_open_opts(prog_fd, &raw_tp_opts);
	if (!ASSERT_GE(link_fd, 0, "bpf_raw_tracepoint_open_opts"))
		goto cleanup;

	usleep(1); /* trigger */

	/* cookie must also round-trip through link info */
	err = verify_raw_tp_link_info(link_fd, cookie);
	if (!ASSERT_OK(err, "verify_raw_tp_link_info"))
		goto cleanup;

	close(link_fd); /* detach */
	link_fd = -1; /* mark closed so cleanup doesn't close it again */

	ASSERT_EQ(skel->bss->raw_tp_res, cookie, "raw_tp_open_res");

	/* high-level bpf_link-based bpf_program__attach_raw_tracepoint_opts() API */
	skel->bss->raw_tp_res = 0;

	opts.cookie = cookie = 0x66000000000000L;
	link = bpf_program__attach_raw_tracepoint_opts(skel->progs.handle_raw_tp,
						       "sys_enter", &opts);
	if (!ASSERT_OK_PTR(link, "attach_raw_tp_opts"))
		goto cleanup;

	usleep(1); /* trigger */
	bpf_link__destroy(link); /* detach */
	link = NULL; /* avoid double destroy in cleanup */

	ASSERT_EQ(skel->bss->raw_tp_res, cookie, "attach_raw_tp_opts_res");

cleanup:
	if (link_fd >= 0)
		close(link_fd);
	bpf_link__destroy(link);
}
742
/* Test entry point: load the shared skeleton once, record our TID so the
 * BPF programs can filter out unrelated events, then run each subtest
 * under its own subtest name.
 */
void test_bpf_cookie(void)
{
	struct test_bpf_cookie *skel;

	skel = test_bpf_cookie__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	/* BPF side filters events by this thread id */
	skel->bss->my_tid = sys_gettid();

	if (test__start_subtest("kprobe"))
		kprobe_subtest(skel);
	if (test__start_subtest("multi_kprobe_link_api"))
		kprobe_multi_link_api_subtest();
	if (test__start_subtest("multi_kprobe_attach_api"))
		kprobe_multi_attach_api_subtest();
	if (test__start_subtest("uprobe"))
		uprobe_subtest(skel);
	if (test__start_subtest("multi_uprobe_attach_api"))
		uprobe_multi_attach_api_subtest();
	if (test__start_subtest("tracepoint"))
		tp_subtest(skel);
	if (test__start_subtest("perf_event"))
		pe_subtest(skel);
	if (test__start_subtest("trampoline"))
		tracing_subtest(skel);
	if (test__start_subtest("lsm"))
		lsm_subtest(skel);
	if (test__start_subtest("tp_btf"))
		tp_btf_subtest(skel);
	if (test__start_subtest("raw_tp"))
		raw_tp_subtest(skel);
	test_bpf_cookie__destroy(skel);
}