Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (c) 2020 Facebook
3#include "vmlinux.h"
4#include <asm/unistd.h>
5#include <bpf/bpf_helpers.h>
6#include <bpf/bpf_tracing.h>
7#include "bpf_misc.h"
8#include "bpf/usdt.bpf.h"
9
10char _license[] SEC("license") = "GPL";
11
12#define CPU_MASK 255
13#define MAX_CPUS (CPU_MASK + 1) /* should match MAX_BUCKETS in benchs/bench_trigger.c */
14
/* matches struct counter in bench.h */
struct counter {
	long value;
} __attribute__((aligned(128)));
/* aligned(128) gives each counter its own cache-line-sized slot —
 * presumably to avoid false sharing between CPUs hammering adjacent
 * entries; must stay in sync with user-space struct counter in bench.h.
 */

/* Per-CPU hit counters, indexed by (smp_processor_id & CPU_MASK). */
struct counter hits[MAX_CPUS];
21
22static __always_inline void inc_counter(void)
23{
24 int cpu = bpf_get_smp_processor_id();
25
26 __sync_add_and_fetch(&hits[cpu & CPU_MASK].value, 1);
27}
28
/* Read-only knob set by user space before load: non-zero makes the
 * handlers also capture a stack trace on every hit.
 */
volatile const int stacktrace;

/* Scratch buffer type with room for 128 stack frame addresses. */
typedef __u64 stack_trace_t[128];

/* Single-slot per-CPU scratch area used by do_stacktrace(); keeps the
 * 1KB trace buffer off the (small) BPF stack.
 */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, stack_trace_t);
} stack_heap SEC(".maps");
39
40static __always_inline void do_stacktrace(void *ctx)
41{
42 if (!stacktrace)
43 return;
44
45 __u64 *ptr = bpf_map_lookup_elem(&stack_heap, &(__u32){0});
46
47 if (ptr)
48 bpf_get_stack(ctx, ptr, sizeof(stack_trace_t), 0);
49}
50
/* Common per-trigger work: count the hit, then optionally record a
 * stack trace (see the 'stacktrace' knob).
 */
static __always_inline void handle(void *ctx)
{
	inc_counter();
	do_stacktrace(ctx);
}
56
/* Benchmark body for the classic single-attach uprobe; only counts hits
 * (no stack-trace option on the uprobe paths).
 */
SEC("?uprobe")
int bench_trigger_uprobe(void *ctx)
{
	inc_counter();
	return 0;
}
63
/* Same as bench_trigger_uprobe, but attached via the uprobe-multi link. */
SEC("?uprobe.multi")
int bench_trigger_uprobe_multi(void *ctx)
{
	inc_counter();
	return 0;
}
70
/* Number of trigger iterations per driver-program invocation; set by
 * user space before load to amortize the syscall cost of each run.
 */
const volatile int batch_iters = 0;
72
73SEC("?raw_tp")
74int trigger_kernel_count(void *ctx)
75{
76 int i;
77
78 for (i = 0; i < batch_iters; i++) {
79 inc_counter();
80 bpf_get_numa_node_id();
81 }
82
83 return 0;
84}
85
86SEC("?raw_tp")
87int trigger_driver(void *ctx)
88{
89 int i;
90
91 for (i = 0; i < batch_iters; i++)
92 (void)bpf_get_numa_node_id(); /* attach point for benchmarking */
93
94 return 0;
95}
96
97extern int bpf_modify_return_test_tp(int nonce) __ksym __weak;
98
99SEC("?raw_tp")
100int trigger_driver_kfunc(void *ctx)
101{
102 int i;
103
104 for (i = 0; i < batch_iters; i++)
105 (void)bpf_modify_return_test_tp(0); /* attach point for benchmarking */
106
107 return 0;
108}
109
/* kprobe benchmark: fires on every bpf_get_numa_node_id() call made by
 * trigger_driver().
 */
SEC("?kprobe/bpf_get_numa_node_id")
int bench_trigger_kprobe(void *ctx)
{
	handle(ctx);
	return 0;
}

/* kretprobe benchmark: fires on return from bpf_get_numa_node_id(). */
SEC("?kretprobe/bpf_get_numa_node_id")
int bench_trigger_kretprobe(void *ctx)
{
	handle(ctx);
	return 0;
}
123
/* kprobe-multi benchmark: same trigger as bench_trigger_kprobe, attached
 * via the multi-kprobe link.
 */
SEC("?kprobe.multi/bpf_get_numa_node_id")
int bench_trigger_kprobe_multi(void *ctx)
{
	handle(ctx);
	return 0;
}

/* Empty body variant: measures pure kprobe-multi attachment overhead
 * without any counter/stack work.
 */
SEC("?kprobe.multi/bpf_get_numa_node_id")
int bench_kprobe_multi_empty(void *ctx)
{
	return 0;
}
136
/* kretprobe-multi benchmark: fires on return, attached via the
 * multi-kprobe link.
 */
SEC("?kretprobe.multi/bpf_get_numa_node_id")
int bench_trigger_kretprobe_multi(void *ctx)
{
	handle(ctx);
	return 0;
}

/* Empty body variant: measures pure kretprobe-multi overhead. */
SEC("?kretprobe.multi/bpf_get_numa_node_id")
int bench_kretprobe_multi_empty(void *ctx)
{
	return 0;
}
149
/* fentry (trampoline entry) benchmark on the driver's attach point. */
SEC("?fentry/bpf_get_numa_node_id")
int bench_trigger_fentry(void *ctx)
{
	handle(ctx);
	return 0;
}

/* fexit (trampoline exit) benchmark on the driver's attach point. */
SEC("?fexit/bpf_get_numa_node_id")
int bench_trigger_fexit(void *ctx)
{
	handle(ctx);
	return 0;
}
163
/* fmod_ret benchmark on the test kfunc driven by trigger_driver_kfunc().
 * NOTE(review): returns -22 (-EINVAL) — presumably so the modified return
 * value takes the kfunc's early-exit path; confirm against
 * bpf_modify_return_test_tp() semantics.
 */
SEC("?fmod_ret/bpf_modify_return_test_tp")
int bench_trigger_fmodret(void *ctx)
{
	handle(ctx);
	return -22;
}
170
/* Classic tracepoint benchmark on the bpf_trigger_tp test tracepoint. */
SEC("?tp/bpf_test_run/bpf_trigger_tp")
int bench_trigger_tp(void *ctx)
{
	handle(ctx);
	return 0;
}

/* Raw tracepoint benchmark on the same test tracepoint. */
SEC("?raw_tp/bpf_trigger_tp")
int bench_trigger_rawtp(void *ctx)
{
	handle(ctx);
	return 0;
}
184
/* USDT (user-space statically defined tracepoint) benchmark; like the
 * uprobe variants it only counts hits.
 */
SEC("?usdt")
int bench_trigger_usdt(void *ctx)
{
	inc_counter();
	return 0;
}