Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2
3#include <vmlinux.h>
4#include <bpf/bpf_helpers.h>
5#include "bpf_misc.h"
6#include "bpf_experimental.h"
7
8/* From include/linux/filter.h */
9#define MAX_BPF_STACK 512
10
11#if defined(__TARGET_ARCH_x86) || defined(__TARGET_ARCH_arm64)
12
/* Map value embedding a bpf_timer (for the async-callback tests below)
 * plus padding to give the value a larger footprint.
 */
struct elem {
	struct bpf_timer t;
	char pad[256];
};
17
/* Single-entry array map holding the timer used by the async-callback tests. */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} array SEC(".maps");
24
/*
 * Single program (no subprogs) storing 42 at frame offset -256.  The
 * __jited patterns assert the JIT addresses that slot through the
 * private-stack base register (presumably per-CPU: note the %gs-relative
 * add on x86-64 and the TPIDR_ELx read on arm64) instead of the normal
 * frame pointer.
 */
SEC("kprobe")
__description("Private stack, single prog")
__success
__arch_x86_64
/* x86-64: private-stack base built in %r9, store goes to -0x100(%r9) */
__jited(" movabsq $0x{{.*}}, %r9")
__jited(" addq %gs:{{.*}}, %r9")
__jited(" movl $0x2a, %edi")
__jited(" movq %rdi, -0x100(%r9)")
__arch_arm64
/* arm64: base built in x27 via mov/movk plus the thread pointer */
__jited(" stp x25, x27, [sp, {{.*}}]!")
__jited(" mov x27, {{.*}}")
__jited(" movk x27, {{.*}}, lsl #16")
__jited(" movk x27, {{.*}}")
__jited(" mrs x10, TPIDR_EL{{[0-1]}}")
__jited(" add x27, x27, x10")
__jited(" add x25, x27, {{.*}}")
__jited(" mov x0, #0x2a")
__jited(" str x0, [x27]")
__jited("...")
__jited(" ldp x25, x27, [sp], {{.*}}")
__naked void private_stack_single_prog(void)
{
	/* 42 == 0x2a: matched against the stores asserted above */
	asm volatile (" \
	r1 = 42; \
	*(u64 *)(r10 - 256) = r1; \
	r0 = 0; \
	exit; \
" ::: __clobber_all);
}
54
/*
 * Tiny stack usage (a single 8-byte slot): assert the JIT keeps the
 * ordinary stack — a plain %rsp/sp adjustment in the prologue, with none
 * of the private-stack base setup seen in the other tests.
 */
SEC("raw_tp")
__description("No private stack")
__success
__arch_x86_64
__jited(" subq $0x8, %rsp")
__arch_arm64
__jited(" mov x25, sp")
__jited(" sub sp, sp, #0x10")
__naked void no_private_stack_nested(void)
{
	asm volatile (" \
	r1 = 42; \
	*(u64 *)(r10 - 8) = r1; \
	r0 = 0; \
	exit; \
" ::: __clobber_all);
}
72
/*
 * Helper subprog called from several tests below: adds 32 bytes of stack
 * depth on top of the caller's own usage.  __used keeps it emitted even
 * though it is only referenced from inline-asm "call" instructions.
 */
__used
__naked static void cumulative_stack_depth_subprog(void)
{
	asm volatile (" \
	r1 = 41; \
	*(u64 *)(r10 - 32) = r1; \
	call %[bpf_get_smp_processor_id]; \
	exit; \
" :
	: __imm(bpf_get_smp_processor_id)
	: __clobber_all);
}
85
/*
 * Main prog consumes the full MAX_BPF_STACK (512 bytes) and then calls
 * cumulative_stack_depth_subprog (32 more), so the call-subtree depth
 * exceeds MAX_BPF_STACK.  Assert private-stack addressing for the main
 * prog, with the base register preserved across the subprog call.
 */
SEC("kprobe")
__description("Private stack, subtree > MAX_BPF_STACK")
__success
__arch_x86_64
/* private stack fp for the main prog */
__jited(" movabsq $0x{{.*}}, %r9")
__jited(" addq %gs:{{.*}}, %r9")
__jited(" movl $0x2a, %edi")
__jited(" movq %rdi, -0x200(%r9)")
/* %r9 is saved/restored around the subprog call */
__jited(" pushq %r9")
__jited(" callq 0x{{.*}}")
__jited(" popq %r9")
__jited(" xorl %eax, %eax")
__arch_arm64
__jited(" stp x25, x27, [sp, {{.*}}]!")
__jited(" mov x27, {{.*}}")
__jited(" movk x27, {{.*}}, lsl #16")
__jited(" movk x27, {{.*}}")
__jited(" mrs x10, TPIDR_EL{{[0-1]}}")
__jited(" add x27, x27, x10")
__jited(" add x25, x27, {{.*}}")
__jited(" mov x0, #0x2a")
__jited(" str x0, [x27]")
__jited(" bl {{.*}}")
__jited("...")
__jited(" ldp x25, x27, [sp], {{.*}}")
__naked void private_stack_nested_1(void)
{
	/* store at the very bottom of the 512-byte frame, then call subprog */
	asm volatile (" \
	r1 = 42; \
	*(u64 *)(r10 - %[max_bpf_stack]) = r1; \
	call cumulative_stack_depth_subprog; \
	r0 = 0; \
	exit; \
" :
	: __imm_const(max_bpf_stack, MAX_BPF_STACK)
	: __clobber_all);
}
124
/*
 * Callback for bpf_loop() in the test below: calls a helper, uses the
 * full 512 bytes of stack, then calls the depth-adding subprog — forcing
 * private-stack codegen inside the callback itself.
 */
__naked __noinline __used
static unsigned long loop_callback(void)
{
	asm volatile (" \
	call %[bpf_get_prandom_u32]; \
	r1 = 42; \
	*(u64 *)(r10 - 512) = r1; \
	call cumulative_stack_depth_subprog; \
	r0 = 0; \
	exit; \
" :
	: __imm(bpf_get_prandom_u32)
	: __clobber_common);
}
139
/*
 * Drive loop_callback via bpf_loop().  The "func #1" patterns below
 * assert the callback (not the main prog) gets the private-stack setup,
 * with the base register saved/restored around each call it makes.
 */
SEC("raw_tp")
__description("Private stack, callback")
__success
__arch_x86_64
/* for func loop_callback */
__jited("func #1")
__jited(" endbr64")
__jited(" nopl (%rax,%rax)")
__jited(" nopl (%rax)")
__jited(" pushq %rbp")
__jited(" movq %rsp, %rbp")
__jited(" endbr64")
__jited(" movabsq $0x{{.*}}, %r9")
__jited(" addq %gs:{{.*}}, %r9")
__jited(" pushq %r9")
__jited(" callq")
__jited(" popq %r9")
__jited(" movl $0x2a, %edi")
__jited(" movq %rdi, -0x200(%r9)")
__jited(" pushq %r9")
__jited(" callq")
__jited(" popq %r9")
__arch_arm64
__jited("func #1")
__jited("...")
__jited(" stp x25, x27, [sp, {{.*}}]!")
__jited(" mov x27, {{.*}}")
__jited(" movk x27, {{.*}}, lsl #16")
__jited(" movk x27, {{.*}}")
__jited(" mrs x10, TPIDR_EL{{[0-1]}}")
__jited(" add x27, x27, x10")
__jited(" add x25, x27, {{.*}}")
__jited(" bl 0x{{.*}}")
__jited(" mov x7, x0")
__jited(" mov x0, #0x2a")
__jited(" str x0, [x27]")
__jited(" bl 0x{{.*}}")
__jited(" mov x7, x0")
__jited(" mov x7, #0x0")
__jited(" ldp x25, x27, [sp], {{.*}}")
__naked void private_stack_callback(void)
{
	/* bpf_loop(nr_loops=1, callback_fn=loop_callback, ctx=0, flags=0) */
	asm volatile (" \
	r1 = 1; \
	r2 = %[loop_callback]; \
	r3 = 0; \
	r4 = 0; \
	call %[bpf_loop]; \
	r0 = 0; \
	exit; \
" :
	: __imm_ptr(loop_callback),
	__imm(bpf_loop)
	: __clobber_common);
}
195
/*
 * Exception thrown (bpf_throw) directly from a main prog that uses the
 * deepest stack slot (r10 - 512).  Assert the private-stack base is still
 * set up and preserved (%r9 push/pop on x86-64; the x27/TPIDR sequence
 * inside the full callee-saved prologue on arm64).
 */
SEC("fentry/bpf_fentry_test9")
__description("Private stack, exception in main prog")
__success __retval(0)
__arch_x86_64
__jited(" pushq %r9")
__jited(" callq")
__jited(" popq %r9")
__arch_arm64
__jited(" stp x29, x30, [sp, #-0x10]!")
__jited(" mov x29, sp")
__jited(" stp xzr, x26, [sp, #-0x10]!")
__jited(" mov x26, sp")
__jited(" stp x19, x20, [sp, #-0x10]!")
__jited(" stp x21, x22, [sp, #-0x10]!")
__jited(" stp x23, x24, [sp, #-0x10]!")
__jited(" stp x25, x26, [sp, #-0x10]!")
__jited(" stp x27, x28, [sp, #-0x10]!")
__jited(" mov x27, {{.*}}")
__jited(" movk x27, {{.*}}, lsl #16")
__jited(" movk x27, {{.*}}")
__jited(" mrs x10, TPIDR_EL{{[0-1]}}")
__jited(" add x27, x27, x10")
__jited(" add x25, x27, {{.*}}")
__jited(" mov x0, #0x2a")
__jited(" str x0, [x27]")
__jited(" mov x0, #0x0")
__jited(" bl 0x{{.*}}")
__jited(" mov x7, x0")
__jited(" ldp x27, x28, [sp], #0x10")
int private_stack_exception_main_prog(void)
{
	asm volatile (" \
	r1 = 42; \
	*(u64 *)(r10 - 512) = r1; \
" ::: __clobber_common);

	bpf_throw(0);
	return 0;
}
235
/* Subprog that immediately throws; called from the test below via asm. */
__used static int subprog_exception(void)
{
	bpf_throw(0);
	return 0;
}
241
/*
 * Exception thrown from a subprog while the main prog uses the deepest
 * stack slot.  Assert the main prog still stores through the private-stack
 * base and preserves it (%r9 push/pop; x27 sequence on arm64) around the
 * throwing call.
 */
SEC("fentry/bpf_fentry_test9")
__description("Private stack, exception in subprog")
__success __retval(0)
__arch_x86_64
__jited(" movq %rdi, -0x200(%r9)")
__jited(" pushq %r9")
__jited(" callq")
__jited(" popq %r9")
__arch_arm64
__jited(" stp x27, x28, [sp, #-0x10]!")
__jited(" mov x27, {{.*}}")
__jited(" movk x27, {{.*}}, lsl #16")
__jited(" movk x27, {{.*}}")
__jited(" mrs x10, TPIDR_EL{{[0-1]}}")
__jited(" add x27, x27, x10")
__jited(" add x25, x27, {{.*}}")
__jited(" mov x0, #0x2a")
__jited(" str x0, [x27]")
__jited(" bl 0x{{.*}}")
__jited(" mov x7, x0")
__jited(" ldp x27, x28, [sp], #0x10")
int private_stack_exception_sub_prog(void)
{
	asm volatile (" \
	r1 = 42; \
	*(u64 *)(r10 - 512) = r1; \
	call subprog_exception; \
" ::: __clobber_common);

	return 0;
}
273
/* Global sink so the subprog chain below has an observable side effect. */
int glob;

/* Leaf subprog: accumulates twice the first element of @val into glob. */
__noinline static void subprog2(int *val)
{
	glob += val[0] * 2;
}
279
/*
 * Mid-level subprog: deliberately burns 256 bytes of stack (tmp[64] ints)
 * before calling subprog2, to contribute real stack depth to its callers.
 */
__noinline static void subprog1(int *val)
{
	int tmp[64] = {};

	tmp[0] = *val;
	subprog2(tmp);
}
287
/* Timer callback with real stack usage (via subprog1) — used by the
 * "potential nesting" test, where the callback shares subprogs with the
 * main prog.
 */
__noinline static int timer_cb1(void *map, int *key, struct bpf_timer *timer)
{
	subprog1(key);
	return 0;
}
293
/* Trivial timer callback (no shared subprogs) — the "not nested" case. */
__noinline static int timer_cb2(void *map, int *key, struct bpf_timer *timer)
{
	return 0;
}
298
/*
 * Async (timer) callback is the trivial timer_cb2, which shares no deep
 * subprogs with the main prog — so no nesting is possible.  Assert the
 * private-stack base setup is still emitted for the main prog.
 */
SEC("fentry/bpf_fentry_test9")
__description("Private stack, async callback, not nested")
__success __retval(0)
__arch_x86_64
__jited(" movabsq $0x{{.*}}, %r9")
__arch_arm64
__jited(" mrs x10, TPIDR_EL{{[0-1]}}")
__jited(" add x27, x27, x10")
__jited(" add x25, x27, {{.*}}")
int private_stack_async_callback_1(void)
{
	struct bpf_timer *arr_timer;
	int array_key = 0;

	arr_timer = bpf_map_lookup_elem(&array, &array_key);
	if (!arr_timer)
		return 0;

	/* clockid 1 == CLOCK_MONOTONIC */
	bpf_timer_init(arr_timer, &array, 1);
	bpf_timer_set_callback(arr_timer, timer_cb2);
	bpf_timer_start(arr_timer, 0, 0);
	subprog1(&array_key);
	return 0;
}
323
/*
 * Async callback is timer_cb1, which reaches subprog1/subprog2 — the same
 * subprogs the main prog calls — so the async callback could nest with
 * them.  Assert the JIT falls back to a plain stack adjustment
 * (0x100 == 256 bytes, subprog1's frame) instead of the private-stack
 * setup, presumably because nesting rules out a private stack here.
 */
SEC("fentry/bpf_fentry_test9")
__description("Private stack, async callback, potential nesting")
__success __retval(0)
__arch_x86_64
__jited(" subq $0x100, %rsp")
__arch_arm64
__jited(" sub sp, sp, #0x100")
int private_stack_async_callback_2(void)
{
	struct bpf_timer *arr_timer;
	int array_key = 0;

	arr_timer = bpf_map_lookup_elem(&array, &array_key);
	if (!arr_timer)
		return 0;

	/* clockid 1 == CLOCK_MONOTONIC */
	bpf_timer_init(arr_timer, &array, 1);
	bpf_timer_set_callback(arr_timer, timer_cb1);
	bpf_timer_start(arr_timer, 0, 0);
	subprog1(&array_key);
	return 0;
}
346
347#else
348
/* Fallback for architectures without the x86-64/arm64 private-stack JIT
 * support gated above: load an empty prog so the test file still passes.
 */
SEC("kprobe")
__description("private stack is not supported, use a dummy test")
__success
int dummy_test(void)
{
	return 0;
}
356
357#endif
358
/* License declaration required for BPF programs using GPL-only helpers. */
char _license[] SEC("license") = "GPL";