Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2
3#include <linux/bpf.h>
4#include <bpf/bpf_helpers.h>
5#include "bpf_misc.h"
6#include "bpf_arena_common.h"
7
8#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
9 (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
10 defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) || \
11 defined(__TARGET_ARCH_loongarch)) && \
12 __clang_major__ >= 18
13
/*
 * Single-entry, mmapable BPF arena map backing the "Arena LDSX" tests
 * below (bpf_arena_alloc_pages() allocates out of this arena).
 */
14struct {
15 __uint(type, BPF_MAP_TYPE_ARENA);
16 __uint(map_flags, BPF_F_MMAPABLE);
17 __uint(max_entries, 1);
18} arena SEC(".maps");
19
/*
 * Spill 0x3fe to the stack, then sign-extending-load the byte that holds
 * 0xfe (offset -8 on little-endian, -1 on big-endian). The s8 load must
 * sign-extend 0xfe to -2, checked via __retval(-2).
 */
20SEC("socket")
21__description("LDSX, S8")
22__success __success_unpriv __retval(-2)
23__naked void ldsx_s8(void)
24{
25 asm volatile (
26 "r1 = 0x3fe;"
27 "*(u64 *)(r10 - 8) = r1;"
28#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
29 "r0 = *(s8 *)(r10 - 8);"
30#else
31 "r0 = *(s8 *)(r10 - 1);"
32#endif
33 "exit;"
34 ::: __clobber_all);
35}
36
/*
 * Spill 0x3fffe to the stack, then sign-extending-load the halfword that
 * holds 0xfffe (endianness-adjusted offset). The s16 load must sign-extend
 * 0xfffe to -2, checked via __retval(-2).
 */
37SEC("socket")
38__description("LDSX, S16")
39__success __success_unpriv __retval(-2)
40__naked void ldsx_s16(void)
41{
42 asm volatile (
43 "r1 = 0x3fffe;"
44 "*(u64 *)(r10 - 8) = r1;"
45#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
46 "r0 = *(s16 *)(r10 - 8);"
47#else
48 "r0 = *(s16 *)(r10 - 2);"
49#endif
50 "exit;"
51 ::: __clobber_all);
52}
53
/*
 * Spill 0xfffffffe, then sign-extending-load the word holding it: the s32
 * load yields -2 sign-extended to 64 bits. The (logical) right shift by 1
 * leaves the low 32 bits all-ones, so the test expects __retval(-1).
 */
54SEC("socket")
55__description("LDSX, S32")
56__success __success_unpriv __retval(-1)
57__naked void ldsx_s32(void)
58{
59 asm volatile (
60 "r1 = 0xfffffffe;"
61 "*(u64 *)(r10 - 8) = r1;"
62#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
63 "r0 = *(s32 *)(r10 - 8);"
64#else
65 "r0 = *(s32 *)(r10 - 4);"
66#endif
67 "r0 >>= 1;"
68 "exit;"
69 ::: __clobber_all);
70}
71
/*
 * Range tracking: after an s8 LDSX of an unknown scalar, the verifier must
 * know r1 is within [-128, 127] (asserted on the log via __msg). Both guard
 * branches are then provably dead, so the program returns 1 (__retval(1)).
 * Runs privileged with log_level=2 so the scalar bounds appear in the log.
 */
72SEC("socket")
73__description("LDSX, S8 range checking, privileged")
74__log_level(2) __success __retval(1)
75__msg("R1=scalar(smin=smin32=-128,smax=smax32=127)")
76__naked void ldsx_s8_range_priv(void)
77{
78 asm volatile (
79 "call %[bpf_get_prandom_u32];"
80 "*(u64 *)(r10 - 8) = r0;"
81#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
82 "r1 = *(s8 *)(r10 - 8);"
83#else
84 "r1 = *(s8 *)(r10 - 1);"
85#endif
86 /* r1 with s8 range */
87 "if r1 s> 0x7f goto l0_%=;"
88 "if r1 s< -0x80 goto l0_%=;"
89 "r0 = 1;"
90"l1_%=:"
91 "exit;"
92"l0_%=:"
93 "r0 = 2;"
94 "goto l1_%=;"
95 :
96 : __imm(bpf_get_prandom_u32)
97 : __clobber_all);
98}
99
/*
 * Range tracking: after an s16 LDSX of an unknown scalar, r1 must be within
 * [-0x8000, 0x7fff], so the out-of-range branches never fire and the
 * program returns 1 (__retval(1)).
 */
100SEC("socket")
101__description("LDSX, S16 range checking")
102__success __success_unpriv __retval(1)
103__naked void ldsx_s16_range(void)
104{
105 asm volatile (
106 "call %[bpf_get_prandom_u32];"
107 "*(u64 *)(r10 - 8) = r0;"
108#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
109 "r1 = *(s16 *)(r10 - 8);"
110#else
111 "r1 = *(s16 *)(r10 - 2);"
112#endif
113 /* r1 with s16 range */
114 "if r1 s> 0x7fff goto l0_%=;"
115 "if r1 s< -0x8000 goto l0_%=;"
116 "r0 = 1;"
117"l1_%=:"
118 "exit;"
119"l0_%=:"
120 "r0 = 2;"
121 "goto l1_%=;"
122 :
123 : __imm(bpf_get_prandom_u32)
124 : __clobber_all);
125}
126
/*
 * Range tracking: after an s32 LDSX of an unknown scalar, r1 must be within
 * [-0x80000000, 0x7fffffff], so the out-of-range branches never fire and
 * the program returns 1 (__retval(1)).
 */
127SEC("socket")
128__description("LDSX, S32 range checking")
129__success __success_unpriv __retval(1)
130__naked void ldsx_s32_range(void)
131{
132 asm volatile (
133 "call %[bpf_get_prandom_u32];"
134 "*(u64 *)(r10 - 8) = r0;"
135#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
136 "r1 = *(s32 *)(r10 - 8);"
137#else
138 "r1 = *(s32 *)(r10 - 4);"
139#endif
140 /* r1 with s32 range */
141 "if r1 s> 0x7fffFFFF goto l0_%=;"
142 "if r1 s< -0x80000000 goto l0_%=;"
143 "r0 = 1;"
144"l1_%=:"
145 "exit;"
146"l0_%=:"
147 "r0 = 2;"
148 "goto l1_%=;"
149 :
150 : __imm(bpf_get_prandom_u32)
151 : __clobber_all);
152}
153
/*
 * An s32 (sign-extending) load of xdp_md->data must be rejected by the
 * verifier with "invalid bpf_context access".
 */
154SEC("xdp")
155__description("LDSX, xdp s32 xdp_md->data")
156__failure __msg("invalid bpf_context access")
157__naked void ldsx_ctx_1(void)
158{
159 asm volatile (
160 "r2 = *(s32 *)(r1 + %[xdp_md_data]);"
161 "r0 = 0;"
162 "exit;"
163 :
164 : __imm_const(xdp_md_data, offsetof(struct xdp_md, data))
165 : __clobber_all);
166}
167
/*
 * An s32 load of xdp_md->data_end must be rejected by the verifier with
 * "invalid bpf_context access".
 */
168SEC("xdp")
169__description("LDSX, xdp s32 xdp_md->data_end")
170__failure __msg("invalid bpf_context access")
171__naked void ldsx_ctx_2(void)
172{
173 asm volatile (
174 "r2 = *(s32 *)(r1 + %[xdp_md_data_end]);"
175 "r0 = 0;"
176 "exit;"
177 :
178 : __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
179 : __clobber_all);
180}
181
/*
 * An s32 load of xdp_md->data_meta must be rejected by the verifier with
 * "invalid bpf_context access".
 */
182SEC("xdp")
183__description("LDSX, xdp s32 xdp_md->data_meta")
184__failure __msg("invalid bpf_context access")
185__naked void ldsx_ctx_3(void)
186{
187 asm volatile (
188 "r2 = *(s32 *)(r1 + %[xdp_md_data_meta]);"
189 "r0 = 0;"
190 "exit;"
191 :
192 : __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
193 : __clobber_all);
194}
195
/*
 * tcx: an s32 load of __sk_buff->data must be rejected by the verifier
 * with "invalid bpf_context access".
 */
196SEC("tcx/ingress")
197__description("LDSX, tcx s32 __sk_buff->data")
198__failure __msg("invalid bpf_context access")
199__naked void ldsx_ctx_4(void)
200{
201 asm volatile (
202 "r2 = *(s32 *)(r1 + %[sk_buff_data]);"
203 "r0 = 0;"
204 "exit;"
205 :
206 : __imm_const(sk_buff_data, offsetof(struct __sk_buff, data))
207 : __clobber_all);
208}
209
/*
 * tcx: an s32 load of __sk_buff->data_end must be rejected by the verifier
 * with "invalid bpf_context access".
 */
210SEC("tcx/ingress")
211__description("LDSX, tcx s32 __sk_buff->data_end")
212__failure __msg("invalid bpf_context access")
213__naked void ldsx_ctx_5(void)
214{
215 asm volatile (
216 "r2 = *(s32 *)(r1 + %[sk_buff_data_end]);"
217 "r0 = 0;"
218 "exit;"
219 :
220 : __imm_const(sk_buff_data_end, offsetof(struct __sk_buff, data_end))
221 : __clobber_all);
222}
223
/*
 * tcx: an s32 load of __sk_buff->data_meta must be rejected by the
 * verifier with "invalid bpf_context access".
 */
224SEC("tcx/ingress")
225__description("LDSX, tcx s32 __sk_buff->data_meta")
226__failure __msg("invalid bpf_context access")
227__naked void ldsx_ctx_6(void)
228{
229 asm volatile (
230 "r2 = *(s32 *)(r1 + %[sk_buff_data_meta]);"
231 "r0 = 0;"
232 "exit;"
233 :
234 : __imm_const(sk_buff_data_meta, offsetof(struct __sk_buff, data_meta))
235 : __clobber_all);
236}
237
/*
 * flow_dissector: an s32 load of __sk_buff->data must be rejected by the
 * verifier with "invalid bpf_context access".
 */
238SEC("flow_dissector")
239__description("LDSX, flow_dissector s32 __sk_buff->data")
240__failure __msg("invalid bpf_context access")
241__naked void ldsx_ctx_7(void)
242{
243 asm volatile (
244 "r2 = *(s32 *)(r1 + %[sk_buff_data]);"
245 "r0 = 0;"
246 "exit;"
247 :
248 : __imm_const(sk_buff_data, offsetof(struct __sk_buff, data))
249 : __clobber_all);
250}
251
/*
 * flow_dissector: an s32 load of __sk_buff->data_end must be rejected by
 * the verifier with "invalid bpf_context access".
 */
252SEC("flow_dissector")
253__description("LDSX, flow_dissector s32 __sk_buff->data_end")
254__failure __msg("invalid bpf_context access")
255__naked void ldsx_ctx_8(void)
256{
257 asm volatile (
258 "r2 = *(s32 *)(r1 + %[sk_buff_data_end]);"
259 "r0 = 0;"
260 "exit;"
261 :
262 : __imm_const(sk_buff_data_end, offsetof(struct __sk_buff, data_end))
263 : __clobber_all);
264}
265
/*
 * JIT disassembly check for arena LDSX: allocate one arena page, cast the
 * pointer into the arena address space, then issue s32/s16/s8 loads through
 * two different registers (r0 and r1 hold the same address). The __jited
 * patterns pin the expected x86-64 (movslq/movswq/movsbq with the %r12
 * arena base) and arm64 (ldrsw/ldrsh/ldrsb after adding the x28 arena base)
 * instruction sequences.
 */
266SEC("syscall")
267__description("Arena LDSX Disasm")
268__success
269__arch_x86_64
270__jited("movslq 0x10(%rax,%r12), %r14")
271__jited("movswq 0x18(%rax,%r12), %r14")
272__jited("movsbq 0x20(%rax,%r12), %r14")
273__jited("movslq 0x10(%rdi,%r12), %r15")
274__jited("movswq 0x18(%rdi,%r12), %r15")
275__jited("movsbq 0x20(%rdi,%r12), %r15")
276__arch_arm64
277__jited("add x11, x7, x28")
278__jited("ldrsw x21, [x11, #0x10]")
279__jited("add x11, x7, x28")
280__jited("ldrsh x21, [x11, #0x18]")
281__jited("add x11, x7, x28")
282__jited("ldrsb x21, [x11, #0x20]")
283__jited("add x11, x0, x28")
284__jited("ldrsw x22, [x11, #0x10]")
285__jited("add x11, x0, x28")
286__jited("ldrsh x22, [x11, #0x18]")
287__jited("add x11, x0, x28")
288__jited("ldrsb x22, [x11, #0x20]")
289__naked void arena_ldsx_disasm(void *ctx)
290{
291 asm volatile (
292 "r1 = %[arena] ll;"
293 "r2 = 0;"
294 "r3 = 1;"
295 "r4 = %[numa_no_node];"
296 "r5 = 0;"
297 "call %[bpf_arena_alloc_pages];"
298 "r0 = addr_space_cast(r0, 0x0, 0x1);"
299 "r1 = r0;"
300 "r8 = *(s32 *)(r0 + 16);"
301 "r8 = *(s16 *)(r0 + 24);"
302 "r8 = *(s8 *)(r0 + 32);"
303 "r9 = *(s32 *)(r1 + 16);"
304 "r9 = *(s16 *)(r1 + 24);"
305 "r9 = *(s8 *)(r1 + 32);"
306 "r0 = 0;"
307 "exit;"
308 :: __imm(bpf_arena_alloc_pages),
309 __imm_addr(arena),
310 __imm_const(numa_no_node, NUMA_NO_NODE)
311 : __clobber_all
312 );
313}
314
/*
 * Arena fault path: build a bogus arena pointer from 0xdeadbeef (no page is
 * allocated there), then store to and s8-load from it. __retval(0) expects
 * the program to complete and return 0 — presumably the faulting accesses
 * are absorbed by the JIT's arena exception handling, with the faulting
 * load's destination zeroed. NOTE(review): fault semantics inferred from
 * __retval(0); confirm against the arch JIT arena fault handlers.
 */
315SEC("syscall")
316__description("Arena LDSX Exception")
317__success __retval(0)
318__arch_x86_64
319__arch_arm64
320__naked void arena_ldsx_exception(void *ctx)
321{
322 asm volatile (
323 "r1 = %[arena] ll;"
324 "r0 = 0xdeadbeef;"
325 "r0 = addr_space_cast(r0, 0x0, 0x1);"
326 "r1 = 0x3fe;"
327 "*(u64 *)(r0 + 0) = r1;"
328 "r0 = *(s8 *)(r0 + 0);"
329 "exit;"
330 :
331 : __imm_addr(arena)
332 : __clobber_all
333 );
334}
335
/*
 * Arena variant of the S8 test: allocate one arena page, store 0x3fe, and
 * s8-load the byte holding 0xfe (endianness-adjusted offset), yielding -2
 * sign-extended to 64 bits. The (logical) right shift by 1 leaves the low
 * 32 bits all-ones, so the test expects __retval(-1).
 */
336SEC("syscall")
337__description("Arena LDSX, S8")
338__success __retval(-1)
339__arch_x86_64
340__arch_arm64
341__naked void arena_ldsx_s8(void *ctx)
342{
343 asm volatile (
344 "r1 = %[arena] ll;"
345 "r2 = 0;"
346 "r3 = 1;"
347 "r4 = %[numa_no_node];"
348 "r5 = 0;"
349 "call %[bpf_arena_alloc_pages];"
350 "r0 = addr_space_cast(r0, 0x0, 0x1);"
351 "r1 = 0x3fe;"
352 "*(u64 *)(r0 + 0) = r1;"
353#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
354 "r0 = *(s8 *)(r0 + 0);"
355#else
356 "r0 = *(s8 *)(r0 + 7);"
357#endif
358 "r0 >>= 1;"
359 "exit;"
360 :: __imm(bpf_arena_alloc_pages),
361 __imm_addr(arena),
362 __imm_const(numa_no_node, NUMA_NO_NODE)
363 : __clobber_all
364 );
365}
366
/*
 * Arena variant of the S16 test: store 0x3fffe in an arena page and
 * s16-load the halfword holding 0xfffe, yielding -2 sign-extended to
 * 64 bits; the logical right shift by 1 makes the low 32 bits all-ones,
 * so the test expects __retval(-1).
 */
367SEC("syscall")
368__description("Arena LDSX, S16")
369__success __retval(-1)
370__arch_x86_64
371__arch_arm64
372__naked void arena_ldsx_s16(void *ctx)
373{
374 asm volatile (
375 "r1 = %[arena] ll;"
376 "r2 = 0;"
377 "r3 = 1;"
378 "r4 = %[numa_no_node];"
379 "r5 = 0;"
380 "call %[bpf_arena_alloc_pages];"
381 "r0 = addr_space_cast(r0, 0x0, 0x1);"
382 "r1 = 0x3fffe;"
383 "*(u64 *)(r0 + 0) = r1;"
384#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
385 "r0 = *(s16 *)(r0 + 0);"
386#else
387 "r0 = *(s16 *)(r0 + 6);"
388#endif
389 "r0 >>= 1;"
390 "exit;"
391 :: __imm(bpf_arena_alloc_pages),
392 __imm_addr(arena),
393 __imm_const(numa_no_node, NUMA_NO_NODE)
394 : __clobber_all
395 );
396}
397
/*
 * Arena variant of the S32 test: store 0xfffffffe in an arena page and
 * s32-load the word holding it, yielding -2 sign-extended to 64 bits; the
 * logical right shift by 1 makes the low 32 bits all-ones, so the test
 * expects __retval(-1).
 */
398SEC("syscall")
399__description("Arena LDSX, S32")
400__success __retval(-1)
401__arch_x86_64
402__arch_arm64
403__naked void arena_ldsx_s32(void *ctx)
404{
405 asm volatile (
406 "r1 = %[arena] ll;"
407 "r2 = 0;"
408 "r3 = 1;"
409 "r4 = %[numa_no_node];"
410 "r5 = 0;"
411 "call %[bpf_arena_alloc_pages];"
412 "r0 = addr_space_cast(r0, 0x0, 0x1);"
413 "r1 = 0xfffffffe;"
414 "*(u64 *)(r0 + 0) = r1;"
415#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
416 "r0 = *(s32 *)(r0 + 0);"
417#else
418 "r0 = *(s32 *)(r0 + 4);"
419#endif
420 "r0 >>= 1;"
421 "exit;"
422 :: __imm(bpf_arena_alloc_pages),
423 __imm_addr(arena),
424 __imm_const(numa_no_node, NUMA_NO_NODE)
425 : __clobber_all
426 );
427}
428
429/* to retain debug info for BTF generation */
/*
 * Never executed: referencing bpf_arena_alloc_pages() from C here keeps
 * the kfunc's debug info in the object so BTF for it is emitted.
 */
430void kfunc_root(void)
431{
432 bpf_arena_alloc_pages(0, 0, 0, 0, 0);
433}
434
435#else
436
/*
 * Fallback when the target arch or compiler (clang < 18) lacks cpuv4 LDSX
 * support: load a trivially-passing program so the test file still runs.
 */
437SEC("socket")
438__description("cpuv4 is not supported by compiler or jit, use a dummy test")
439__success
440int dummy_test(void)
441{
442 return 0;
443}
444
445#endif
446
/* License declaration required for BPF programs to be loadable. */
447char _license[] SEC("license") = "GPL";