Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2/* Converted from tools/testing/selftests/bpf/verifier/sock.c */
3
4#include "vmlinux.h"
5#include <bpf/bpf_helpers.h>
6#include "bpf_misc.h"
7
/* Single-slot REUSEPORT_SOCKARRAY used by the bpf_sk_select_reuseport tests. */
struct {
	__uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} map_reuseport_array SEC(".maps");

/* Single-slot SOCKHASH used by the lookup/release and select_reuseport tests. */
struct {
	__uint(type, BPF_MAP_TYPE_SOCKHASH);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, int);
} map_sockhash SEC(".maps");

/* Single-slot SOCKMAP used by the lookup/release and select_reuseport tests. */
struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, int);
} map_sockmap SEC(".maps");

/* Single-slot XSKMAP used by the xs->queue_id lookup test. */
struct {
	__uint(type, BPF_MAP_TYPE_XSKMAP);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, int);
} map_xskmap SEC(".maps");

/* Value type for sk_storage_map: a counter plus a bpf_spin_lock. */
struct val {
	int cnt;
	struct bpf_spin_lock l;
};

/* SK_STORAGE map used by the bpf_sk_storage_get tests (created with
 * BPF_F_NO_PREALLOC and max_entries of 0).
 */
struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(max_entries, 0);
	__type(key, int);
	__type(value, struct val);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} sk_storage_map SEC(".maps");

/* PROG_ARRAY serving as the tail-call target in the tail-call tests below. */
struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 1);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
55
/* Dereferencing skb->sk (sock_common_or_null) without a NULL check must be
 * rejected with "invalid mem access 'sock_common_or_null'".
 */
SEC("cgroup/skb")
__description("skb->sk: no NULL check")
__failure __msg("invalid mem access 'sock_common_or_null'")
__failure_unpriv
__naked void skb_sk_no_null_check(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	r0 = *(u32*)(r1 + 0);				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

/* After a NULL check, reading sk->family (a non-fullsock field) straight off
 * skb->sk is allowed.
 */
SEC("cgroup/skb")
__description("skb->sk: sk->family [non fullsock field]")
__success __success_unpriv __retval(0)
__naked void sk_family_non_fullsock_field_1(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	r0 = *(u32*)(r1 + %[bpf_sock_family]);		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_family, offsetof(struct bpf_sock, family))
	: __clobber_all);
}

/* Reading sk->type (a fullsock-only field) from skb->sk without converting it
 * through bpf_sk_fullsock() must fail with "invalid sock_common access".
 */
SEC("cgroup/skb")
__description("skb->sk: sk->type [fullsock field]")
__failure __msg("invalid sock_common access")
__failure_unpriv
__naked void sk_sk_type_fullsock_field_1(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	r0 = *(u32*)(r1 + %[bpf_sock_type]);		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
	: __clobber_all);
}
110
/* Passing skb->sk to bpf_sk_fullsock() without first checking it for NULL
 * must fail: the helper expects sock_common, not sock_common_or_null.
 */
SEC("cgroup/skb")
__description("bpf_sk_fullsock(skb->sk): no !skb->sk check")
__failure __msg("type=sock_common_or_null expected=sock_common")
__failure_unpriv
__naked void sk_no_skb_sk_check_1(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	call %[bpf_sk_fullsock];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

/* bpf_sk_fullsock() returns sock_or_null; dereferencing the return value
 * without a NULL check must be rejected.
 */
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): no NULL check on ret")
__failure __msg("invalid mem access 'sock_or_null'")
__failure_unpriv
__naked void no_null_check_on_ret_1(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	r0 = *(u32*)(r0 + %[bpf_sock_type]);		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
	: __clobber_all);
}

/* With both skb->sk and the bpf_sk_fullsock() return NULL-checked, reading
 * the fullsock-only field sk->type is allowed.
 */
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->type [fullsock field]")
__success __success_unpriv __retval(0)
__naked void sk_sk_type_fullsock_field_2(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u32*)(r0 + %[bpf_sock_type]);		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
	: __clobber_all);
}

/* Reading the non-fullsock field sk->family through a NULL-checked
 * bpf_sk_fullsock() result is also allowed.
 */
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->family [non fullsock field]")
__success __success_unpriv __retval(0)
__naked void sk_family_non_fullsock_field_2(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	exit;						\
l1_%=:	r0 = *(u32*)(r0 + %[bpf_sock_family]);		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_family, offsetof(struct bpf_sock, family))
	: __clobber_all);
}
196
/* A 1-byte (narrow) load of sk->state from a NULL-checked fullsock pointer
 * is accepted.
 */
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->state [narrow load]")
__success __success_unpriv __retval(0)
__naked void sk_sk_state_narrow_load(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u8*)(r0 + %[bpf_sock_state]);		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_state, offsetof(struct bpf_sock, state))
	: __clobber_all);
}

/* A 4-byte load at sk->dst_port is still allowed for backward compatibility,
 * even though the port itself is narrower than a word.
 */
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_port [word load] (backward compatibility)")
__success __success_unpriv __retval(0)
__naked void port_word_load_backward_compatibility(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u32*)(r0 + %[bpf_sock_dst_port]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port))
	: __clobber_all);
}
244
/* A 2-byte load at the start of sk->dst_port is valid. */
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_port [half load]")
__success __success_unpriv __retval(0)
__naked void sk_dst_port_half_load(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u16*)(r0 + %[bpf_sock_dst_port]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port))
	: __clobber_all);
}

/* A 2-byte load at dst_port + 2 (past the 2-byte port field) is rejected. */
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_port [half load] (invalid)")
__failure __msg("invalid sock access")
__failure_unpriv
__naked void dst_port_half_load_invalid_1(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u16*)(r0 + %[__imm_0]);			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 2),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

/* Byte loads of both bytes inside sk->dst_port (offsets +0 and +1) are
 * valid.
 */
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_port [byte load]")
__success __success_unpriv __retval(0)
__naked void sk_dst_port_byte_load(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r2 = *(u8*)(r0 + %[bpf_sock_dst_port]);		\
	r2 = *(u8*)(r0 + %[__imm_0]);			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 1),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port))
	: __clobber_all);
}

/* A byte load at dst_port + 2 (just past the port field) is rejected. */
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_port [byte load] (invalid)")
__failure __msg("invalid sock access")
__failure_unpriv
__naked void dst_port_byte_load_invalid(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u8*)(r0 + %[__imm_0]);			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 2),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

/* A 2-byte load starting at offsetofend(dst_port) is rejected. */
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): past sk->dst_port [half load] (invalid)")
__failure __msg("invalid sock access")
__failure_unpriv
__naked void dst_port_half_load_invalid_2(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u16*)(r0 + %[bpf_sock_dst_port__end]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_dst_port__end, offsetofend(struct bpf_sock, dst_port))
	: __clobber_all);
}
369
/* A byte load at an unaligned offset inside sk->dst_ip6 (2nd byte of the
 * first word) is valid.
 */
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_ip6 [load 2nd byte]")
__success __success_unpriv __retval(0)
__naked void dst_ip6_load_2nd_byte(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u8*)(r0 + %[__imm_0]);			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__imm_0, offsetof(struct bpf_sock, dst_ip6[0]) + 1),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

/* Narrow (1-byte) load of sk->type through a fullsock pointer is valid. */
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->type [narrow load]")
__success __success_unpriv __retval(0)
__naked void sk_sk_type_narrow_load(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u8*)(r0 + %[bpf_sock_type]);		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
	: __clobber_all);
}

/* Narrow (1-byte) load of sk->protocol through a fullsock pointer is valid. */
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->protocol [narrow load]")
__success __success_unpriv __retval(0)
__naked void sk_sk_protocol_narrow_load(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u8*)(r0 + %[bpf_sock_protocol]);		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_protocol, offsetof(struct bpf_sock, protocol))
	: __clobber_all);
}
441
/* A load starting right past the last struct bpf_sock field
 * (offsetofend(rx_queue_mapping)) must be rejected.
 */
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): beyond last field")
__failure __msg("invalid sock access")
__failure_unpriv
__naked void skb_sk_beyond_last_field_1(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u32*)(r0 + %[bpf_sock_rx_queue_mapping__end]);\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_rx_queue_mapping__end, offsetofend(struct bpf_sock, rx_queue_mapping))
	: __clobber_all);
}
466
/* Passing skb->sk to bpf_tcp_sock() without a NULL check must fail: the
 * helper expects sock_common, not sock_common_or_null.
 */
SEC("cgroup/skb")
__description("bpf_tcp_sock(skb->sk): no !skb->sk check")
__failure __msg("type=sock_common_or_null expected=sock_common")
__failure_unpriv
__naked void sk_no_skb_sk_check_2(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	call %[bpf_tcp_sock];				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_tcp_sock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

/* bpf_tcp_sock() returns tcp_sock_or_null; dereferencing without a NULL
 * check must be rejected.
 */
SEC("cgroup/skb")
__description("bpf_tcp_sock(skb->sk): no NULL check on ret")
__failure __msg("invalid mem access 'tcp_sock_or_null'")
__failure_unpriv
__naked void no_null_check_on_ret_2(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_tcp_sock];				\
	r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_tcp_sock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd))
	: __clobber_all);
}

/* Reading tp->snd_cwnd through a NULL-checked bpf_tcp_sock() result is
 * allowed.
 */
SEC("cgroup/skb")
__description("bpf_tcp_sock(skb->sk): tp->snd_cwnd")
__success __success_unpriv __retval(0)
__naked void skb_sk_tp_snd_cwnd_1(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_tcp_sock];				\
	if r0 != 0 goto l1_%=;				\
	exit;						\
l1_%=:	r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_tcp_sock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd))
	: __clobber_all);
}

/* Reading the 8-byte tp->bytes_acked through a NULL-checked bpf_tcp_sock()
 * result is allowed.
 */
SEC("cgroup/skb")
__description("bpf_tcp_sock(skb->sk): tp->bytes_acked")
__success __success_unpriv __retval(0)
__naked void skb_sk_tp_bytes_acked(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_tcp_sock];				\
	if r0 != 0 goto l1_%=;				\
	exit;						\
l1_%=:	r0 = *(u64*)(r0 + %[bpf_tcp_sock_bytes_acked]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_tcp_sock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_tcp_sock_bytes_acked, offsetof(struct bpf_tcp_sock, bytes_acked))
	: __clobber_all);
}
551
/* A load past the last struct bpf_tcp_sock field
 * (offsetofend(bytes_acked)) must be rejected.
 */
SEC("cgroup/skb")
__description("bpf_tcp_sock(skb->sk): beyond last field")
__failure __msg("invalid tcp_sock access")
__failure_unpriv
__naked void skb_sk_beyond_last_field_2(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_tcp_sock];				\
	if r0 != 0 goto l1_%=;				\
	exit;						\
l1_%=:	r0 = *(u64*)(r0 + %[bpf_tcp_sock_bytes_acked__end]);\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_tcp_sock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_tcp_sock_bytes_acked__end, offsetofend(struct bpf_tcp_sock, bytes_acked))
	: __clobber_all);
}

/* Chaining bpf_sk_fullsock() into bpf_tcp_sock(), with NULL checks after
 * each call, is allowed.
 */
SEC("cgroup/skb")
__description("bpf_tcp_sock(bpf_sk_fullsock(skb->sk)): tp->snd_cwnd")
__success __success_unpriv __retval(0)
__naked void skb_sk_tp_snd_cwnd_2(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	exit;						\
l1_%=:	r1 = r0;					\
	call %[bpf_tcp_sock];				\
	if r0 != 0 goto l2_%=;				\
	exit;						\
l2_%=:	r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm(bpf_tcp_sock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd))
	: __clobber_all);
}
603
/* skb->sk is not a reference-counted (acquired) pointer, so it must not be
 * passed to bpf_sk_release().
 */
SEC("tc")
__description("bpf_sk_release(skb->sk)")
__failure __msg("R1 must be referenced when passed to release function")
__naked void bpf_sk_release_skb_sk(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 == 0 goto l0_%=;				\
	call %[bpf_sk_release];				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_release),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

/* bpf_sk_fullsock(skb->sk) does not take a reference either, so releasing
 * its return value must be rejected.
 */
SEC("tc")
__description("bpf_sk_release(bpf_sk_fullsock(skb->sk))")
__failure __msg("R1 must be referenced when passed to release function")
__naked void bpf_sk_fullsock_skb_sk(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	exit;						\
l1_%=:	r1 = r0;					\
	call %[bpf_sk_release];				\
	r0 = 1;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm(bpf_sk_release),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

/* bpf_tcp_sock(skb->sk) likewise returns an unreferenced pointer; releasing
 * it must be rejected.
 */
SEC("tc")
__description("bpf_sk_release(bpf_tcp_sock(skb->sk))")
__failure __msg("R1 must be referenced when passed to release function")
__naked void bpf_tcp_sock_skb_sk(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_tcp_sock];				\
	if r0 != 0 goto l1_%=;				\
	exit;						\
l1_%=:	r1 = r0;					\
	call %[bpf_sk_release];				\
	r0 = 1;						\
	exit;						\
"	:
	: __imm(bpf_sk_release),
	  __imm(bpf_tcp_sock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}
668
/* bpf_sk_storage_get(map, sk, NULL, 0): a NULL value with flags 0 is a
 * plain lookup and is accepted.
 */
SEC("tc")
__description("sk_storage_get(map, skb->sk, NULL, 0): value == NULL")
__success __retval(0)
__naked void sk_null_0_value_null(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r4 = 0;						\
	r3 = 0;						\
	r2 = r0;					\
	r1 = %[sk_storage_map] ll;			\
	call %[bpf_sk_storage_get];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm(bpf_sk_storage_get),
	  __imm_addr(sk_storage_map),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

/* Passing the scalar 1 as the value argument (R3) must be rejected: the
 * helper expects a pointer to stack memory there.
 */
SEC("tc")
__description("sk_storage_get(map, skb->sk, 1, 1): value == 1")
__failure __msg("R3 type=scalar expected=fp")
__naked void sk_1_1_value_1(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r4 = 1;						\
	r3 = 1;						\
	r2 = r0;					\
	r1 = %[sk_storage_map] ll;			\
	call %[bpf_sk_storage_get];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm(bpf_sk_storage_get),
	  __imm_addr(sk_storage_map),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

/* Passing a pointer to an initialized stack slot as the value is accepted. */
SEC("tc")
__description("sk_storage_get(map, skb->sk, &stack_value, 1): stack_value")
__success __retval(0)
__naked void stack_value_1_stack_value(void)
{
	asm volatile ("					\
	r2 = 0;						\
	*(u64*)(r10 - 8) = r2;				\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r4 = 1;						\
	r3 = r10;					\
	r3 += -8;					\
	r2 = r0;					\
	r1 = %[sk_storage_map] ll;			\
	call %[bpf_sk_storage_get];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm(bpf_sk_storage_get),
	  __imm_addr(sk_storage_map),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}
758
/* SK_STORAGE maps (map_type 24) cannot be used with the generic
 * bpf_map_lookup_elem() helper.
 */
SEC("tc")
__description("bpf_map_lookup_elem(smap, &key)")
__failure __msg("cannot pass map_type 24 into func bpf_map_lookup_elem")
__naked void map_lookup_elem_smap_key(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[sk_storage_map] ll;			\
	call %[bpf_map_lookup_elem];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(sk_storage_map)
	: __clobber_all);
}

/* Looking up an XSKMAP entry and, after a NULL check, reading xs->queue_id
 * is accepted.
 */
SEC("xdp")
__description("bpf_map_lookup_elem(xskmap, &key); xs->queue_id")
__success __retval(0)
__naked void xskmap_key_xs_queue_id(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_xskmap] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r0 = *(u32*)(r0 + %[bpf_xdp_sock_queue_id]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_xskmap),
	  __imm_const(bpf_xdp_sock_queue_id, offsetof(struct bpf_xdp_sock, queue_id))
	: __clobber_all);
}

/* A SOCKMAP lookup acquires a socket reference; exiting without releasing
 * it must fail (the reference is allocated at the call insn, index 6).
 */
SEC("sk_skb")
__description("bpf_map_lookup_elem(sockmap, &key)")
__failure __msg("Unreleased reference id=2 alloc_insn=6")
__naked void map_lookup_elem_sockmap_key(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_sockmap] ll;				\
	call %[bpf_map_lookup_elem];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_sockmap)
	: __clobber_all);
}

/* Same as above for SOCKHASH: the looked-up reference must be released. */
SEC("sk_skb")
__description("bpf_map_lookup_elem(sockhash, &key)")
__failure __msg("Unreleased reference id=2 alloc_insn=6")
__naked void map_lookup_elem_sockhash_key(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_sockhash] ll;			\
	call %[bpf_map_lookup_elem];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_sockhash)
	: __clobber_all);
}
842
/* SOCKMAP lookup, read the fullsock field sk->type, then release the
 * reference with bpf_sk_release() — the balanced case succeeds.
 */
SEC("sk_skb")
__description("bpf_map_lookup_elem(sockmap, &key); sk->type [fullsock field]; bpf_sk_release(sk)")
__success
__naked void field_bpf_sk_release_sk_1(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_sockmap] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r1 = r0;					\
	r0 = *(u32*)(r0 + %[bpf_sock_type]);		\
	call %[bpf_sk_release];				\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_sk_release),
	  __imm_addr(map_sockmap),
	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
	: __clobber_all);
}

/* Same balanced lookup/read/release sequence against the SOCKHASH map. */
SEC("sk_skb")
__description("bpf_map_lookup_elem(sockhash, &key); sk->type [fullsock field]; bpf_sk_release(sk)")
__success
__naked void field_bpf_sk_release_sk_2(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_sockhash] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r1 = r0;					\
	r0 = *(u32*)(r0 + %[bpf_sock_type]);		\
	call %[bpf_sk_release];				\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_sk_release),
	  __imm_addr(map_sockhash),
	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
	: __clobber_all);
}
894
/* bpf_sk_select_reuseport() accepts a REUSEPORT_SOCKARRAY map. */
SEC("sk_reuseport")
__description("bpf_sk_select_reuseport(ctx, reuseport_array, &key, flags)")
__success
__naked void ctx_reuseport_array_key_flags(void)
{
	asm volatile ("					\
	r4 = 0;						\
	r2 = 0;						\
	*(u32*)(r10 - 4) = r2;				\
	r3 = r10;					\
	r3 += -4;					\
	r2 = %[map_reuseport_array] ll;			\
	call %[bpf_sk_select_reuseport];		\
	exit;						\
"	:
	: __imm(bpf_sk_select_reuseport),
	  __imm_addr(map_reuseport_array)
	: __clobber_all);
}

/* bpf_sk_select_reuseport() also accepts a SOCKMAP. */
SEC("sk_reuseport")
__description("bpf_sk_select_reuseport(ctx, sockmap, &key, flags)")
__success
__naked void reuseport_ctx_sockmap_key_flags(void)
{
	asm volatile ("					\
	r4 = 0;						\
	r2 = 0;						\
	*(u32*)(r10 - 4) = r2;				\
	r3 = r10;					\
	r3 += -4;					\
	r2 = %[map_sockmap] ll;				\
	call %[bpf_sk_select_reuseport];		\
	exit;						\
"	:
	: __imm(bpf_sk_select_reuseport),
	  __imm_addr(map_sockmap)
	: __clobber_all);
}
934
935SEC("sk_reuseport")
936__description("bpf_sk_select_reuseport(ctx, sockhash, &key, flags)")
937__success
938__naked void reuseport_ctx_sockhash_key_flags(void)
939{
940 asm volatile (" \
941 r4 = 0; \
942 r2 = 0; \
943 *(u32*)(r10 - 4) = r2; \
944 r3 = r10; \
945 r3 += -4; \
946 r2 = %[map_sockmap] ll; \
947 call %[bpf_sk_select_reuseport]; \
948 exit; \
949" :
950 : __imm(bpf_sk_select_reuseport),
951 __imm_addr(map_sockmap)
952 : __clobber_all);
953}
954
/* Each bpf_skc_to_*() return value carries its own NULL mark: checking the
 * bpf_skc_to_tcp_request_sock() result (r8) must not make the unchecked
 * bpf_skc_to_tcp_sock() result (r7) dereferenceable.
 */
SEC("tc")
__description("mark null check on return value of bpf_skc_to helpers")
__failure __msg("invalid mem access")
__naked void of_bpf_skc_to_helpers(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	r6 = r1;					\
	call %[bpf_skc_to_tcp_sock];			\
	r7 = r0;					\
	r1 = r6;					\
	call %[bpf_skc_to_tcp_request_sock];		\
	r8 = r0;					\
	if r8 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u8*)(r7 + 0);				\
	exit;						\
"	:
	: __imm(bpf_skc_to_tcp_request_sock),
	  __imm(bpf_skc_to_tcp_sock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}
982
/* post_bind4 context: a 2-byte read of src_ip6[0] (ctx offset 28) is not an
 * allowed bpf_sock context access for this program type.
 */
SEC("cgroup/post_bind4")
__description("sk->src_ip6[0] [load 1st byte]")
__failure __msg("invalid bpf_context access off=28 size=2")
__naked void post_bind4_read_src_ip6(void)
{
	asm volatile ("					\
	r6 = r1;					\
	r7 = *(u16*)(r6 + %[bpf_sock_src_ip6_0]);	\
	r0 = 1;						\
	exit;						\
"	:
	: __imm_const(bpf_sock_src_ip6_0, offsetof(struct bpf_sock, src_ip6[0]))
	: __clobber_all);
}

/* post_bind4 context: a 2-byte read of sk->mark (ctx offset 16) is rejected. */
SEC("cgroup/post_bind4")
__description("sk->mark [load mark]")
__failure __msg("invalid bpf_context access off=16 size=2")
__naked void post_bind4_read_mark(void)
{
	asm volatile ("					\
	r6 = r1;					\
	r7 = *(u16*)(r6 + %[bpf_sock_mark]);		\
	r0 = 1;						\
	exit;						\
"	:
	: __imm_const(bpf_sock_mark, offsetof(struct bpf_sock, mark))
	: __clobber_all);
}

/* post_bind6 context: a 2-byte read of sk->src_ip4 (ctx offset 24) is
 * rejected.
 */
SEC("cgroup/post_bind6")
__description("sk->src_ip4 [load src_ip4]")
__failure __msg("invalid bpf_context access off=24 size=2")
__naked void post_bind6_read_src_ip4(void)
{
	asm volatile ("					\
	r6 = r1;					\
	r7 = *(u16*)(r6 + %[bpf_sock_src_ip4]);		\
	r0 = 1;						\
	exit;						\
"	:
	: __imm_const(bpf_sock_src_ip4, offsetof(struct bpf_sock, src_ip4))
	: __clobber_all);
}

/* sock_create context: a 2-byte read of sk->src_port (ctx offset 44) is
 * rejected.
 */
SEC("cgroup/sock_create")
__description("sk->src_port [word load]")
__failure __msg("invalid bpf_context access off=44 size=2")
__naked void sock_create_read_src_port(void)
{
	asm volatile ("					\
	r6 = r1;					\
	r7 = *(u16*)(r6 + %[bpf_sock_src_port]);	\
	r0 = 1;						\
	exit;						\
"	:
	: __imm_const(bpf_sock_src_port, offsetof(struct bpf_sock, src_port))
	: __clobber_all);
}
1042
/* Global subprog (depth 2) that ends up calling bpf_skb_pull_data(). */
__noinline
long skb_pull_data2(struct __sk_buff *sk, __u32 len)
{
	return bpf_skb_pull_data(sk, len);
}

/* Global subprog (depth 1) wrapping skb_pull_data2(). */
__noinline
long skb_pull_data1(struct __sk_buff *sk, __u32 len)
{
	return skb_pull_data2(sk, len);
}

/* global function calls bpf_skb_pull_data(), which invalidates packet
 * pointers established before global function call.
 */
SEC("tc")
__failure __msg("invalid mem access")
int invalidate_pkt_pointers_from_global_func(struct __sk_buff *sk)
{
	/* p is a packet pointer, bounds-checked against data_end below */
	int *p = (void *)(long)sk->data;

	if ((void *)(p + 1) > (void *)(long)sk->data_end)
		return TCX_DROP;
	skb_pull_data1(sk, 0); /* invalidates p through the call chain */
	*p = 42; /* this is unsafe */
	return TCX_PASS;
}
1070
/* Global subprog (depth 2) that ends up calling bpf_xdp_pull_data(). */
__noinline
long xdp_pull_data2(struct xdp_md *x, __u32 len)
{
	return bpf_xdp_pull_data(x, len);
}

/* Global subprog (depth 1) wrapping xdp_pull_data2(). */
__noinline
long xdp_pull_data1(struct xdp_md *x, __u32 len)
{
	return xdp_pull_data2(x, len);
}

/* global function calls bpf_xdp_pull_data(), which invalidates packet
 * pointers established before global function call.
 */
SEC("xdp")
__failure __msg("invalid mem access")
int invalidate_xdp_pkt_pointers_from_global_func(struct xdp_md *x)
{
	/* p is a packet pointer, bounds-checked against data_end below */
	int *p = (void *)(long)x->data;

	if ((void *)(p + 1) > (void *)(long)x->data_end)
		return XDP_DROP;
	xdp_pull_data1(x, 0); /* invalidates p through the call chain */
	*p = 42; /* this is unsafe */
	return XDP_PASS;
}

/* XDP packet changing kfunc calls invalidate packet pointers */
SEC("xdp")
__failure __msg("invalid mem access")
int invalidate_xdp_pkt_pointers(struct xdp_md *x)
{
	int *p = (void *)(long)x->data;

	if ((void *)(p + 1) > (void *)(long)x->data_end)
		return XDP_DROP;
	bpf_xdp_pull_data(x, 0); /* direct kfunc call invalidates p */
	*p = 42; /* this is unsafe */
	return XDP_PASS;
}
1112
/* Global subprog performing a tail call into jmp_table. */
__noinline
int tail_call(struct __sk_buff *sk)
{
	bpf_tail_call_static(sk, &jmp_table, 0);
	return 0;
}

/* Static subprog performing the same tail call. */
static __noinline
int static_tail_call(struct __sk_buff *sk)
{
	bpf_tail_call_static(sk, &jmp_table, 0);
	return 0;
}

/* Tail calls in sub-programs invalidate packet pointers. */
SEC("tc")
__failure __msg("invalid mem access")
int invalidate_pkt_pointers_by_global_tail_call(struct __sk_buff *sk)
{
	int *p = (void *)(long)sk->data;

	if ((void *)(p + 1) > (void *)(long)sk->data_end)
		return TCX_DROP;
	tail_call(sk); /* tail call inside global subprog invalidates p */
	*p = 42; /* this is unsafe */
	return TCX_PASS;
}

/* Tail calls in static sub-programs invalidate packet pointers. */
SEC("tc")
__failure __msg("invalid mem access")
int invalidate_pkt_pointers_by_static_tail_call(struct __sk_buff *sk)
{
	int *p = (void *)(long)sk->data;

	if ((void *)(p + 1) > (void *)(long)sk->data_end)
		return TCX_DROP;
	static_tail_call(sk); /* tail call inside static subprog invalidates p */
	*p = 42; /* this is unsafe */
	return TCX_PASS;
}

/* Direct tail calls do not invalidate packet pointers. */
SEC("tc")
__success
int invalidate_pkt_pointers_by_tail_call(struct __sk_buff *sk)
{
	int *p = (void *)(long)sk->data;

	if ((void *)(p + 1) > (void *)(long)sk->data_end)
		return TCX_DROP;
	bpf_tail_call_static(sk, &jmp_table, 0);
	*p = 42; /* this is NOT unsafe: tail calls don't return */
	return TCX_PASS;
}
1168
1169char _license[] SEC("license") = "GPL";