Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2026 Meta Platforms, Inc. and affiliates. */
3/* Converted from tools/testing/selftests/bpf/prog_tests/align.c */
4
5#include <linux/bpf.h>
6#include <bpf/bpf_helpers.h>
7#include "bpf_misc.h"
8
9/* Four tests of known constants. These aren't staggeringly
10 * interesting since we track exact values now.
11 */
12
/* Immediate moves of known constants: after each `r3 = N` the verifier
 * log must report the exact scalar value of R3. Note the "N:" prefix in
 * each __msg pins the instruction index, so the asm body and the
 * expectations must stay in lockstep — do not insert or remove insns.
 */
SEC("tc")
__success __log_level(2)
__flag(BPF_F_ANY_ALIGNMENT)
__msg("0: R1=ctx() R10=fp0")
__msg("0: {{.*}} R3=2")
__msg("1: {{.*}} R3=4")
__msg("2: {{.*}} R3=8")
__msg("3: {{.*}} R3=16")
__msg("4: {{.*}} R3=32")
__naked void mov(void)
{
	asm volatile ("					\
	r3 = 2;						\
	r3 = 4;						\
	r3 = 8;						\
	r3 = 16;					\
	r3 = 32;					\
	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}
34
/* Shifts of known constants: each `r3 <<= 1` doubles R3's tracked value
 * and each `r4 >>= 1` halves R4's; the log must show the exact value
 * after every step (R3 1..16, back to 1 after `>>= 4`; R4 32..2).
 * Instruction indices in __msg are position-sensitive.
 */
SEC("tc")
__success __log_level(2)
__flag(BPF_F_ANY_ALIGNMENT)
__msg("0: R1=ctx() R10=fp0")
__msg("0: {{.*}}R3=1")
__msg("1: {{.*}}R3=2")
__msg("2: {{.*}}R3=4")
__msg("3: {{.*}}R3=8")
__msg("4: {{.*}}R3=16")
__msg("5: {{.*}}R3=1")
__msg("6: {{.*}}R4=32")
__msg("7: {{.*}}R4=16")
__msg("8: {{.*}}R4=8")
__msg("9: {{.*}}R4=4")
__msg("10: {{.*}}R4=2")
__naked void shift(void)
{
	asm volatile ("					\
	r3 = 1;						\
	r3 <<= 1;					\
	r3 <<= 1;					\
	r3 <<= 1;					\
	r3 <<= 1;					\
	r3 >>= 4;					\
	r4 = 32;					\
	r4 >>= 1;					\
	r4 >>= 1;					\
	r4 >>= 1;					\
	r4 >>= 1;					\
	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}
68
/* Additions of known constants: R3 and R4 accumulate 4+4+2 and 8+4+2
 * respectively; the verifier log must show the exact running totals
 * at the pinned instruction indices.
 */
SEC("tc")
__success __log_level(2)
__flag(BPF_F_ANY_ALIGNMENT)
__msg("0: R1=ctx() R10=fp0")
__msg("0: {{.*}}R3=4")
__msg("1: {{.*}}R3=8")
__msg("2: {{.*}}R3=10")
__msg("3: {{.*}}R4=8")
__msg("4: {{.*}}R4=12")
__msg("5: {{.*}}R4=14")
__naked void addsub(void)
{
	asm volatile ("					\
	r3 = 4;						\
	r3 += 4;					\
	r3 += 2;					\
	r4 = 8;						\
	r4 += 4;					\
	r4 += 2;					\
	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}
92
/* Multiplications of a known constant: 7 * 1 * 2 * 4, with the exact
 * intermediate values (7, 7, 14, 56) expected in the verifier log at
 * the pinned instruction indices.
 */
SEC("tc")
__success __log_level(2)
__flag(BPF_F_ANY_ALIGNMENT)
__msg("0: R1=ctx() R10=fp0")
__msg("0: {{.*}}R3=7")
__msg("1: {{.*}}R3=7")
__msg("2: {{.*}}R3=14")
__msg("3: {{.*}}R3=56")
__naked void mul(void)
{
	asm volatile ("					\
	r3 = 7;						\
	r3 *= 1;					\
	r3 *= 2;					\
	r3 *= 4;					\
	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}
112
/* Tests using unknown values */

/* Load skb->data into r2 and skb->data_end into r3.
 * Expands to a single string literal of BPF asm; the %[...] operands
 * are bound via __imm_const() by each user of the macro.
 */
#define PREP_PKT_POINTERS \
	"r2 = *(u32*)(r1 + %[__sk_buff_data]);" \
	"r3 = *(u32*)(r1 + %[__sk_buff_data_end]);"

/* Bounds-check that at least 8 bytes of packet data are available
 * (exiting the program otherwise) and load one unknown byte from the
 * packet into DST_REG. LBL is the label string used for the branch
 * target; it must be unique per expansion when a program expands this
 * macro more than once. Single-use callers go through LOAD_UNKNOWN.
 * Reuses PREP_PKT_POINTERS rather than repeating the two loads, so the
 * two macros cannot drift apart.
 */
#define __LOAD_UNKNOWN(DST_REG, LBL) \
	PREP_PKT_POINTERS \
	"r0 = r2;" \
	"r0 += 8;" \
	"if r3 >= r0 goto " LBL ";" \
	"exit;" \
LBL ":" \
	DST_REG " = *(u8*)(r2 + 0);"

#define LOAD_UNKNOWN(DST_REG) __LOAD_UNKNOWN(DST_REG, "l99_%=")
130
/* Shifts applied to an unknown byte read from the packet: each
 * `<<= 1` widens R3's var_off mask one bit to the left (0xff ->
 * 0x1fe -> ... -> 0xff0), and for R4 a `<<= 5` followed by four
 * `>>= 1` narrows the mask back down, tracking the known alignment
 * throughout. Two separate loads are used so the branch labels differ
 * (l99/l98) — hence the raw __LOAD_UNKNOWN with explicit labels.
 */
SEC("tc")
__success __log_level(2)
__flag(BPF_F_ANY_ALIGNMENT)
__msg("6: {{.*}} R2=pkt(r=8)")
__msg("6: {{.*}} R3={{[^)]*}}var_off=(0x0; 0xff)")
__msg("7: {{.*}} R3={{[^)]*}}var_off=(0x0; 0x1fe)")
__msg("8: {{.*}} R3={{[^)]*}}var_off=(0x0; 0x3fc)")
__msg("9: {{.*}} R3={{[^)]*}}var_off=(0x0; 0x7f8)")
__msg("10: {{.*}} R3={{[^)]*}}var_off=(0x0; 0xff0)")
__msg("12: {{.*}} R3=pkt_end()")
__msg("17: {{.*}} R4={{[^)]*}}var_off=(0x0; 0xff)")
__msg("18: {{.*}} R4={{[^)]*}}var_off=(0x0; 0x1fe0)")
__msg("19: {{.*}} R4={{[^)]*}}var_off=(0x0; 0xff0)")
__msg("20: {{.*}} R4={{[^)]*}}var_off=(0x0; 0x7f8)")
__msg("21: {{.*}} R4={{[^)]*}}var_off=(0x0; 0x3fc)")
__msg("22: {{.*}} R4={{[^)]*}}var_off=(0x0; 0x1fe)")
__naked void unknown_shift(void)
{
	asm volatile ("					\
	" __LOAD_UNKNOWN("r3", "l99_%=") "		\
	r3 <<= 1;					\
	r3 <<= 1;					\
	r3 <<= 1;					\
	r3 <<= 1;					\
	" __LOAD_UNKNOWN("r4", "l98_%=") "		\
	r4 <<= 5;					\
	r4 >>= 1;					\
	r4 >>= 1;					\
	r4 >>= 1;					\
	r4 >>= 1;					\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
168
/* Multiplications of an unknown byte by powers of two: each `*= 2^k`
 * shifts R4's var_off mask left by k bits (0xff -> 0x1fe -> 0x3fc ->
 * 0x7f8), and `*= 1` leaves it unchanged; R4 is re-seeded from R3
 * before each multiply so every step starts from the 0xff mask.
 */
SEC("tc")
__success __log_level(2)
__flag(BPF_F_ANY_ALIGNMENT)
__msg("6: {{.*}} R3={{[^)]*}}var_off=(0x0; 0xff)")
__msg("7: {{.*}} R4={{[^)]*}}var_off=(0x0; 0xff)")
__msg("8: {{.*}} R4={{[^)]*}}var_off=(0x0; 0xff)")
__msg("9: {{.*}} R4={{[^)]*}}var_off=(0x0; 0xff)")
__msg("10: {{.*}} R4={{[^)]*}}var_off=(0x0; 0x1fe)")
__msg("11: {{.*}} R4={{[^)]*}}var_off=(0x0; 0xff)")
__msg("12: {{.*}} R4={{[^)]*}}var_off=(0x0; 0x3fc)")
__msg("13: {{.*}} R4={{[^)]*}}var_off=(0x0; 0xff)")
__msg("14: {{.*}} R4={{[^)]*}}var_off=(0x0; 0x7f8)")
__msg("15: {{.*}} R4={{[^)]*}}var_off=(0x0; 0xff0)")
__naked void unknown_mul(void)
{
	asm volatile ("					\
	" LOAD_UNKNOWN("r3") "				\
	r4 = r3;					\
	r4 *= 1;					\
	r4 = r3;					\
	r4 *= 2;					\
	r4 = r3;					\
	r4 *= 4;					\
	r4 = r3;					\
	r4 *= 8;					\
	r4 *= 2;					\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
202
/* Packet pointer with only constant offsets: after `r5 += 14` the log
 * must show imm=14, and once the r3 >= r4 bounds check passes the
 * range grows to r=18. The u8/u16/u32 loads at offsets 0..3 then
 * exercise reads within the verified range. No BPF_F_ANY_ALIGNMENT
 * here — constant offsets keep every access naturally aligned.
 */
SEC("tc")
__success __log_level(2)
__msg("2: {{.*}} R5=pkt(r=0)")
__msg("4: {{.*}} R5=pkt(r=0,imm=14)")
__msg("5: {{.*}} R4=pkt(r=0,imm=14)")
__msg("9: {{.*}} R5=pkt(r=18,imm=14)")
__msg("10: {{.*}} R4={{[^)]*}}var_off=(0x0; 0xff){{.*}} R5=pkt(r=18,imm=14)")
__msg("13: {{.*}} R4={{[^)]*}}var_off=(0x0; 0xffff)")
__msg("14: {{.*}} R4={{[^)]*}}var_off=(0x0; 0xffff)")
__naked void packet_const_offset(void)
{
	asm volatile ("					\
	" PREP_PKT_POINTERS "				\
	r5 = r2;					\
	r0 = 0;						\
	/* Skip over ethernet header. */		\
	r5 += 14;					\
	r4 = r5;					\
	r4 += 4;					\
	if r3 >= r4 goto l0_%=;				\
	exit;						\
l0_%=:	r4 = *(u8*)(r5 + 0);				\
	r4 = *(u8*)(r5 + 1);				\
	r4 = *(u8*)(r5 + 2);				\
	r4 = *(u8*)(r5 + 3);				\
	r4 = *(u16*)(r5 + 0);				\
	r4 = *(u16*)(r5 + 2);				\
	r4 = *(u32*)(r5 + 0);				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
238
/* Packet pointer plus a 4-aligned variable offset, added in both orders
 * (constant-then-variable and variable-then-constant), and with multiple
 * variable accumulations. The per-insn comments below explain each
 * expected log line.
 */
SEC("tc")
__success __log_level(2)
__flag(BPF_F_ANY_ALIGNMENT)
/* Calculated offset in R6 has unknown value, but known
 * alignment of 4.
 */
__msg("6: {{.*}} R2=pkt(r=8)")
__msg("7: {{.*}} R6={{[^)]*}}var_off=(0x0; 0x3fc)")
/* Offset is added to packet pointer R5, resulting in
 * known fixed offset, and variable offset from R6.
 */
__msg("11: {{.*}} R5=pkt(id=1,{{[^)]*}},var_off=(0x2; 0x7fc)")
/* At the time the word size load is performed from R5,
 * it's total offset is NET_IP_ALIGN + reg->off (0) +
 * reg->aux_off (14) which is 16. Then the variable
 * offset is considered using reg->aux_off_align which
 * is 4 and meets the load's requirements.
 */
__msg("15: {{.*}} R5={{[^)]*}}var_off=(0x2; 0x7fc)")
/* Variable offset is added to R5 packet pointer,
 * resulting in auxiliary alignment of 4. To avoid BPF
 * verifier's precision backtracking logging
 * interfering we also have a no-op R4 = R5
 * instruction to validate R5 state. We also check
 * that R4 is what it should be in such case.
 */
__msg("18: {{.*}} R4={{[^)]*}}var_off=(0x0; 0x3fc){{.*}} R5={{[^)]*}}var_off=(0x0; 0x3fc)")
/* Constant offset is added to R5, resulting in
 * reg->off of 14.
 */
__msg("19: {{.*}} R5=pkt(id=2,{{[^)]*}}var_off=(0x2; 0x7fc)")
/* At the time the word size load is performed from R5,
 * its total fixed offset is NET_IP_ALIGN + reg->off
 * (14) which is 16. Then the variable offset is 4-byte
 * aligned, so the total offset is 4-byte aligned and
 * meets the load's requirements.
 */
__msg("24: {{.*}} R5={{[^)]*}}var_off=(0x2; 0x7fc)")
/* Constant offset is added to R5 packet pointer,
 * resulting in reg->off value of 14.
 */
__msg("26: {{.*}} R5=pkt(r=8,imm=14)")
/* Variable offset is added to R5, resulting in a
 * variable offset of (4n). See comment for insn #18
 * for R4 = R5 trick.
 */
__msg("28: {{.*}} R4={{[^)]*}}var_off=(0x2; 0x7fc){{.*}} R5={{[^)]*}}var_off=(0x2; 0x7fc)")
/* Constant is added to R5 again, setting reg->off to 18. */
__msg("29: {{.*}} R5=pkt(id=3,{{[^)]*}}var_off=(0x2; 0x7fc)")
/* And once more we add a variable; resulting var_off
 * is still (4n), fixed offset is not changed.
 * Also, we create a new reg->id.
 */
__msg("31: {{.*}} R4={{[^)]*}}var_off=(0x2; 0xffc){{.*}} R5={{[^)]*}}var_off=(0x2; 0xffc)")
/* At the time the word size load is performed from R5,
 * its total fixed offset is NET_IP_ALIGN + reg->off (18)
 * which is 20. Then the variable offset is (4n), so
 * the total offset is 4-byte aligned and meets the
 * load's requirements.
 */
__msg("35: {{.*}} R5={{[^)]*}}var_off=(0x2; 0xffc)")
__naked void packet_variable_offset(void)
{
	asm volatile ("					\
	" LOAD_UNKNOWN("r6") "				\
	r6 <<= 2;					\
	/* First, add a constant to the R5 packet pointer,\
	 * then a variable with a known alignment.	\
	 */						\
	r5 = r2;					\
	r5 += 14;					\
	r5 += r6;					\
	r4 = r5;					\
	r4 += 4;					\
	if r3 >= r4 goto l0_%=;				\
	exit;						\
l0_%=:	r4 = *(u32*)(r5 + 0);				\
	/* Now, test in the other direction. Adding first\
	 * the variable offset to R5, then the constant.\
	 */						\
	r5 = r2;					\
	r5 += r6;					\
	r4 = r5;					\
	r5 += 14;					\
	r4 = r5;					\
	r4 += 4;					\
	if r3 >= r4 goto l1_%=;				\
	exit;						\
l1_%=:	r4 = *(u32*)(r5 + 0);				\
	/* Test multiple accumulations of unknown values\
	 * into a packet pointer.			\
	 */						\
	r5 = r2;					\
	r5 += 14;					\
	r5 += r6;					\
	r4 = r5;					\
	r5 += 4;					\
	r5 += r6;					\
	r4 = r5;					\
	r4 += 4;					\
	if r3 >= r4 goto l2_%=;				\
	exit;						\
l2_%=:	r4 = *(u32*)(r5 + 0);				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
349
/* Packet pointer with a (4n+2) variable offset, followed by a second
 * (4n) variable offset derived from data read through the first access.
 * The per-insn comments below explain each expected log line.
 */
SEC("tc")
__success __log_level(2)
__flag(BPF_F_ANY_ALIGNMENT)
/* Calculated offset in R6 has unknown value, but known
 * alignment of 4.
 */
__msg("6: {{.*}} R2=pkt(r=8)")
__msg("7: {{.*}} R6={{[^)]*}}var_off=(0x0; 0x3fc)")
/* Adding 14 makes R6 be (4n+2) */
__msg("8: {{.*}} R6={{[^)]*}}var_off=(0x2; 0x7fc)")
/* Packet pointer has (4n+2) offset */
__msg("11: {{.*}} R5={{[^)]*}}var_off=(0x2; 0x7fc)")
__msg("12: {{.*}} R4={{[^)]*}}var_off=(0x2; 0x7fc)")
/* At the time the word size load is performed from R5,
 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
 * which is 2. Then the variable offset is (4n+2), so
 * the total offset is 4-byte aligned and meets the
 * load's requirements.
 */
__msg("15: {{.*}} R5={{[^)]*}}var_off=(0x2; 0x7fc)")
/* Newly read value in R6 was shifted left by 2, so has
 * known alignment of 4.
 */
__msg("17: {{.*}} R6={{[^)]*}}var_off=(0x0; 0x3fc)")
/* Added (4n) to packet pointer's (4n+2) var_off, giving
 * another (4n+2).
 */
__msg("19: {{.*}} R5={{[^)]*}}var_off=(0x2; 0xffc)")
__msg("20: {{.*}} R4={{[^)]*}}var_off=(0x2; 0xffc)")
/* At the time the word size load is performed from R5,
 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
 * which is 2. Then the variable offset is (4n+2), so
 * the total offset is 4-byte aligned and meets the
 * load's requirements.
 */
__msg("23: {{.*}} R5={{[^)]*}}var_off=(0x2; 0xffc)")
__naked void packet_variable_offset_2(void)
{
	asm volatile ("					\
	/* Create an unknown offset, (4n+2)-aligned */	\
	" LOAD_UNKNOWN("r6") "				\
	r6 <<= 2;					\
	r6 += 14;					\
	/* Add it to the packet pointer */		\
	r5 = r2;					\
	r5 += r6;					\
	/* Check bounds and perform a read */		\
	r4 = r5;					\
	r4 += 4;					\
	if r3 >= r4 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = *(u32*)(r5 + 0);				\
	/* Make a (4n) offset from the value we just read */\
	r6 &= 0xff;					\
	r6 <<= 2;					\
	/* Add it to the packet pointer */		\
	r5 += r6;					\
	/* Check bounds and perform a read */		\
	r4 = r5;					\
	r4 += 4;					\
	if r3 >= r4 goto l1_%=;				\
	exit;						\
l1_%=:	r6 = *(u32*)(r5 + 0);				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
420
/* Negative test (__failure): a (ptr - ptr) derived offset has unbounded
 * var_off, so after adding it to the packet pointer the subsequent
 * `r4 += 4` must be rejected — the final __msg expects the "pkt pointer
 * offset ... is not allowed" verifier error.
 */
SEC("tc")
__failure __log_level(2)
__msg("3: {{.*}} R5=pkt_end()")
/* (ptr - ptr) << 2 == unknown, (4n) */
__msg("5: {{.*}} R5={{[^)]*}}var_off=(0x0; 0xfffffffffffffffc)")
/* (4n) + 14 == (4n+2). We blow our bounds, because
 * the add could overflow.
 */
__msg("6: {{.*}} R5={{[^)]*}}var_off=(0x2; 0xfffffffffffffffc)")
/* Checked s>=0 */
__msg("9: {{.*}} R5={{[^)]*}}var_off=(0x2; 0x7ffffffffffffffc)")
/* packet pointer + nonnegative (4n+2) */
__msg("11: {{.*}} R4={{[^)]*}}var_off=(0x2; 0x7ffffffffffffffc){{.*}} R6={{[^)]*}}var_off=(0x2; 0x7ffffffffffffffc)")
__msg("12: (07) r4 += 4")
/* packet smax bound overflow */
__msg("pkt pointer offset -9223372036854775808 is not allowed")
__naked void dubious_pointer_arithmetic(void)
{
	asm volatile ("					\
	" PREP_PKT_POINTERS "				\
	r0 = 0;						\
	/* (ptr - ptr) << 2 */				\
	r5 = r3;					\
	r5 -= r2;					\
	r5 <<= 2;					\
	/* We have a (4n) value. Let's make a packet offset\
	 * out of it. First add 14, to make it a (4n+2)	\
	 */						\
	r5 += 14;					\
	/* Then make sure it's nonnegative */		\
	if r5 s>= 0 goto l0_%=;				\
	exit;						\
l0_%=:	/* Add it to packet pointer */			\
	r6 = r2;					\
	r6 += r5;					\
	/* Check bounds and perform a read */		\
	r4 = r6;					\
	r4 += 4;					\
	if r3 >= r4 goto l1_%=;				\
	exit;						\
l1_%=:	r4 = *(u32*)(r6 + 0);				\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
468
/* Subtracting one unknown, (4n)-aligned value from a (4n+2) one blows
 * the unsigned bounds; an `s>= 0` check restores them, after which the
 * result can be added to the packet pointer and loaded through.
 */
SEC("tc")
__success __log_level(2)
__flag(BPF_F_ANY_ALIGNMENT)
/* Calculated offset in R6 has unknown value, but known
 * alignment of 4.
 */
__msg("6: {{.*}} R2=pkt(r=8)")
__msg("8: {{.*}} R6={{[^)]*}}var_off=(0x0; 0x3fc)")
/* Adding 14 makes R6 be (4n+2) */
__msg("9: {{.*}} R6={{[^)]*}}var_off=(0x2; 0x7fc)")
/* New unknown value in R7 is (4n) */
__msg("10: {{.*}} R7={{[^)]*}}var_off=(0x0; 0x3fc)")
/* Subtracting it from R6 blows our unsigned bounds */
__msg("11: {{.*}} R6={{[^)]*}}var_off=(0x2; 0xfffffffffffffffc)")
/* Checked s>= 0 */
__msg("14: {{.*}} R6={{[^)]*}}var_off=(0x2; 0x7fc)")
/* At the time the word size load is performed from R5,
 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
 * which is 2. Then the variable offset is (4n+2), so
 * the total offset is 4-byte aligned and meets the
 * load's requirements.
 */
__msg("20: {{.*}} R5={{[^)]*}}var_off=(0x2; 0x7fc)")
__naked void variable_subtraction(void)
{
	asm volatile ("					\
	/* Create an unknown offset, (4n+2)-aligned */	\
	" LOAD_UNKNOWN("r6") "				\
	r7 = r6;					\
	r6 <<= 2;					\
	r6 += 14;					\
	/* Create another unknown, (4n)-aligned, and subtract\
	 * it from the first one			\
	 */						\
	r7 <<= 2;					\
	r6 -= r7;					\
	/* Bounds-check the result */			\
	if r6 s>= 0 goto l0_%=;				\
	exit;						\
l0_%=:	/* Add it to the packet pointer */		\
	r5 = r2;					\
	r5 += r6;					\
	/* Check bounds and perform a read */		\
	r4 = r5;					\
	r4 += 4;					\
	if r3 >= r4 goto l1_%=;				\
	exit;						\
l1_%=:	r6 = *(u32*)(r5 + 0);				\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
523
/* Subtracting a bounded unknown from the packet pointer underflows its
 * unsigned bounds; adding back a larger (4n)-aligned unknown restores
 * nice bounds and permits the subsequent bounds-checked load.
 */
SEC("tc")
__success __log_level(2)
__flag(BPF_F_ANY_ALIGNMENT)
/* Calculated offset in R6 has unknown value, but known
 * alignment of 4.
 */
__msg("6: {{.*}} R2=pkt(r=8)")
__msg("9: {{.*}} R6={{[^)]*}}var_off=(0x0; 0x3c)")
/* Adding 14 makes R6 be (4n+2) */
__msg("10: {{.*}} R6={{[^)]*}}var_off=(0x2; 0x7c)")
/* Subtracting from packet pointer overflows ubounds */
__msg("13: R5={{[^)]*}}var_off=(0xffffffffffffff82; 0x7c)")
/* New unknown value in R7 is (4n), >= 76 */
__msg("14: {{.*}} R7={{[^)]*}}var_off=(0x0; 0x7fc)")
/* Adding it to packet pointer gives nice bounds again */
__msg("16: {{.*}} R5={{[^)]*}}var_off=(0x2; 0x7fc)")
/* At the time the word size load is performed from R5,
 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
 * which is 2. Then the variable offset is (4n+2), so
 * the total offset is 4-byte aligned and meets the
 * load's requirements.
 */
__msg("20: {{.*}} R5={{[^)]*}}var_off=(0x2; 0x7fc)")
__naked void pointer_variable_subtraction(void)
{
	asm volatile ("					\
	/* Create an unknown offset, (4n+2)-aligned and bounded\
	 * to [14,74]					\
	 */						\
	" LOAD_UNKNOWN("r6") "				\
	r7 = r6;					\
	r6 &= 0xf;					\
	r6 <<= 2;					\
	r6 += 14;					\
	/* Subtract it from the packet pointer */	\
	r5 = r2;					\
	r5 -= r6;					\
	/* Create another unknown, (4n)-aligned and >= 74.\
	 * That in fact means >= 76, since 74 mod 4 == 2\
	 */						\
	r7 <<= 2;					\
	r7 += 76;					\
	/* Add it to the packet pointer */		\
	r5 += r7;					\
	/* Check bounds and perform a read */		\
	r4 = r5;					\
	r4 += 4;					\
	if r3 >= r4 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = *(u32*)(r5 + 0);				\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
580
/* License string emitted into the "license" section of the object. */
char _license[] SEC("license") = "GPL";