/*
 * Source: Linux kernel mirror (for testing)
 * git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
 * kernel / os / linux
 */
1// SPDX-License-Identifier: GPL-2.0
2
3#include <linux/bpf.h>
4#include <limits.h>
5#include <bpf/bpf_helpers.h>
6#include "bpf_misc.h"
7
/*
 * r2 and r4 are both copies of r1 and thus share its id.  After the
 * 32-bit adds, taking the "r2 == 0" branch lets the verifier narrow r4,
 * and r3 = (1 - (r4 >> 63)) * 0x7FFFFFFF is then added to the frame
 * pointer.  The verifier must reject this with the message below.
 */
SEC("socket")
__description("scalars: find linked scalars")
__failure
__msg("math between fp pointer and 2147483647 is not allowed")
__naked void scalars(void)
{
	asm volatile ("					\
	r0 = 0;						\
	r1 = 0x80000001 ll;				\
	r1 /= 1;					\
	r2 = r1;					\
	r4 = r1;					\
	w2 += 0x7FFFFFFF;				\
	w4 += 0;					\
	if r2 == 0 goto l0_%=;				\
	exit;						\
l0_%=:							\
	r4 >>= 63;					\
	r3 = 1;						\
	r3 -= r4;					\
	r3 *= 0x7FFFFFFF;				\
	r3 += r10;					\
	*(u8*)(r3 - 1) = r0;				\
	exit;						\
"	::: __clobber_all);
}
34
/*
 * Test that sync_linked_regs() preserves register IDs.
 *
 * The sync_linked_regs() function copies bounds from known_reg to linked
 * registers.  When doing so, it must preserve each register's original id
 * so that subsequent syncs from the same source keep working (here: two
 * consecutive narrowings of r1 must both propagate to r0).
 */
SEC("socket")
__success
__naked void sync_linked_regs_preserves_id(void)
{
	asm volatile ("					\
	call %[bpf_get_prandom_u32];			\
	r0 &= 0xff;	/* r0 in [0, 255] */		\
	r1 = r0;	/* r0, r1 linked with id 1 */	\
	r1 += 4;	/* r1 has id=1 and off=4 in [4, 259] */ \
	if r1 < 10 goto l0_%=;				\
	/* r1 in [10, 259], r0 synced to [6, 255] */	\
	r2 = r0;	/* r2 has id=1 and in [6, 255] */ \
	if r1 < 14 goto l0_%=;				\
	/* r1 in [14, 259], r0 synced to [10, 255] */	\
	if r0 >= 10 goto l0_%=;				\
	/* Never executed */				\
	r0 /= 0;					\
l0_%=:							\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
67
/*
 * Negative offset: r1 = r0 - 4.  If r1 is signed-negative we exit;
 * otherwise syncing implies r0 >= 4, so "r0 != 0" always branches and
 * the division by zero must be proven unreachable.
 */
SEC("socket")
__success
__naked void scalars_neg(void)
{
	asm volatile ("					\
	call %[bpf_get_prandom_u32];			\
	r0 &= 0xff;					\
	r1 = r0;					\
	r1 += -4;					\
	if r1 s< 0 goto l0_%=;				\
	if r0 != 0 goto l0_%=;				\
	r0 /= 0;					\
l0_%=:							\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
87
/* Same test as scalars_neg but using BPF_SUB instead of BPF_ADD with a
 * negative immediate: r1 -= 4 must link r1 to r0 the same way.
 */
SEC("socket")
__success
__naked void scalars_neg_sub(void)
{
	asm volatile ("					\
	call %[bpf_get_prandom_u32];			\
	r0 &= 0xff;					\
	r1 = r0;					\
	r1 -= 4;					\
	if r1 s< 0 goto l0_%=;				\
	if r0 != 0 goto l0_%=;				\
	r0 /= 0;					\
l0_%=:							\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
108
/* alu32 variant with negative offset: w1 = w0 - 4; if w1 is
 * non-negative, w0 >= 4, so the div-by-zero path is unreachable.
 */
SEC("socket")
__success
__naked void scalars_neg_alu32_add(void)
{
	asm volatile ("					\
	call %[bpf_get_prandom_u32];			\
	w0 &= 0xff;					\
	w1 = w0;					\
	w1 += -4;					\
	if w1 s< 0 goto l0_%=;				\
	if w0 != 0 goto l0_%=;				\
	r0 /= 0;					\
l0_%=:							\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
129
/* alu32 with negative offset using SUB (w1 -= 4); same expectation as
 * the ADD variant above.
 */
SEC("socket")
__success
__naked void scalars_neg_alu32_sub(void)
{
	asm volatile ("					\
	call %[bpf_get_prandom_u32];			\
	w0 &= 0xff;					\
	w1 = w0;					\
	w1 -= 4;					\
	if w1 s< 0 goto l0_%=;				\
	if w0 != 0 goto l0_%=;				\
	r0 /= 0;					\
l0_%=:							\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
150
/* Positive offset: r1 = r0 + 4, then if r1 >= 6, r0 >= 2, so r0 != 0
 * and the division by zero is unreachable.
 */
SEC("socket")
__success
__naked void scalars_pos(void)
{
	asm volatile ("					\
	call %[bpf_get_prandom_u32];			\
	r0 &= 0xff;					\
	r1 = r0;					\
	r1 += 4;					\
	if r1 < 6 goto l0_%=;				\
	if r0 != 0 goto l0_%=;				\
	r0 /= 0;					\
l0_%=:							\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
171
/* SUB with negative immediate: r1 -= -4 is equivalent to r1 += 4, so
 * this mirrors scalars_pos and must also verify.
 */
SEC("socket")
__success
__naked void scalars_sub_neg_imm(void)
{
	asm volatile ("					\
	call %[bpf_get_prandom_u32];			\
	r0 &= 0xff;					\
	r1 = r0;					\
	r1 -= -4;					\
	if r1 < 6 goto l0_%=;				\
	if r0 != 0 goto l0_%=;				\
	r0 /= 0;					\
l0_%=:							\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
192
/* Double ADD clears the ID (the tracker can't accumulate offsets), so
 * r1's bounds do not propagate to r0 and the verifier must detect the
 * reachable division by zero.
 */
SEC("socket")
__failure
__msg("div by zero")
__naked void scalars_double_add(void)
{
	asm volatile ("					\
	call %[bpf_get_prandom_u32];			\
	r0 &= 0xff;					\
	r1 = r0;					\
	r1 += 2;					\
	r1 += 2;					\
	if r1 < 6 goto l0_%=;				\
	if r0 != 0 goto l0_%=;				\
	r0 /= 0;					\
l0_%=:							\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
215
/*
 * Test that sync_linked_regs() correctly handles large offset differences.
 * r1.off = S32_MIN, r2.off = 1; delta = S32_MIN - 1 requires 64-bit math
 * (it overflows a 32-bit signed subtraction).
 */
SEC("socket")
__success
__naked void scalars_sync_delta_overflow(void)
{
	asm volatile ("					\
	call %[bpf_get_prandom_u32];			\
	r0 &= 0xff;					\
	r1 = r0;					\
	r2 = r0;					\
	r1 += %[s32_min];				\
	r2 += 1;					\
	if r2 s< 100 goto l0_%=;			\
	if r1 s< 0 goto l0_%=;				\
	r0 /= 0;					\
l0_%=:							\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32),
	  [s32_min]"i"(INT_MIN)
	: __clobber_all);
}
242
/*
 * Another large delta case: r1.off = S32_MAX, r2.off = -1.
 * delta = S32_MAX - (-1) = S32_MAX + 1 requires 64-bit math.
 */
SEC("socket")
__success
__naked void scalars_sync_delta_overflow_large_range(void)
{
	asm volatile ("					\
	call %[bpf_get_prandom_u32];			\
	r0 &= 0xff;					\
	r1 = r0;					\
	r2 = r0;					\
	r1 += %[s32_max];				\
	r2 += -1;					\
	if r2 s< 0 goto l0_%=;				\
	if r1 s>= 0 goto l0_%=;				\
	r0 /= 0;					\
l0_%=:							\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32),
	  [s32_max]"i"(INT_MAX)
	: __clobber_all);
}
269
/*
 * Test linked scalar tracking with alu32 and large positive offset (0x7FFFFFFF).
 * After w1 += 0x7FFFFFFF, w1 wraps to negative for any r0 >= 1.
 * If w1 is signed-negative, then r0 >= 1, so r0 != 0 and the division
 * by zero is unreachable.
 */
SEC("socket")
__success
__naked void scalars_alu32_big_offset(void)
{
	asm volatile ("					\
	call %[bpf_get_prandom_u32];			\
	w0 &= 0xff;					\
	w1 = w0;					\
	w1 += 0x7FFFFFFF;				\
	if w1 s>= 0 goto l0_%=;				\
	if w0 != 0 goto l0_%=;				\
	r0 /= 0;					\
l0_%=:							\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
294
295SEC("socket")
296__failure
297__msg("div by zero")
298__naked void scalars_alu32_basic(void)
299{
300 asm volatile (" \
301 call %[bpf_get_prandom_u32]; \
302 r1 = r0; \
303 w1 += 1; \
304 if r1 > 10 goto 1f; \
305 r0 >>= 32; \
306 if r0 == 0 goto 1f; \
307 r0 /= 0; \
3081: \
309 r0 = 0; \
310 exit; \
311" :
312 : __imm(bpf_get_prandom_u32)
313 : __clobber_all);
314}
315
/*
 * Test alu32 linked register tracking with wrapping.
 * R0 is bounded to [0xffffff00, 0xffffffff] (high 32-bit values);
 * w1 += 0x100 causes R1 to wrap to [0, 0xff].
 *
 * After sync_linked_regs, if bounds are computed correctly:
 *   R0 is [0x00000000_ffffff00, 0x00000000_ffffff80];
 *   R0 >> 32 == 0, so the div by zero is unreachable.
 *
 * If bounds are computed incorrectly (64-bit underflow):
 *   R0 becomes [0xffffffff_ffffff00, 0xffffffff_ffffff80];
 *   R0 >> 32 == 0xffffffff != 0, so the div by zero looks reachable.
 */
SEC("socket")
__success
__naked void scalars_alu32_wrap(void)
{
	asm volatile ("					\
	call %[bpf_get_prandom_u32];			\
	w0 |= 0xffffff00;				\
	r1 = r0;					\
	w1 += 0x100;					\
	if r1 > 0x80 goto l0_%=;			\
	r2 = r0;					\
	r2 >>= 32;					\
	if r2 == 0 goto l0_%=;				\
	r0 /= 0;					\
l0_%=:							\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
350
/*
 * Test that sync_linked_regs() checks reg->id (the linked target register)
 * for BPF_ADD_CONST32 rather than known_reg->id (the branch register):
 * the zero-extension of the 32-bit add must be applied when syncing r7.
 */
SEC("socket")
__success
__naked void scalars_alu32_zext_linked_reg(void)
{
	asm volatile ("					\
	call %[bpf_get_prandom_u32];			\
	w6 = w0;	/* r6 in [0, 0xFFFFFFFF] */	\
	r7 = r6;	/* linked: same id as r6 */	\
	w7 += 1;	/* alu32: r7.id |= BPF_ADD_CONST32 */ \
	r8 = 0xFFFFffff ll;				\
	if r6 < r8 goto l0_%=;				\
	/* r6 in [0xFFFFFFFF, 0xFFFFFFFF] */		\
	/* sync_linked_regs: known_reg=r6, reg=r7 */	\
	/* CPU: w7 = (u32)(0xFFFFFFFF + 1) = 0, zext -> r7 = 0 */ \
	/* With fix: r7 64-bit = [0, 0] (zext applied) */ \
	/* Without fix: r7 64-bit = [0x100000000] (no zext) */ \
	r7 >>= 32;					\
	if r7 == 0 goto l0_%=;				\
	r0 /= 0;	/* unreachable with fix */	\
l0_%=:							\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
381
/*
 * Test that sync_linked_regs() skips propagation when one register used
 * alu32 (BPF_ADD_CONST32) and the other used alu64 (BPF_ADD_CONST64).
 * The delta relationship doesn't hold across different ALU widths, so
 * r8 must stay unknown enough that the div-by-zero path is reported.
 */
SEC("socket")
__failure __msg("div by zero")
__naked void scalars_alu32_alu64_cross_type(void)
{
	asm volatile ("					\
	call %[bpf_get_prandom_u32];			\
	w6 = w0;	/* r6 in [0, 0xFFFFFFFF] */	\
	r7 = r6;	/* linked: same id as r6 */	\
	w7 += 1;	/* alu32: BPF_ADD_CONST32, delta = 1 */ \
	r8 = r6;	/* linked: same id as r6 */	\
	r8 += 2;	/* alu64: BPF_ADD_CONST64, delta = 2 */ \
	r9 = 0xFFFFffff ll;				\
	if r7 < r9 goto l0_%=;				\
	/* r7 = 0xFFFFFFFF */				\
	/* sync: known_reg=r7 (ADD_CONST32), reg=r8 (ADD_CONST64) */ \
	/* Without fix: r8 = zext(0xFFFFFFFF + 1) = 0 */ \
	/* With fix: r8 stays [2, 0x100000001] (r8 >= 2) */ \
	if r8 > 0 goto l1_%=;				\
	goto l0_%=;					\
l1_%=:							\
	r0 /= 0;	/* div by zero */		\
l0_%=:							\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
415
/*
 * Test that regsafe() prevents pruning when two paths reach the same program
 * point with linked registers carrying different ADD_CONST flags (one
 * BPF_ADD_CONST32 from alu32, another BPF_ADD_CONST64 from alu64).
 * If path B were pruned against cached path A, the alu64 wrap-around on
 * path B would be missed.
 */
SEC("socket")
__failure __msg("div by zero")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void scalars_alu32_alu64_regsafe_pruning(void)
{
	asm volatile ("					\
	call %[bpf_get_prandom_u32];			\
	w6 = w0;	/* r6 in [0, 0xFFFFFFFF] */	\
	r7 = r6;	/* linked: same id as r6 */	\
	/* Get another random value for the path branch */ \
	call %[bpf_get_prandom_u32];			\
	if r0 > 0 goto l_pathb_%=;			\
	/* Path A: alu32 */				\
	w7 += 1;	/* BPF_ADD_CONST32, delta = 1 */ \
	goto l_merge_%=;				\
l_pathb_%=:						\
	/* Path B: alu64 */				\
	r7 += 1;	/* BPF_ADD_CONST64, delta = 1 */ \
l_merge_%=:						\
	/* Merge point: regsafe() compares path B against cached path A. */ \
	/* Narrow r6 to trigger sync_linked_regs for r7 */ \
	r9 = 0xFFFFffff ll;				\
	if r6 < r9 goto l0_%=;				\
	/* r6 = 0xFFFFFFFF */				\
	/* sync: r7 = 0xFFFFFFFF + 1 = 0x100000000 */	\
	/* Path A: zext -> r7 = 0 */			\
	/* Path B: no zext -> r7 = 0x100000000 */	\
	r7 >>= 32;					\
	if r7 == 0 goto l0_%=;				\
	r0 /= 0;	/* div by zero on path B */	\
l0_%=:							\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
458
/*
 * C-level (non-naked) variant: bounded index derived from a random
 * offset is used to write inside a 5-byte stack array.  The volatile
 * qualifiers presumably keep the compiler from folding away the bounds
 * check so the verifier sees the alu32 negative-offset pattern —
 * NOTE(review): confirm against the generated BPF bytecode.
 */
SEC("socket")
__success
void alu32_negative_offset(void)
{
	volatile char path[5];
	volatile int offset = bpf_get_prandom_u32();
	int off = offset;

	/* off in [5, 10) maps to path[0..4] after the subtraction */
	if (off >= 5 && off < 10)
		path[off - 5] = '.';

	/* So compiler doesn't say: error: variable 'path' set but not used */
	__sink(path[0]);
}
473
/*
 * Not intended to run: references the bpf_iter_num_* kfuncs so they are
 * available to the asm in spurious_precision_marks() below —
 * TODO(review): confirm this is why the dummy calls exist.
 */
void dummy_calls(void)
{
	bpf_iter_num_new(0, 0, 0);
	bpf_iter_num_next(0);
	bpf_iter_num_destroy(0);
}
480
/*
 * Regression test: precision back-propagation across a checkpoint must
 * not trip over linked registers recorded in the jump history when the
 * second path reaches the checkpoint through a function call (see the
 * inline comments for the exact sequence).
 */
SEC("socket")
__success
__flag(BPF_F_TEST_STATE_FREQ)
int spurious_precision_marks(void *ctx)
{
	struct bpf_iter_num iter;

	asm volatile(
		"r1 = %[iter];"
		"r2 = 0;"
		"r3 = 10;"
		"call %[bpf_iter_num_new];"
	"1:"
		"r1 = %[iter];"
		"call %[bpf_iter_num_next];"
		"if r0 == 0 goto 4f;"
		"r7 = *(u32 *)(r0 + 0);"
		"r8 = *(u32 *)(r0 + 0);"
		/* This jump can't be predicted and does not change r7 or r8 state. */
		"if r7 > r8 goto 2f;"
		/* Branch explored first ties r2 and r7 as having the same id. */
		"r2 = r7;"
		"goto 3f;"
	"2:"
		/* Branch explored second does not tie r2 and r7 but has a function call. */
		"call %[bpf_get_prandom_u32];"
	"3:"
		/*
		 * A checkpoint.
		 * When first branch is explored, this would inject linked registers
		 * r2 and r7 into the jump history.
		 * When second branch is explored, this would be a cache hit point,
		 * triggering propagate_precision().
		 */
		"if r7 <= 42 goto +0;"
		/*
		 * Mark r7 as precise using an if condition that is always true.
		 * When reached via the second branch, this triggered a bug in the backtrack_insn()
		 * because r2 (tied to r7) was propagated as precise to a call.
		 */
		"if r7 <= 0xffffFFFF goto +0;"
		"goto 1b;"
	"4:"
		"r1 = %[iter];"
		"call %[bpf_iter_num_destroy];"
		:
		: __imm_ptr(iter),
		  __imm(bpf_iter_num_new),
		  __imm(bpf_iter_num_next),
		  __imm(bpf_iter_num_destroy),
		  __imm(bpf_get_prandom_u32)
		: __clobber_common, "r7", "r8"
	);

	return 0;
}
537
/*
 * Test that r += r (self-add, src_reg == dst_reg) clears the scalar ID
 * so that sync_linked_regs() does not propagate an incorrect delta
 * (with the bug, r6 would be resynced from r7 after the self-add).
 */
SEC("socket")
__failure
__msg("div by zero")
__naked void scalars_self_add_clears_id(void)
{
	asm volatile ("					\
	call %[bpf_get_prandom_u32];			\
	r6 = r0;	/* r6 unknown, id A */		\
	r7 = r6;	/* r7 linked to r6, id A */	\
	call %[bpf_get_prandom_u32];			\
	r8 = r0;	/* r8 unknown, id B */		\
	r9 = r8;	/* r9 linked to r8, id B */	\
	if r7 != 1 goto l_exit_%=;			\
	/* r7 == 1; sync propagates: r6 = 1 (known, id A) */ \
	r6 += r6;	/* r6 = 2; should clear id */	\
	if r7 == r9 goto l_exit_%=;			\
	/* Bug: r6 synced to r7(1)+delta(2)=3; Fix: r6 = 2 */ \
	if r6 == 3 goto l_exit_%=;			\
	r0 /= 0;					\
l_exit_%=:						\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
568
/* Same as scalars_self_add_clears_id but with alu32: w6 += w6 must also
 * clear the scalar id.
 */
SEC("socket")
__failure
__msg("div by zero")
__naked void scalars_self_add_alu32_clears_id(void)
{
	asm volatile ("					\
	call %[bpf_get_prandom_u32];			\
	w6 = w0;					\
	w7 = w6;					\
	call %[bpf_get_prandom_u32];			\
	w8 = w0;					\
	w9 = w8;					\
	if w7 != 1 goto l_exit_%=;			\
	w6 += w6;					\
	if w7 == w9 goto l_exit_%=;			\
	if w6 == 3 goto l_exit_%=;			\
	r0 /= 0;					\
l_exit_%=:						\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
594
/*
 * Test that a stale delta from a cleared BPF_ADD_CONST does not leak
 * through assign_scalar_id_before_mov() into a new id, causing
 * sync_linked_regs() to compute an incorrect offset (here: stale
 * delta 5 left on r6 after "r6 ^= 0" clears its id).
 */
SEC("socket")
__failure
__msg("div by zero")
__naked void scalars_stale_delta_from_cleared_id(void)
{
	asm volatile ("					\
	call %[bpf_get_prandom_u32];			\
	r6 = r0;	/* r6 unknown, gets id A */	\
	r6 += 5;	/* id A|ADD_CONST, delta 5 */	\
	r6 ^= 0;	/* id cleared; delta stays 5 */	\
	r8 = r6;	/* new id B, stale delta 5 */	\
	r8 += 3;	/* id B|ADD_CONST, delta 3 */	\
	r9 = r6;	/* id B, stale delta 5 */	\
	if r9 != 10 goto l_exit_%=;			\
	/* Bug: r8 = 10+(3-5) = 8; Fix: r8 = 10+(3-0) = 13 */ \
	if r8 == 8 goto l_exit_%=;			\
	r0 /= 0;					\
l_exit_%=:						\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
624
/* Same as scalars_stale_delta_from_cleared_id but with alu32 ops. */
SEC("socket")
__failure
__msg("div by zero")
__naked void scalars_stale_delta_from_cleared_id_alu32(void)
{
	asm volatile ("					\
	call %[bpf_get_prandom_u32];			\
	w6 = w0;					\
	w6 += 5;					\
	w6 ^= 0;					\
	w8 = w6;					\
	w8 += 3;					\
	w9 = w6;					\
	if w9 != 10 goto l_exit_%=;			\
	if w8 == 8 goto l_exit_%=;			\
	r0 /= 0;					\
l_exit_%=:						\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
649
/*
 * Test that regsafe() verifies base_id consistency for BPF_ADD_CONST
 * linked scalars during state pruning.
 *
 * The false branch (explored first) links R3 to R2 via ADD_CONST.
 * The true branch (runtime path) links R3 to R4 (an unrelated base_id).
 * At the merge point, pruning must fail because the linkage topology
 * differs; the expected error below comes from the unsynced R3 making
 * the stack write variable-offset out of bounds.
 */
SEC("socket")
__description("linked scalars: add_const base_id must be consistent for pruning")
__failure __msg("invalid variable-offset")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void add_const_base_id_pruning(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 16) = r1;				\
	call %[bpf_get_prandom_u32];			\
	r6 = r0;					\
	r6 &= 1;					\
	if r6 >= 1 goto l_true_%=;			\
							\
	/* False branch (explored first, old state) */	\
	call %[bpf_get_prandom_u32];			\
	r2 = r0;					\
	r2 &= 0xff;	/* R2 = scalar(id=A) [0,255] */	\
	r3 = r2;	/* R3 linked to R2 (id=A) */	\
	r3 += 10;	/* R3 id=A|ADD_CONST, delta=10 */ \
	r6 = 0;						\
	goto l_merge_%=;				\
							\
l_true_%=:						\
	/* True branch (runtime path, cur state) */	\
	call %[bpf_get_prandom_u32];			\
	r2 = r0;					\
	r2 &= 0xff;	/* R2 = scalar [0,255], id=0 */	\
	r4 = r0;					\
	r4 &= 0xff;	/* R4 = scalar [0,255], id=0 */	\
	r3 = r4;	/* R3 linked to R4 (new id=C) */ \
	r3 += 10;	/* R3 id=C|ADD_CONST, delta=10 */ \
	r6 = 0;						\
							\
l_merge_%=:						\
	/* At merge, old R3 linked to R2, cur R3 linked to R4. */ \
	/* Pruning must fail: base_ids A vs C inconsistent. */ \
	if r2 >= 6 goto l_exit_%=;			\
	/* sync_linked_regs: R2<6 => R3<16 in old state. */ \
	/* Without fix: R3 in [10,15] from incorrect pruning. */ \
	/* With fix: R3 in [10,265], not synced from R2. */ \
	r3 -= 10;	/* [0,5] vs [0,255] */		\
	r9 = r10;					\
	r9 += -16;					\
	r9 += r3;	/* fp-16+[0,5] vs fp-16+[0,255] */ \
	*(u8*)(r9 + 0) = r6;	/* within 16B vs past fp */ \
l_exit_%=:						\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
712
/* License declaration consumed by the BPF loader via the "license" section. */
char _license[] SEC("license") = "GPL";