Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2023 SUSE LLC */
3#include <linux/bpf.h>
4#include <bpf/bpf_helpers.h>
5#include "../../../include/linux/filter.h"
6#include "bpf_misc.h"
7
/* Single-entry ARRAY map; the map-value precision tests below use its
 * one u64 value slot as the memory operand for atomic instructions.
 */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} precision_map SEC(".maps");
14
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0xfffffff8 goto pc+2")
__msg("mark_precise: frame0: regs=r2 stack= before 1: (87) r2 = -r2")
__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 8")
/* Verify that precision backtracking of r2 walks through the BPF_NEG
 * instruction (insn 1) back to the constant load that seeded it.
 */
__naked int bpf_neg(void)
{
	asm volatile (
		"r2 = 8;"
		"r2 = -r2;"
		"if r2 != -8 goto 1f;"
		"r1 = r10;"
		"r1 += r2;"	/* variable offset into stack => mark_precise(r2) */
		"1:"
		"r0 = 0;"
		"exit;"
		::: __clobber_all);
}
34
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2")
__msg("mark_precise: frame0: regs=r2 stack= before 1: (d4) r2 = le16 r2")
__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0")
/* Verify that precision backtracking of r2 walks through the
 * BPF_END/BPF_TO_LE (le16) conversion back to the constant load.
 */
__naked int bpf_end_to_le(void)
{
	asm volatile (
		"r2 = 0;"
		"r2 = le16 r2;"
		"if r2 != 0 goto 1f;"
		"r1 = r10;"
		"r1 += r2;"	/* variable offset into stack => mark_precise(r2) */
		"1:"
		"r0 = 0;"
		"exit;"
		::: __clobber_all);
}
54
55
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2")
__msg("mark_precise: frame0: regs=r2 stack= before 1: (dc) r2 = be16 r2")
__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0")
/* Verify that precision backtracking of r2 walks through the
 * BPF_END/BPF_TO_BE (be16) conversion back to the constant load.
 */
__naked int bpf_end_to_be(void)
{
	asm volatile (
		"r2 = 0;"
		"r2 = be16 r2;"
		"if r2 != 0 goto 1f;"
		"r1 = r10;"
		"r1 += r2;"	/* variable offset into stack => mark_precise(r2) */
		"1:"
		"r0 = 0;"
		"exit;"
		::: __clobber_all);
}
75
76#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
77 (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
78 defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) || \
79 defined(__TARGET_ARCH_loongarch)) && __clang_major__ >= 18
80
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2")
__msg("mark_precise: frame0: regs=r2 stack= before 1: (d7) r2 = bswap16 r2")
__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0")
/* Verify that precision backtracking of r2 walks through the
 * unconditional bswap16 (cpu_v4) instruction back to the constant load.
 */
__naked int bpf_end_bswap(void)
{
	asm volatile (
		"r2 = 0;"
		"r2 = bswap16 r2;"
		"if r2 != 0 goto 1f;"
		"r1 = r10;"
		"r1 += r2;"	/* variable offset into stack => mark_precise(r2) */
		"1:"
		"r0 = 0;"
		"exit;"
		::: __clobber_all);
}
100
101#ifdef CAN_USE_LOAD_ACQ_STORE_REL
102
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r3 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 2: (db) r2 = load_acquire((u64 *)(r10 -8))")
__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
/* Verify that backtracking through load_acquire transfers precision
 * from the destination register (r2) to the source stack slot (-8),
 * and from there to the register that was stored (r1).
 */
__naked int bpf_load_acquire(void)
{
	asm volatile (
		"r1 = 8;"
		"*(u64 *)(r10 - 8) = r1;"
		".8byte %[load_acquire_insn];" /* r2 = load_acquire((u64 *)(r10 - 8)); */
		"r3 = r10;"
		"r3 += r2;" /* mark_precise */
		"r0 = 0;"
		"exit;"
		:
		: __imm_insn(load_acquire_insn,
			     BPF_ATOMIC_OP(BPF_DW, BPF_LOAD_ACQ, BPF_REG_2, BPF_REG_10, -8))
		: __clobber_all);
}
124
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r1 stack= before 3: (bf) r2 = r10")
__msg("mark_precise: frame0: regs=r1 stack= before 2: (79) r1 = *(u64 *)(r10 -8)")
__msg("mark_precise: frame0: regs= stack=-8 before 1: (db) store_release((u64 *)(r10 -8), r1)")
__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
/* Verify that backtracking through store_release transfers precision
 * from the stack slot (-8) to the register whose value was released
 * into it (r1).
 */
__naked int bpf_store_release(void)
{
	asm volatile (
		"r1 = 8;"
		".8byte %[store_release_insn];" /* store_release((u64 *)(r10 - 8), r1); */
		"r1 = *(u64 *)(r10 - 8);"
		"r2 = r10;"
		"r2 += r1;" /* mark_precise */
		"r0 = 0;"
		"exit;"
		:
		: __imm_insn(store_release_insn,
			     BPF_ATOMIC_OP(BPF_DW, BPF_STORE_REL, BPF_REG_10, BPF_REG_1, -8))
		: __clobber_all);
}
146
147#endif /* CAN_USE_LOAD_ACQ_STORE_REL */
148#endif /* v4 instruction */
149
SEC("?raw_tp")
__success __log_level(2)
/*
 * Without the bug fix there will be no history between "last_idx 3 first_idx 3"
 * and "parent state regs=" lines. "R0=6" parts are here to help anchor
 * expected log messages to the one specific mark_chain_precision operation.
 *
 * This is quite fragile: if verifier checkpointing heuristic changes, this
 * might need adjusting.
 */
__msg("2: (07) r0 += 1 ; R0=6")
__msg("3: (35) if r0 >= 0xa goto pc+1")
__msg("mark_precise: frame0: last_idx 3 first_idx 3 subseq_idx -1")
__msg("mark_precise: frame0: regs=r0 stack= before 2: (07) r0 += 1")
__msg("mark_precise: frame0: regs=r0 stack= before 1: (07) r0 += 1")
__msg("mark_precise: frame0: regs=r0 stack= before 4: (05) goto pc-4")
__msg("mark_precise: frame0: regs=r0 stack= before 3: (35) if r0 >= 0xa goto pc+1")
__msg("mark_precise: frame0: parent state regs= stack=: R0=P4")
__msg("3: R0=6")
/* Simple counting loop whose checkpoints can land on a state where
 * first_idx == last_idx, exercising precision backtracking across the
 * state boundary into the parent state.
 */
__naked int state_loop_first_last_equal(void)
{
	asm volatile (
		"r0 = 0;"
	"l0_%=:"
		"r0 += 1;"
		"r0 += 1;"
		/* every few iterations we'll have a checkpoint here with
		 * first_idx == last_idx, potentially confusing precision
		 * backtracking logic
		 */
		"if r0 >= 10 goto l1_%=;" /* checkpoint + mark_precise */
		"goto l0_%=;"
	"l1_%=:"
		"exit;"
		::: __clobber_common
	);
}
187
/* Subprogram used by the bpf_cond_op_r10 test: a chain of conditional
 * jumps, one of which ("if r2 <= r10") uses the frame pointer r10 as a
 * source operand of the comparison.
 */
__used __naked static void __bpf_cond_op_r10(void)
{
	asm volatile (
		"r2 = 2314885393468386424 ll;"
		"goto +0;"
		"if r2 <= r10 goto +3;"
		"if r1 >= -1835016 goto +0;"
		"if r2 <= 8 goto +0;"
		"if r3 <= 0 goto +0;"
		"exit;"
		::: __clobber_all);
}
200
SEC("?raw_tp")
__success __log_level(2)
__msg("8: (bd) if r2 <= r10 goto pc+3")
__msg("9: (35) if r1 >= 0xffe3fff8 goto pc+0")
__msg("10: (b5) if r2 <= 0x8 goto pc+0")
__msg("mark_precise: frame1: last_idx 10 first_idx 0 subseq_idx -1")
__msg("mark_precise: frame1: regs=r2 stack= before 9: (35) if r1 >= 0xffe3fff8 goto pc+0")
__msg("mark_precise: frame1: regs=r2 stack= before 8: (bd) if r2 <= r10 goto pc+3")
__msg("mark_precise: frame1: regs=r2 stack= before 7: (05) goto pc+0")
/* Verify that precision backtracking inside a subprogram (frame1)
 * correctly steps through a conditional jump whose source operand is
 * r10, keeping r2 tracked across it.
 */
__naked void bpf_cond_op_r10(void)
{
	asm volatile (
		"r3 = 0 ll;"
		"call __bpf_cond_op_r10;"
		"r0 = 0;"
		"exit;"
		::: __clobber_all);
}
219
SEC("?raw_tp")
__success __log_level(2)
__msg("3: (bf) r3 = r10")
__msg("4: (bd) if r3 <= r2 goto pc+1")
__msg("5: (b5) if r2 <= 0x8 goto pc+2")
__msg("mark_precise: frame0: last_idx 5 first_idx 0 subseq_idx -1")
__msg("mark_precise: frame0: regs=r2 stack= before 4: (bd) if r3 <= r2 goto pc+1")
__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r3 = r10")
/* Companion to bpf_cond_op_r10: here the r10-derived value (r3) is the
 * destination of the comparison, and r2 must stay precisely tracked
 * through it in frame0.
 */
__naked void bpf_cond_op_not_r10(void)
{
	asm volatile (
		"r0 = 0;"
		"r2 = 2314885393468386424 ll;"
		"r3 = r10;"
		"if r3 <= r2 goto +1;"
		"if r2 <= 8 goto +2;"
		"r0 = 2 ll;"
		"exit;"
		::: __clobber_all);
}
240
SEC("lsm.s/socket_connect")
__success __log_level(2)
__msg("0: (b7) r0 = 1 ; R0=1")
__msg("1: (84) w0 = -w0 ; R0=0xffffffff")
__msg("mark_precise: frame0: last_idx 2 first_idx 0 subseq_idx -1")
__msg("mark_precise: frame0: regs=r0 stack= before 1: (84) w0 = -w0")
__msg("mark_precise: frame0: regs=r0 stack= before 0: (b7) r0 = 1")
/* Verify value tracking through 32-bit BPF_NEG: w0 = -w0 on R0=1 must
 * yield R0=0xffffffff, which the retval check interprets as -1.
 */
__naked int bpf_neg_2(void)
{
	/*
	 * lsm.s/socket_connect requires a return value within [-4095, 0].
	 * Returning -1 is allowed.
	 */
	asm volatile (
		"r0 = 1;"
		"w0 = -w0;"
		"exit;"
		::: __clobber_all);
}
260
SEC("lsm.s/socket_connect")
__failure __msg("At program exit the register R0 has")
/* 32-bit BPF_NEG producing an out-of-range return value must be
 * rejected by the exit-value check.
 */
__naked int bpf_neg_3(void)
{
	/*
	 * lsm.s/socket_connect requires a return value within [-4095, 0].
	 * Returning -10000 is not allowed.
	 */
	asm volatile (
		"r0 = 10000;"
		"w0 = -w0;"
		"exit;"
		::: __clobber_all);
}
275
SEC("lsm.s/socket_connect")
__success __log_level(2)
__msg("0: (b7) r0 = 1 ; R0=1")
__msg("1: (87) r0 = -r0 ; R0=-1")
__msg("mark_precise: frame0: last_idx 2 first_idx 0 subseq_idx -1")
__msg("mark_precise: frame0: regs=r0 stack= before 1: (87) r0 = -r0")
__msg("mark_precise: frame0: regs=r0 stack= before 0: (b7) r0 = 1")
/* Verify value tracking through 64-bit BPF_NEG: r0 = -r0 on R0=1 must
 * yield R0=-1, which is within the allowed return range.
 */
__naked int bpf_neg_4(void)
{
	/*
	 * lsm.s/socket_connect requires a return value within [-4095, 0].
	 * Returning -1 is allowed.
	 */
	asm volatile (
		"r0 = 1;"
		"r0 = -r0;"
		"exit;"
		::: __clobber_all);
}
295
SEC("lsm.s/socket_connect")
__failure __msg("At program exit the register R0 has")
/* 64-bit BPF_NEG producing an out-of-range return value must be
 * rejected by the exit-value check.
 */
__naked int bpf_neg_5(void)
{
	/*
	 * lsm.s/socket_connect requires a return value within [-4095, 0].
	 * Returning -10000 is not allowed.
	 */
	asm volatile (
		"r0 = 10000;"
		"r0 = -r0;"
		"exit;"
		::: __clobber_all);
}
310
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 4: (bf) r3 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 3: (db) r2 = atomic64_fetch_add((u64 *)(r10 -8), r2)")
__msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r2 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
/* Verify that backtracking through atomic64_fetch_add converts
 * precision on the fetched register (r2) into precision on the source
 * stack slot (-8), and from there back to the stored register (r1).
 */
__naked int bpf_atomic_fetch_add_precision(void)
{
	asm volatile (
		"r1 = 8;"
		"*(u64 *)(r10 - 8) = r1;"
		"r2 = 0;"
		".8byte %[fetch_add_insn];" /* r2 = atomic_fetch_add(*(u64 *)(r10 - 8), r2) */
		"r3 = r10;"
		"r3 += r2;" /* mark_precise */
		"r0 = 0;"
		"exit;"
		:
		: __imm_insn(fetch_add_insn,
			     BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_10, BPF_REG_2, -8))
		: __clobber_all);
}
334
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 4: (bf) r3 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 3: (db) r2 = atomic64_xchg((u64 *)(r10 -8), r2)")
__msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r2 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
/* Verify that backtracking through atomic64_xchg converts precision on
 * the fetched register (r2) into precision on the exchanged stack slot
 * (-8), and from there back to the stored register (r1).
 */
__naked int bpf_atomic_xchg_precision(void)
{
	asm volatile (
		"r1 = 8;"
		"*(u64 *)(r10 - 8) = r1;"
		"r2 = 0;"
		".8byte %[xchg_insn];" /* r2 = atomic_xchg(*(u64 *)(r10 - 8), r2) */
		"r3 = r10;"
		"r3 += r2;" /* mark_precise */
		"r0 = 0;"
		"exit;"
		:
		: __imm_insn(xchg_insn,
			     BPF_ATOMIC_OP(BPF_DW, BPF_XCHG, BPF_REG_10, BPF_REG_2, -8))
		: __clobber_all);
}
358
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 4: (bf) r3 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 3: (db) r2 = atomic64_fetch_or((u64 *)(r10 -8), r2)")
__msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r2 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
/* Same backtracking pattern as the fetch_add test, but through
 * atomic64_fetch_or: precision moves from r2 to stack slot -8 to r1.
 */
__naked int bpf_atomic_fetch_or_precision(void)
{
	asm volatile (
		"r1 = 8;"
		"*(u64 *)(r10 - 8) = r1;"
		"r2 = 0;"
		".8byte %[fetch_or_insn];" /* r2 = atomic_fetch_or(*(u64 *)(r10 - 8), r2) */
		"r3 = r10;"
		"r3 += r2;" /* mark_precise */
		"r0 = 0;"
		"exit;"
		:
		: __imm_insn(fetch_or_insn,
			     BPF_ATOMIC_OP(BPF_DW, BPF_OR | BPF_FETCH, BPF_REG_10, BPF_REG_2, -8))
		: __clobber_all);
}
382
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 4: (bf) r3 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 3: (db) r2 = atomic64_fetch_and((u64 *)(r10 -8), r2)")
__msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r2 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
/* Same backtracking pattern as the fetch_add test, but through
 * atomic64_fetch_and: precision moves from r2 to stack slot -8 to r1.
 */
__naked int bpf_atomic_fetch_and_precision(void)
{
	asm volatile (
		"r1 = 8;"
		"*(u64 *)(r10 - 8) = r1;"
		"r2 = 0;"
		".8byte %[fetch_and_insn];" /* r2 = atomic_fetch_and(*(u64 *)(r10 - 8), r2) */
		"r3 = r10;"
		"r3 += r2;" /* mark_precise */
		"r0 = 0;"
		"exit;"
		:
		: __imm_insn(fetch_and_insn,
			     BPF_ATOMIC_OP(BPF_DW, BPF_AND | BPF_FETCH, BPF_REG_10, BPF_REG_2, -8))
		: __clobber_all);
}
406
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 4: (bf) r3 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 3: (db) r2 = atomic64_fetch_xor((u64 *)(r10 -8), r2)")
__msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r2 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
/* Same backtracking pattern as the fetch_add test, but through
 * atomic64_fetch_xor: precision moves from r2 to stack slot -8 to r1.
 */
__naked int bpf_atomic_fetch_xor_precision(void)
{
	asm volatile (
		"r1 = 8;"
		"*(u64 *)(r10 - 8) = r1;"
		"r2 = 0;"
		".8byte %[fetch_xor_insn];" /* r2 = atomic_fetch_xor(*(u64 *)(r10 - 8), r2) */
		"r3 = r10;"
		"r3 += r2;" /* mark_precise */
		"r0 = 0;"
		"exit;"
		:
		: __imm_insn(fetch_xor_insn,
			     BPF_ATOMIC_OP(BPF_DW, BPF_XOR | BPF_FETCH, BPF_REG_10, BPF_REG_2, -8))
		: __clobber_all);
}
430
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r0 stack= before 5: (bf) r3 = r10")
__msg("mark_precise: frame0: regs=r0 stack= before 4: (db) r0 = atomic64_cmpxchg((u64 *)(r10 -8), r0, r2)")
__msg("mark_precise: frame0: regs= stack=-8 before 3: (b7) r2 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r0 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
/* Verify that backtracking through atomic64_cmpxchg converts precision
 * on the returned old value (r0) into precision on the stack slot (-8),
 * and from there back to the stored register (r1).
 */
__naked int bpf_atomic_cmpxchg_precision(void)
{
	asm volatile (
		"r1 = 8;"
		"*(u64 *)(r10 - 8) = r1;"
		"r0 = 0;"
		"r2 = 0;"
		".8byte %[cmpxchg_insn];" /* r0 = atomic_cmpxchg(*(u64 *)(r10 - 8), r0, r2) */
		"r3 = r10;"
		"r3 += r0;" /* mark_precise */
		"r0 = 0;"
		"exit;"
		:
		: __imm_insn(cmpxchg_insn,
			     BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_2, -8))
		: __clobber_all);
}
456
/* Regression test for dual precision: Both the fetched value (r2) and
 * a reread of the same stack slot (r3) are tracked for precision. After
 * the atomic operation, the stack slot is STACK_MISC. Thus, the ldx at
 * insn 4 does NOT set INSN_F_STACK_ACCESS. Precision for the stack slot
 * propagates solely through the atomic fetch's load side (insn 3).
 */
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2,r3 stack= before 4: (79) r3 = *(u64 *)(r10 -8)")
__msg("mark_precise: frame0: regs=r2 stack= before 3: (db) r2 = atomic64_fetch_add((u64 *)(r10 -8), r2)")
__msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r2 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
__naked int bpf_atomic_fetch_add_dual_precision(void)
{
	asm volatile (
		"r1 = 8;"
		"*(u64 *)(r10 - 8) = r1;"
		"r2 = 0;"
		".8byte %[fetch_add_insn];" /* r2 = atomic_fetch_add(*(u64 *)(r10 - 8), r2) */
		"r3 = *(u64 *)(r10 - 8);"
		"r4 = r2;"
		"r4 += r3;"
		"r4 &= 7;"	/* bound r4 so the stack offset stays valid */
		"r5 = r10;"
		"r5 += r4;" /* mark_precise */
		"r0 = 0;"
		"exit;"
		:
		: __imm_insn(fetch_add_insn,
			     BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_10, BPF_REG_2, -8))
		: __clobber_all);
}
490
/* Dual-precision variant for cmpxchg: both the returned old value (r0)
 * and a reread of the same stack slot (r3) are tracked for precision;
 * per the expected log, precision for the slot propagates through the
 * cmpxchg's load side rather than through the subsequent ldx.
 */
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r0,r3 stack= before 5: (79) r3 = *(u64 *)(r10 -8)")
__msg("mark_precise: frame0: regs=r0 stack= before 4: (db) r0 = atomic64_cmpxchg((u64 *)(r10 -8), r0, r2)")
__msg("mark_precise: frame0: regs= stack=-8 before 3: (b7) r2 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r0 = 8")
__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
__naked int bpf_atomic_cmpxchg_dual_precision(void)
{
	asm volatile (
		"r1 = 8;"
		"*(u64 *)(r10 - 8) = r1;"
		"r0 = 8;"
		"r2 = 0;"
		".8byte %[cmpxchg_insn];" /* r0 = atomic_cmpxchg(*(u64 *)(r10 - 8), r0, r2) */
		"r3 = *(u64 *)(r10 - 8);"
		"r4 = r0;"
		"r4 += r3;"
		"r4 &= 7;"	/* bound r4 so the stack offset stays valid */
		"r5 = r10;"
		"r5 += r4;" /* mark_precise */
		"r0 = 0;"
		"exit;"
		:
		: __imm_insn(cmpxchg_insn,
			     BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_2, -8))
		: __clobber_all);
}
520
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r1 stack= before 10: (57) r1 &= 7")
__msg("mark_precise: frame0: regs=r1 stack= before 9: (db) r1 = atomic64_fetch_add((u64 *)(r0 +0), r1)")
__not_msg("falling back to forcing all scalars precise")
/* atomic64_fetch_add on map-value memory (not the stack): precision of
 * r1 must backtrack through the atomic instruction without the verifier
 * falling back to forcing all scalars precise.
 */
__naked int bpf_atomic_fetch_add_map_precision(void)
{
	asm volatile (
		"r1 = 0;"
		"*(u64 *)(r10 - 8) = r1;"
		"r2 = r10;"
		"r2 += -8;"
		"r1 = %[precision_map] ll;"
		"call %[bpf_map_lookup_elem];"
		"if r0 == 0 goto 1f;"	/* lookup may fail; skip on NULL */
		"r1 = 0;"
		".8byte %[fetch_add_insn];" /* r1 = atomic_fetch_add(*(u64 *)(r0 + 0), r1) */
		"r1 &= 7;"	/* bound r1 so the stack offset stays valid */
		"r2 = r10;"
		"r2 += r1;" /* mark_precise */
		"1: r0 = 0;"
		"exit;"
		:
		: __imm_addr(precision_map),
		  __imm(bpf_map_lookup_elem),
		  __imm_insn(fetch_add_insn,
			     BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_0, BPF_REG_1, 0))
		: __clobber_all);
}
550
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r0 stack= before 12: (57) r0 &= 7")
__msg("mark_precise: frame0: regs=r0 stack= before 11: (db) r0 = atomic64_cmpxchg((u64 *)(r6 +0), r0, r1)")
__not_msg("falling back to forcing all scalars precise")
/* atomic64_cmpxchg on map-value memory (not the stack): precision of r0
 * must backtrack through the atomic instruction without the verifier
 * falling back to forcing all scalars precise.
 */
__naked int bpf_atomic_cmpxchg_map_precision(void)
{
	asm volatile (
		"r1 = 0;"
		"*(u64 *)(r10 - 8) = r1;"
		"r2 = r10;"
		"r2 += -8;"
		"r1 = %[precision_map] ll;"
		"call %[bpf_map_lookup_elem];"
		"if r0 == 0 goto 1f;"	/* lookup may fail; skip on NULL */
		"r6 = r0;"	/* keep map value pointer; r0 becomes cmpxchg's old value */
		"r0 = 0;"
		"r1 = 0;"
		".8byte %[cmpxchg_insn];" /* r0 = atomic_cmpxchg(*(u64 *)(r6 + 0), r0, r1) */
		"r0 &= 7;"	/* bound r0 so the stack offset stays valid */
		"r2 = r10;"
		"r2 += r0;" /* mark_precise */
		"1: r0 = 0;"
		"exit;"
		:
		: __imm_addr(precision_map),
		  __imm(bpf_map_lookup_elem),
		  __imm_insn(cmpxchg_insn,
			     BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_6, BPF_REG_1, 0))
		: __clobber_all);
}
582
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r1 stack= before 10: (57) r1 &= 7")
__msg("mark_precise: frame0: regs=r1 stack= before 9: (c3) r1 = atomic_fetch_add((u32 *)(r0 +0), r1)")
__not_msg("falling back to forcing all scalars precise")
/* 32-bit (BPF_W) atomic_fetch_add on map-value memory: precision of r1
 * must backtrack through the atomic instruction without the verifier
 * falling back to forcing all scalars precise.
 */
__naked int bpf_atomic_fetch_add_32bit_precision(void)
{
	asm volatile (
		"r1 = 0;"
		"*(u64 *)(r10 - 8) = r1;"
		"r2 = r10;"
		"r2 += -8;"
		"r1 = %[precision_map] ll;"
		"call %[bpf_map_lookup_elem];"
		"if r0 == 0 goto 1f;"	/* lookup may fail; skip on NULL */
		"r1 = 0;"
		".8byte %[fetch_add_insn];" /* r1 = atomic_fetch_add(*(u32 *)(r0 + 0), r1) */
		"r1 &= 7;"	/* bound r1 so the stack offset stays valid */
		"r2 = r10;"
		"r2 += r1;" /* mark_precise */
		"1: r0 = 0;"
		"exit;"
		:
		: __imm_addr(precision_map),
		  __imm(bpf_map_lookup_elem),
		  __imm_insn(fetch_add_insn,
			     BPF_ATOMIC_OP(BPF_W, BPF_ADD | BPF_FETCH, BPF_REG_0, BPF_REG_1, 0))
		: __clobber_all);
}
612
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r0 stack= before 12: (57) r0 &= 7")
__msg("mark_precise: frame0: regs=r0 stack= before 11: (c3) r0 = atomic_cmpxchg((u32 *)(r6 +0), r0, r1)")
__not_msg("falling back to forcing all scalars precise")
/* 32-bit (BPF_W) atomic_cmpxchg on map-value memory: precision of r0
 * must backtrack through the atomic instruction without the verifier
 * falling back to forcing all scalars precise.
 */
__naked int bpf_atomic_cmpxchg_32bit_precision(void)
{
	asm volatile (
		"r1 = 0;"
		"*(u64 *)(r10 - 8) = r1;"
		"r2 = r10;"
		"r2 += -8;"
		"r1 = %[precision_map] ll;"
		"call %[bpf_map_lookup_elem];"
		"if r0 == 0 goto 1f;"	/* lookup may fail; skip on NULL */
		"r6 = r0;"	/* keep map value pointer; r0 becomes cmpxchg's old value */
		"r0 = 0;"
		"r1 = 0;"
		".8byte %[cmpxchg_insn];" /* r0 = atomic_cmpxchg(*(u32 *)(r6 + 0), r0, r1) */
		"r0 &= 7;"	/* bound r0 so the stack offset stays valid */
		"r2 = r10;"
		"r2 += r0;" /* mark_precise */
		"1: r0 = 0;"
		"exit;"
		:
		: __imm_addr(precision_map),
		  __imm(bpf_map_lookup_elem),
		  __imm_insn(cmpxchg_insn,
			     BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_6, BPF_REG_1, 0))
		: __clobber_all);
}
644
/* License declaration required by the BPF loader */
char _license[] SEC("license") = "GPL";