/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_VERIFIER_H
#define _LINUX_BPF_VERIFIER_H 1

#include <linux/bpf.h> /* for enum bpf_reg_type */
#include <linux/btf.h> /* for struct btf and btf_id() */
#include <linux/filter.h> /* for MAX_BPF_STACK */
#include <linux/tnum.h>

/* Maximum variable offset umax_value permitted when resolving memory accesses.
 * In practice this is far bigger than any realistic pointer offset; this limit
 * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
 */
#define BPF_MAX_VAR_OFF (1 << 29)
/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures
 * that converting umax_value to int cannot overflow.
 */
#define BPF_MAX_VAR_SIZ (1 << 29)
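/* Informal sanity check of the two bounds above: with umax_value capped at
 * 2^29 and off/size each bounded by INT_MAX < 2^31, the worst-case sum
 * umax_value + (int)off + (int)size stays below 2^33, nowhere near u64
 * overflow, and 2^29 itself converts to int without overflow.
 */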
/* size of tmp_str_buf in bpf_verifier.
 * we need at least 306 bytes to fit full stack mask representation
 * (in the "-8,-16,...,-512" form)
 */
#define TMP_STR_BUF_LEN 320
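/* Where the 306 comes from (informal): the mask lists the 64 multiples of 8
 * from -8 to -512. That is one 2-character entry ("-8"), eleven 3-character
 * entries ("-16".."-96") and fifty-two 4-character entries ("-104".."-512"),
 * i.e. 2 + 33 + 208 = 243 characters, plus 63 separating commas = 306 bytes.
 */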
/* Patch buffer size */
#define INSN_BUF_SIZE 32

#define ITER_PREFIX "bpf_iter_"

enum bpf_iter_state {
	BPF_ITER_STATE_INVALID, /* for non-first slot */
	BPF_ITER_STATE_ACTIVE,
	BPF_ITER_STATE_DRAINED,
};

struct bpf_reg_state {
	/* Ordering of fields matters. See states_equal() */
	enum bpf_reg_type type;
	/*
	 * Constant delta between "linked" scalars with the same ID.
	 */
	s32 delta;
	union {
		/* valid when type == PTR_TO_PACKET */
		int range;

		/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
		 *   PTR_TO_MAP_VALUE_OR_NULL
		 */
		struct {
			struct bpf_map *map_ptr;
			/* To distinguish map lookups from outer map
			 * the map_uid is non-zero for registers
			 * pointing to inner maps.
			 */
			u32 map_uid;
		};

		/* for PTR_TO_BTF_ID */
		struct {
			struct btf *btf;
			u32 btf_id;
		};

		struct { /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */
			u32 mem_size;
			u32 dynptr_id; /* for dynptr slices */
		};

		/* For dynptr stack slots */
		struct {
			enum bpf_dynptr_type type;
			/* A dynptr is 16 bytes so it takes up 2 stack slots.
			 * We need to track which slot is the first slot
			 * to protect against cases where the user may try to
			 * pass in an address starting at the second slot of the
			 * dynptr.
			 */
			bool first_slot;
		} dynptr;

		/* For bpf_iter stack slots */
		struct {
			/* BTF container and BTF type ID describing
			 * struct bpf_iter_<type> of an iterator state
			 */
			struct btf *btf;
			u32 btf_id;
			/* packing following two fields to fit iter state into 16 bytes */
			enum bpf_iter_state state:2;
			int depth:30;
		} iter;

		/* For irq stack slots */
		struct {
			enum {
				IRQ_NATIVE_KFUNC,
				IRQ_LOCK_KFUNC,
			} kfunc_class;
		} irq;

		/* Max size from any of the above. */
		struct {
			unsigned long raw1;
			unsigned long raw2;
		} raw;

		u32 subprogno; /* for PTR_TO_FUNC */
	};
	/* For scalar types (SCALAR_VALUE), this represents our knowledge of
	 * the actual value.
	 * For pointer types, this represents the variable part of the offset
	 * from the pointed-to object, and is shared with all bpf_reg_states
	 * with the same id as us.
	 */
	struct tnum var_off;
	/* Used to determine if any memory access using this register will
	 * result in a bad access.
	 * These refer to the same value as var_off, not necessarily the actual
	 * contents of the register.
	 */
	s64 smin_value; /* minimum possible (s64)value */
	s64 smax_value; /* maximum possible (s64)value */
	u64 umin_value; /* minimum possible (u64)value */
	u64 umax_value; /* maximum possible (u64)value */
	s32 s32_min_value; /* minimum possible (s32)value */
	s32 s32_max_value; /* maximum possible (s32)value */
	u32 u32_min_value; /* minimum possible (u32)value */
	u32 u32_max_value; /* maximum possible (u32)value */
	/* For PTR_TO_PACKET, used to find other pointers with the same variable
	 * offset, so they can share range knowledge.
	 * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
	 * came from, when one is tested for != NULL.
	 * For PTR_TO_MEM_OR_NULL this is used to identify memory allocation
	 * for the purpose of tracking that it's freed.
	 * For PTR_TO_SOCKET this is used to share which pointers retain the
	 * same reference to the socket, to determine proper reference freeing.
	 * For stack slots that are dynptrs, this is used to track references to
	 * the dynptr to determine proper reference freeing.
	 * Similarly to dynptrs, we use ID to track "belonging" of a reference
	 * to a specific instance of bpf_iter.
	 */
	/*
	 * The two upper bits of the ID are used to remember the relationship
	 * between "linked" registers. Example:
	 * r1 = r2;  both will have r1->id == r2->id == N
	 * r1 += 10; r1->id == N | BPF_ADD_CONST64 and r1->delta == 10
	 * r3 = r2;  both will have r3->id == r2->id == N
	 * w3 += 10; r3->id == N | BPF_ADD_CONST32 and r3->delta == 10
	 */
#define BPF_ADD_CONST64 (1U << 31)
#define BPF_ADD_CONST32 (1U << 30)
#define BPF_ADD_CONST (BPF_ADD_CONST64 | BPF_ADD_CONST32)
	u32 id;
	/* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
	 * from a pointer-cast helper, bpf_sk_fullsock() and
	 * bpf_tcp_sock().
	 *
	 * Consider the following where "sk" is a reference counted
	 * pointer returned from "sk = bpf_sk_lookup_tcp();":
	 *
	 * 1: sk = bpf_sk_lookup_tcp();
	 * 2: if (!sk) { return 0; }
	 * 3: fullsock = bpf_sk_fullsock(sk);
	 * 4: if (!fullsock) { bpf_sk_release(sk); return 0; }
	 * 5: tp = bpf_tcp_sock(fullsock);
	 * 6: if (!tp) { bpf_sk_release(sk); return 0; }
	 * 7: bpf_sk_release(sk);
	 * 8: snd_cwnd = tp->snd_cwnd; // verifier will complain
	 *
	 * After bpf_sk_release(sk) at line 7, both "fullsock" ptr and
	 * "tp" ptr should be invalidated also. In order to do that,
	 * the reg holding "fullsock" and "sk" need to remember
	 * the original refcounted ptr id (i.e. sk_reg->id) in ref_obj_id
	 * such that the verifier can reset all regs which have
	 * ref_obj_id matching the sk_reg->id.
	 *
	 * sk_reg->ref_obj_id is set to sk_reg->id at line 1.
	 * sk_reg->id is kept for NULL-marking purposes only.
	 * After NULL-marking is done, sk_reg->id can be reset to 0.
	 *
	 * After "fullsock = bpf_sk_fullsock(sk);" at line 3,
	 * fullsock_reg->ref_obj_id is set to sk_reg->ref_obj_id.
	 *
	 * After "tp = bpf_tcp_sock(fullsock);" at line 5,
	 * tp_reg->ref_obj_id is set to fullsock_reg->ref_obj_id
	 * which is the same as sk_reg->ref_obj_id.
	 *
	 * From the verifier perspective, if sk, fullsock and tp
	 * are not NULL, they are the same ptr with different
	 * reg->type. In particular, bpf_sk_release(tp) is also
	 * allowed and has the same effect as bpf_sk_release(sk).
	 */
	u32 ref_obj_id;
	/* Inside the callee two registers can be both PTR_TO_STACK like
	 * R1=fp-8 and R2=fp-8, but one of them points to this function stack
	 * while another to the caller's stack. To differentiate them 'frameno'
	 * is used which is an index in bpf_verifier_state->frame[] array
	 * pointing to bpf_func_state.
	 */
	u32 frameno;
	/* Tracks subreg definition. The stored value is the insn_idx of the
	 * writing insn. This is safe because subreg_def is used before any insn
	 * patching which only happens after main verification finished.
	 */
	s32 subreg_def;
	/* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */
	bool precise;
};

enum bpf_stack_slot_type {
	STACK_INVALID, /* nothing was stored in this stack slot */
	STACK_SPILL, /* register spilled into stack */
	STACK_MISC, /* BPF program wrote some data into this slot */
	STACK_ZERO, /* BPF program wrote constant zero */
	/* A dynptr is stored in this stack slot. The type of dynptr
	 * is stored in bpf_stack_state->spilled_ptr.dynptr.type
	 */
	STACK_DYNPTR,
	STACK_ITER,
	STACK_IRQ_FLAG,
	STACK_POISON,
};

#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */

/* 4-byte stack slot granularity for liveness analysis */
#define BPF_HALF_REG_SIZE 4
#define STACK_SLOT_SZ 4
#define STACK_SLOTS (MAX_BPF_STACK / BPF_HALF_REG_SIZE) /* 128 */

typedef struct {
	u64 v[2];
} spis_t;

#define SPIS_ZERO ((spis_t){})
#define SPIS_ALL ((spis_t){{ U64_MAX, U64_MAX }})

static inline bool spis_is_zero(spis_t s)
{
	return s.v[0] == 0 && s.v[1] == 0;
}

static inline bool spis_equal(spis_t a, spis_t b)
{
	return a.v[0] == b.v[0] && a.v[1] == b.v[1];
}

static inline spis_t spis_or(spis_t a, spis_t b)
{
	return (spis_t){{ a.v[0] | b.v[0], a.v[1] | b.v[1] }};
}

static inline spis_t spis_and(spis_t a, spis_t b)
{
	return (spis_t){{ a.v[0] & b.v[0], a.v[1] & b.v[1] }};
}

static inline spis_t spis_not(spis_t s)
{
	return (spis_t){{ ~s.v[0], ~s.v[1] }};
}

static inline bool spis_test_bit(spis_t s, u32 slot)
{
	return s.v[slot / 64] & BIT_ULL(slot % 64);
}

static inline void spis_or_range(spis_t *mask, u32 lo, u32 hi)
{
	u32 w;

	for (w = lo; w <= hi && w < STACK_SLOTS; w++)
		mask->v[w / 64] |= BIT_ULL(w % 64);
}
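
/* Usage sketch (illustrative only): spis_t is a 128-bit set indexed by
 * 4-byte stack slot, matching STACK_SLOTS above. E.g. to mark slots 0..3
 * (the top 16 bytes of the stack) and intersect with another mask:
 *
 *	spis_t a = SPIS_ZERO, b = SPIS_ZERO;
 *
 *	spis_or_range(&a, 0, 3);		// slots 0..3, i.e. fp-16..fp-1
 *	spis_or_range(&b, 2, 5);
 *	if (spis_test_bit(spis_and(a, b), 2))	// true: slot 2 is in both
 *		...;
 */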

#define BPF_REGMASK_ARGS ((1 << BPF_REG_1) | (1 << BPF_REG_2) | \
			  (1 << BPF_REG_3) | (1 << BPF_REG_4) | \
			  (1 << BPF_REG_5))

#define BPF_MAIN_FUNC (-1)

#define BPF_DYNPTR_SIZE sizeof(struct bpf_dynptr_kern)
#define BPF_DYNPTR_NR_SLOTS (BPF_DYNPTR_SIZE / BPF_REG_SIZE)
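/* Given the 16-byte dynptr noted in the bpf_reg_state dynptr comment above,
 * BPF_DYNPTR_NR_SLOTS works out to 16 / 8 = 2 stack slots per dynptr.
 */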

struct bpf_stack_state {
	struct bpf_reg_state spilled_ptr;
	u8 slot_type[BPF_REG_SIZE];
};

struct bpf_reference_state {
	/* Each reference object has a type. */
	enum ref_state_type {
		REF_TYPE_PTR = (1 << 1),
		REF_TYPE_IRQ = (1 << 2),
		REF_TYPE_LOCK = (1 << 3),
		REF_TYPE_RES_LOCK = (1 << 4),
		REF_TYPE_RES_LOCK_IRQ = (1 << 5),
		REF_TYPE_LOCK_MASK = REF_TYPE_LOCK | REF_TYPE_RES_LOCK | REF_TYPE_RES_LOCK_IRQ,
	} type;
	/* Track each reference created with a unique id, even if the same
	 * instruction creates the reference multiple times (e.g., via CALL).
	 */
	int id;
	/* Instruction where the allocation of this reference occurred. This
	 * is used purely to inform the user of a reference leak.
	 */
	int insn_idx;
	/* Used to keep track of the source object of a lock, to ensure
	 * it matches on unlock.
	 */
	void *ptr;
};

struct bpf_retval_range {
	s32 minval;
	s32 maxval;
	bool return_32bit;
};

/* state of the program:
 * type of all registers and stack info
 */
struct bpf_func_state {
	struct bpf_reg_state regs[MAX_BPF_REG];
	/* index of call instruction that called into this func */
	int callsite;
	/* stack frame number of this function state from pov of
	 * enclosing bpf_verifier_state.
	 * 0 = main function, 1 = first callee.
	 */
	u32 frameno;
	/* subprog number == index within subprog_info
	 * zero == main subprog
	 */
	u32 subprogno;
	/* Every bpf_timer_start will increment async_entry_cnt.
	 * It's used to distinguish:
	 * void foo(void) { for(;;); }
	 * void foo(void) { bpf_timer_set_callback(,foo); }
	 */
	u32 async_entry_cnt;
	struct bpf_retval_range callback_ret_range;
	bool in_callback_fn;
	bool in_async_callback_fn;
	bool in_exception_callback_fn;
	/* For callback calling functions that limit number of possible
	 * callback executions (e.g. bpf_loop) keeps track of current
	 * simulated iteration number.
	 * Value in frame N refers to number of times callback with frame
	 * N+1 was simulated, e.g. for the following call:
	 *
	 * bpf_loop(..., fn, ...); | suppose current frame is N
	 *                         | fn would be simulated in frame N+1
	 *                         | number of simulations is tracked in frame N
	 */
	u32 callback_depth;

	/* The following fields should be last. See copy_func_state() */
	/* The state of the stack. Each element of the array describes BPF_REG_SIZE
	 * (i.e. 8) bytes worth of stack memory.
	 * stack[0] represents bytes [*(r10-8)..*(r10-1)]
	 * stack[1] represents bytes [*(r10-16)..*(r10-9)]
	 * ...
	 * stack[allocated_stack/8 - 1] represents [*(r10-allocated_stack)..*(r10-allocated_stack+7)]
	 */
	struct bpf_stack_state *stack;
	/* Size of the current stack, in bytes. The stack state is tracked above, in
	 * `stack`. allocated_stack is always a multiple of BPF_REG_SIZE.
	 */
	int allocated_stack;
};

#define MAX_CALL_FRAMES 8

/* instruction history flags, used in bpf_jmp_history_entry.flags field */
enum {
	/* instruction references stack slot through PTR_TO_STACK register;
	 * we also store stack's frame number in lower 3 bits (MAX_CALL_FRAMES is 8)
	 * and accessed stack slot's index in next 6 bits (MAX_BPF_STACK is 512,
	 * 8 bytes per slot, so slot index (spi) is [0, 63])
	 */
	INSN_F_FRAMENO_MASK = 0x7, /* 3 bits */

	INSN_F_SPI_MASK = 0x3f, /* 6 bits */
	INSN_F_SPI_SHIFT = 3, /* shifted 3 bits to the left */

	INSN_F_STACK_ACCESS = BIT(9),

	INSN_F_DST_REG_STACK = BIT(10), /* dst_reg is PTR_TO_STACK */
	INSN_F_SRC_REG_STACK = BIT(11), /* src_reg is PTR_TO_STACK */
	/* total 12 bits are used now. */
};

static_assert(INSN_F_FRAMENO_MASK + 1 >= MAX_CALL_FRAMES);
static_assert(INSN_F_SPI_MASK + 1 >= MAX_BPF_STACK / 8);
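
/* Illustrative encoding (not a real helper): a stack access in frame 2
 * touching spi 5 would be recorded as
 *
 *	flags = 2 | (5 << INSN_F_SPI_SHIFT) | INSN_F_STACK_ACCESS;
 *
 * and decoded with
 *
 *	frameno = flags & INSN_F_FRAMENO_MASK;
 *	spi = (flags >> INSN_F_SPI_SHIFT) & INSN_F_SPI_MASK;
 */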

struct bpf_jmp_history_entry {
	u32 idx;
	/* insn idx can't be bigger than 1 million */
	u32 prev_idx : 20;
	/* special INSN_F_xxx flags */
	u32 flags : 12;
	/* additional registers that need precision tracking when this
	 * jump is backtracked, vector of six 10-bit records
	 */
	u64 linked_regs;
};

/* Maximum number of register states that can exist at once */
#define BPF_ID_MAP_SIZE ((MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) * MAX_CALL_FRAMES)
struct bpf_verifier_state {
	/* call stack tracking */
	struct bpf_func_state *frame[MAX_CALL_FRAMES];
	struct bpf_verifier_state *parent;
	/* Acquired reference states */
	struct bpf_reference_state *refs;
	/*
	 * 'branches' field is the number of branches left to explore:
	 * 0 - all possible paths from this state reached bpf_exit or
	 *     were safely pruned
	 * 1 - at least one path is being explored.
	 *     This state hasn't reached bpf_exit
	 * 2 - at least two paths are being explored.
	 *     This state is an immediate parent of two children.
	 * One is fallthrough branch with branches==1 and another
	 * state is pushed into stack (to be explored later) also with
	 * branches==1. The parent of this state has branches==1.
	 * The verifier state tree connected via 'parent' pointer looks like:
	 * 1
	 * 1
	 * 2 -> 1 (first 'if' pushed into stack)
	 * 1
	 * 2 -> 1 (second 'if' pushed into stack)
	 * 1
	 * 1
	 * 1 bpf_exit.
	 *
	 * Once do_check() reaches bpf_exit, it calls update_branch_counts()
	 * and the verifier state tree will look:
	 * 1
	 * 1
	 * 2 -> 1 (first 'if' pushed into stack)
	 * 1
	 * 1 -> 1 (second 'if' pushed into stack)
	 * 0
	 * 0
	 * 0 bpf_exit.
	 * After pop_stack() the do_check() will resume at second 'if'.
	 *
	 * If is_state_visited() sees a state with branches > 0 it means
	 * there is a loop. If such state is exactly equal to the current state
	 * it's an infinite loop. Note states_equal() checks for states
	 * equivalency, so two states being 'states_equal' does not mean
	 * infinite loop. The exact comparison is provided by
	 * states_maybe_looping() function. It's a stronger pre-check and
	 * much faster than states_equal().
	 *
	 * This algorithm may not find all possible infinite loops or
	 * loop iteration count may be too high.
	 * In such cases BPF_COMPLEXITY_LIMIT_INSNS limit kicks in.
	 */
	u32 branches;
	u32 insn_idx;
	u32 curframe;

	u32 acquired_refs;
	u32 active_locks;
	u32 active_preempt_locks;
	u32 active_irq_id;
	u32 active_lock_id;
	void *active_lock_ptr;
	u32 active_rcu_locks;

	bool speculative;
	bool in_sleepable;

	/* first and last insn idx of this verifier state */
	u32 first_insn_idx;
	u32 last_insn_idx;
	/* if this state is a backedge state then equal_state
	 * records cached state to which this state is equal.
	 */
	struct bpf_verifier_state *equal_state;
	/* jmp history recorded from first to last.
	 * backtracking is using it to go from last to first.
	 * For most states jmp_history_cnt is [0-3].
	 * For loops can go up to ~40.
	 */
	struct bpf_jmp_history_entry *jmp_history;
	u32 jmp_history_cnt;
	u32 dfs_depth;
	u32 callback_unroll_depth;
	u32 may_goto_depth;
};

#define bpf_get_spilled_reg(slot, frame, mask)				\
	(((slot < frame->allocated_stack / BPF_REG_SIZE) &&		\
	  ((1 << frame->stack[slot].slot_type[BPF_REG_SIZE - 1]) & (mask))) \
	 ? &frame->stack[slot].spilled_ptr : NULL)

/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */
#define bpf_for_each_spilled_reg(iter, frame, reg, mask)		\
	for (iter = 0, reg = bpf_get_spilled_reg(iter, frame, mask);	\
	     iter < frame->allocated_stack / BPF_REG_SIZE;		\
	     iter++, reg = bpf_get_spilled_reg(iter, frame, mask))

#define bpf_for_each_reg_in_vstate_mask(__vst, __state, __reg, __mask, __expr)	\
	({									\
		struct bpf_verifier_state *___vstate = __vst;			\
		int ___i, ___j;							\
		for (___i = 0; ___i <= ___vstate->curframe; ___i++) {		\
			struct bpf_reg_state *___regs;				\
			__state = ___vstate->frame[___i];			\
			___regs = __state->regs;				\
			for (___j = 0; ___j < MAX_BPF_REG; ___j++) {		\
				__reg = &___regs[___j];				\
				(void)(__expr);					\
			}							\
			bpf_for_each_spilled_reg(___j, __state, __reg, __mask) { \
				if (!__reg)					\
					continue;				\
				(void)(__expr);					\
			}							\
		}								\
	})

/* Invoke __expr over registers in __vst, setting __state and __reg */
#define bpf_for_each_reg_in_vstate(__vst, __state, __reg, __expr)	\
	bpf_for_each_reg_in_vstate_mask(__vst, __state, __reg, 1 << STACK_SPILL, __expr)
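
/* Usage sketch (illustrative only): mark every scalar register in the
 * current verifier state as imprecise. '__state' and '__reg' are scratch
 * lvalues the macro assigns as it walks registers and spilled slots of
 * every frame:
 *
 *	struct bpf_func_state *st;
 *	struct bpf_reg_state *reg;
 *
 *	bpf_for_each_reg_in_vstate(env->cur_state, st, reg, ({
 *		if (reg->type == SCALAR_VALUE)
 *			reg->precise = false;
 *	}));
 */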

/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
	struct bpf_verifier_state state;
	struct list_head node;
	u32 miss_cnt;
	u32 hit_cnt:31;
	u32 in_free_list:1;
};

struct bpf_loop_inline_state {
	unsigned int initialized:1; /* set to true upon first entry */
	unsigned int fit_for_inline:1; /* true if callback function is the same
					* at each call and flags are always zero
					*/
	u32 callback_subprogno; /* valid when fit_for_inline is true */
};

/* pointer and state for maps */
struct bpf_map_ptr_state {
	struct bpf_map *map_ptr;
	bool poison;
	bool unpriv;
};

/* Possible states for alu_state member. */
#define BPF_ALU_SANITIZE_SRC (1U << 0)
#define BPF_ALU_SANITIZE_DST (1U << 1)
#define BPF_ALU_NEG_VALUE (1U << 2)
#define BPF_ALU_NON_POINTER (1U << 3)
#define BPF_ALU_IMMEDIATE (1U << 4)
#define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \
			  BPF_ALU_SANITIZE_DST)

/*
 * An array of BPF instructions.
 * Primary usage: return value of bpf_insn_successors.
 */
struct bpf_iarray {
	int cnt;
	u32 items[];
};

struct bpf_insn_aux_data {
	union {
		enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
		struct bpf_map_ptr_state map_ptr_state;
		s32 call_imm; /* saved imm field of call insn */
		u32 alu_limit; /* limit for add/sub register with pointer */
		struct {
			u32 map_index; /* index into used_maps[] */
			u32 map_off; /* offset from value base address */
		};
		struct {
			enum bpf_reg_type reg_type; /* type of pseudo_btf_id */
			union {
				struct {
					struct btf *btf;
					u32 btf_id; /* btf_id for struct typed var */
				};
				u32 mem_size; /* mem_size for non-struct typed var */
			};
		} btf_var;
		/* if instruction is a call to bpf_loop this field tracks
		 * the state of the relevant registers to make decision about inlining
		 */
		struct bpf_loop_inline_state loop_inline_state;
	};
	union {
		/* remember the size of type passed to bpf_obj_new to rewrite R1 */
		u64 obj_new_size;
		/* remember the offset of node field within type to rewrite */
		u64 insert_off;
	};
	struct bpf_iarray *jt; /* jump table for a gotox or bpf_tail_call() instruction */
	struct btf_struct_meta *kptr_struct_meta;
	u64 map_key_state; /* constant (32 bit) key tracking for maps */
	int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
	u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
	bool nospec; /* do not execute this instruction speculatively */
	bool nospec_result; /* result is unsafe under speculation, nospec must follow */
	bool zext_dst; /* this insn zero extends dst reg */
	bool needs_zext; /* alu op needs to clear upper bits */
	bool non_sleepable; /* helper/kfunc may be called from non-sleepable context */
	bool is_iter_next; /* bpf_iter_<type>_next() kfunc call */
	bool call_with_percpu_alloc_ptr; /* {this,per}_cpu_ptr() with prog percpu alloc */
	u8 alu_state; /* used in combination with alu_limit */
	/* true if STX or LDX instruction is a part of a spill/fill
	 * pattern for a bpf_fastcall call.
	 */
	u8 fastcall_pattern:1;
	/* for CALL instructions, a number of spill/fill pairs in the
	 * bpf_fastcall pattern.
	 */
	u8 fastcall_spills_num:3;
	u8 arg_prog:4;

	/* below fields are initialized once */
	unsigned int orig_idx; /* original instruction index */
	u32 jmp_point:1;
	u32 prune_point:1;
	/* ensure we check state equivalence and save state checkpoint at
	 * this instruction, regardless of any heuristics
	 */
	u32 force_checkpoint:1;
	/* true if instruction is a call to a helper function that
	 * accepts callback function as a parameter.
	 */
	u32 calls_callback:1;
	u32 indirect_target:1; /* if it is an indirect jump target */
	/*
	 * CFG strongly connected component this instruction belongs to,
	 * zero if it is a singleton SCC.
	 */
	u32 scc;
	/* registers alive before this instruction. */
	u16 live_regs_before;
	/*
	 * Bitmask of R0-R9 that hold known values at this instruction.
	 * const_reg_mask: scalar constants that fit in 32 bits.
	 * const_reg_map_mask: map pointers, val is map_index into used_maps[].
	 * const_reg_subprog_mask: subprog pointers, val is subprog number.
	 * const_reg_vals[i] holds the 32-bit value for register i.
	 * Populated by compute_const_regs() pre-pass.
	 */
	u16 const_reg_mask;
	u16 const_reg_map_mask;
	u16 const_reg_subprog_mask;
	u32 const_reg_vals[10];
};

#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
#define MAX_USED_BTFS 64 /* max number of BTFs accessed by one BPF program */

#define BPF_VERIFIER_TMP_LOG_SIZE 1024

struct bpf_verifier_log {
	/* Logical start and end positions of a "log window" of the verifier log.
	 * start_pos == 0 means we haven't truncated anything.
	 * Once truncation starts to happen, start_pos + len_total == end_pos,
	 * except during log reset situations, in which (end_pos - start_pos)
	 * might get smaller than len_total (see bpf_vlog_reset()).
	 * Generally, (end_pos - start_pos) gives the amount of useful data in
	 * the user log buffer.
	 */
	u64 start_pos;
	u64 end_pos;
	char __user *ubuf;
	u32 level;
	u32 len_total;
	u32 len_max;
	char kbuf[BPF_VERIFIER_TMP_LOG_SIZE];
};

#define BPF_LOG_LEVEL1 1
#define BPF_LOG_LEVEL2 2
#define BPF_LOG_STATS 4
#define BPF_LOG_FIXED 8
#define BPF_LOG_LEVEL (BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2)
#define BPF_LOG_MASK (BPF_LOG_LEVEL | BPF_LOG_STATS | BPF_LOG_FIXED)
#define BPF_LOG_KERNEL (BPF_LOG_MASK + 1) /* kernel internal flag */
#define BPF_LOG_MIN_ALIGNMENT 8U
#define BPF_LOG_ALIGNMENT 40U

static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
{
	return log && log->level;
}

#define BPF_MAX_SUBPROGS 256

struct bpf_subprog_arg_info {
	enum bpf_arg_type arg_type;
	union {
		u32 mem_size;
		u32 btf_id;
	};
};

enum priv_stack_mode {
	PRIV_STACK_UNKNOWN,
	NO_PRIV_STACK,
	PRIV_STACK_ADAPTIVE,
};

struct bpf_subprog_info {
	const char *name; /* name extracted from BTF */
	u32 start; /* insn idx of function entry point */
	u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
	u32 postorder_start; /* The idx to the env->cfg.insn_postorder */
	u32 exit_idx; /* Index of one of the BPF_EXIT instructions in this subprogram */
	u16 stack_depth; /* max. stack depth used by this function */
	u16 stack_extra;
	/* offsets in range [stack_depth .. fastcall_stack_off)
	 * are used for bpf_fastcall spills and fills.
	 */
	s16 fastcall_stack_off;
	bool has_tail_call: 1;
	bool tail_call_reachable: 1;
	bool has_ld_abs: 1;
	bool is_cb: 1;
	bool is_async_cb: 1;
	bool is_exception_cb: 1;
	bool args_cached: 1;
	/* true if bpf_fastcall stack region is used by functions that can't be inlined */
	bool keep_fastcall_stack: 1;
	bool changes_pkt_data: 1;
	bool might_sleep: 1;
	u8 arg_cnt:3;

	enum priv_stack_mode priv_stack_mode;
	struct bpf_subprog_arg_info args[MAX_BPF_FUNC_REG_ARGS];
};

struct bpf_verifier_env;

struct backtrack_state {
	struct bpf_verifier_env *env;
	u32 frame;
	u32 reg_masks[MAX_CALL_FRAMES];
	u64 stack_masks[MAX_CALL_FRAMES];
};

struct bpf_id_pair {
	u32 old;
	u32 cur;
};

struct bpf_idmap {
	u32 tmp_id_gen;
	u32 cnt;
	struct bpf_id_pair map[BPF_ID_MAP_SIZE];
};

struct bpf_idset {
	u32 num_ids;
	struct {
		u32 id;
		u32 cnt;
	} entries[BPF_ID_MAP_SIZE];
};

/* see verifier.c:compute_scc_callchain() */
struct bpf_scc_callchain {
	/* call sites from bpf_verifier_state->frame[*]->callsite leading to this SCC */
	u32 callsites[MAX_CALL_FRAMES - 1];
	/* last frame in a chain is identified by SCC id */
	u32 scc;
};

/* verifier state waiting for propagate_backedges() */
struct bpf_scc_backedge {
	struct bpf_scc_backedge *next;
	struct bpf_verifier_state state;
};

struct bpf_scc_visit {
	struct bpf_scc_callchain callchain;
	/* first state in current verification path that entered SCC
	 * identified by the callchain
	 */
	struct bpf_verifier_state *entry_state;
	struct bpf_scc_backedge *backedges; /* list of backedges */
	u32 num_backedges;
};

/* An array of bpf_scc_visit structs sharing the same bpf_scc_callchain->scc
 * but having different bpf_scc_callchain->callsites.
 */
struct bpf_scc_info {
	u32 num_visits;
	struct bpf_scc_visit visits[];
};

struct bpf_liveness;

/* single container for all structs
 * one verifier_env per bpf_check() call
 */
struct bpf_verifier_env {
	u32 insn_idx;
	u32 prev_insn_idx;
	struct bpf_prog *prog; /* eBPF program being verified */
	const struct bpf_verifier_ops *ops;
	struct module *attach_btf_mod; /* The owner module of prog->aux->attach_btf */
	struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
	int stack_size; /* number of states to be processed */
	bool strict_alignment; /* perform strict pointer alignment checks */
	bool test_state_freq; /* test verifier with different pruning frequency */
	bool test_reg_invariants; /* fail verification on register invariants violations */
	struct bpf_verifier_state *cur_state; /* current verifier state */
	/* Search pruning optimization, array of list_heads for
	 * lists of struct bpf_verifier_state_list.
	 */
	struct list_head *explored_states;
	struct list_head free_list; /* list of struct bpf_verifier_state_list */
	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by eBPF program */
	struct btf_mod_pair used_btfs[MAX_USED_BTFS]; /* array of BTFs used by BPF program */
	struct bpf_map *insn_array_maps[MAX_USED_MAPS]; /* array of INSN_ARRAY maps to be relocated */
	u32 used_map_cnt; /* number of used maps */
	u32 used_btf_cnt; /* number of used BTF objects */
	u32 insn_array_map_cnt; /* number of used maps of type BPF_MAP_TYPE_INSN_ARRAY */
	u32 id_gen; /* used to generate unique reg IDs */
	u32 hidden_subprog_cnt; /* number of hidden subprogs */
	int exception_callback_subprog;
	bool explore_alu_limits;
	bool allow_ptr_leaks;
	/* Allow access to uninitialized stack memory. Writes with fixed offset are
	 * always allowed, so this refers to reads (with fixed or variable offset),
	 * to writes with variable offset and to indirect (helper) accesses.
	 */
	bool allow_uninit_stack;
	bool bpf_capable;
	bool bypass_spec_v1;
	bool bypass_spec_v4;
	bool seen_direct_write;
	bool seen_exception;
	struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
	const struct bpf_line_info *prev_linfo;
	struct bpf_verifier_log log;
	struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 2]; /* max + 2 for the fake and exception subprogs */
	/* subprog indices sorted in topological order: leaves first, callers last */
	int subprog_topo_order[BPF_MAX_SUBPROGS + 2];
	union {
		struct bpf_idmap idmap_scratch;
		struct bpf_idset idset_scratch;
	};
	struct {
		int *insn_state;
		int *insn_stack;
		/*
		 * vector of instruction indexes sorted in post-order, grouped by subprogram,
		 * see bpf_subprog_info->postorder_start.
		 */
		int *insn_postorder;
		int cur_stack;
		/* current position in the insn_postorder vector */
		int cur_postorder;
	} cfg;
	struct backtrack_state bt;
	struct bpf_jmp_history_entry *cur_hist_ent;
	/* Per-callsite copy of parent's converged at_stack_in for cross-frame fills. */
	struct arg_track **callsite_at_stack;
	u32 pass_cnt; /* number of times do_check() was called */
	u32 subprog_cnt;
	/* number of instructions analyzed by the verifier */
	u32 prev_insn_processed, insn_processed;
	/* number of jmps, calls, exits analyzed so far */
	u32 prev_jmps_processed, jmps_processed;
	/* total verification time */
	u64 verification_time;
	/* maximum number of verifier states kept in 'branching' instructions */
	u32 max_states_per_insn;
	/* total number of allocated verifier states */
	u32 total_states;
	/* some states are freed during program analysis.
	 * this is peak number of states. this number dominates kernel
	 * memory consumption during verification
	 */
	u32 peak_states;
	/* longest register parentage chain walked for liveness marking */
	u32 longest_mark_read_walk;
	u32 free_list_size;
	u32 explored_states_size;
	u32 num_backedges;
	bpfptr_t fd_array;

	/* bit mask to keep track of whether a register has been accessed
	 * since the last time the function state was printed
	 */
	u32 scratched_regs;
	/* Same as scratched_regs but for stack slots */
	u64 scratched_stack_slots;
	u64 prev_log_pos, prev_insn_print_pos;
	/* buffer used to temporarily hold constants as scalar registers */
	struct bpf_reg_state fake_reg[1];
	/* buffers used to save updated reg states while simulating branches */
	struct bpf_reg_state true_reg1, true_reg2, false_reg1, false_reg2;
	/* buffer used to generate temporary string representations,
	 * e.g., in reg_type_str() to generate reg_type string
	 */
	char tmp_str_buf[TMP_STR_BUF_LEN];
	struct bpf_insn insn_buf[INSN_BUF_SIZE];
	struct bpf_insn epilogue_buf[INSN_BUF_SIZE];
	struct bpf_scc_callchain callchain_buf;
	struct bpf_liveness *liveness;
	/* array of pointers to bpf_scc_info indexed by SCC id */
	struct bpf_scc_info **scc_info;
	u32 scc_cnt;
	struct bpf_iarray *succ;
	struct bpf_iarray *gotox_tmp_buf;
};

static inline struct bpf_func_info_aux *subprog_aux(struct bpf_verifier_env *env, int subprog)
{
	return &env->prog->aux->func_info_aux[subprog];
}

static inline struct bpf_subprog_info *subprog_info(struct bpf_verifier_env *env, int subprog)
{
	return &env->subprog_info[subprog];
}

struct bpf_call_summary {
	u8 num_params;
	bool is_void;
	bool fastcall;
};

static inline bool bpf_helper_call(const struct bpf_insn *insn)
{
	return insn->code == (BPF_JMP | BPF_CALL) &&
	       insn->src_reg == 0;
}

static inline bool bpf_pseudo_call(const struct bpf_insn *insn)
{
	return insn->code == (BPF_JMP | BPF_CALL) &&
	       insn->src_reg == BPF_PSEUDO_CALL;
}

static inline bool bpf_pseudo_kfunc_call(const struct bpf_insn *insn)
{
	return insn->code == (BPF_JMP | BPF_CALL) &&
	       insn->src_reg == BPF_PSEUDO_KFUNC_CALL;
}
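
/* Taken together (informal summary): all three predicates above match the
 * same BPF_JMP|BPF_CALL opcode and differ only in src_reg, which acts as a
 * tag: 0 for helper calls, BPF_PSEUDO_CALL for bpf2bpf subprog calls and
 * BPF_PSEUDO_KFUNC_CALL for kernel-function (kfunc) calls.
 */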

__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
				      const char *fmt, va_list args);
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...);
__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
			    const char *fmt, ...);
int bpf_vlog_init(struct bpf_verifier_log *log, u32 log_level,
		  char __user *log_buf, u32 log_size);
void bpf_vlog_reset(struct bpf_verifier_log *log, u64 new_pos);
int bpf_vlog_finalize(struct bpf_verifier_log *log, u32 *log_size_actual);

__printf(3, 4) void verbose_linfo(struct bpf_verifier_env *env,
				  u32 insn_off,
				  const char *prefix_fmt, ...);

#define verifier_bug_if(cond, env, fmt, args...)			\
	({								\
		bool __cond = (cond);					\
		if (unlikely(__cond))					\
			verifier_bug(env, fmt " (" #cond ")", ##args);	\
		(__cond);						\
	})
#define verifier_bug(env, fmt, args...)					\
	({								\
		BPF_WARN_ONCE(1, "verifier bug: " fmt "\n", ##args);	\
		bpf_log(&env->log, "verifier bug: " fmt "\n", ##args);	\
	})

static inline void mark_prune_point(struct bpf_verifier_env *env, int idx)
{
	env->insn_aux_data[idx].prune_point = true;
}

static inline bool bpf_is_prune_point(struct bpf_verifier_env *env, int insn_idx)
{
	return env->insn_aux_data[insn_idx].prune_point;
}

static inline void mark_force_checkpoint(struct bpf_verifier_env *env, int idx)
{
	env->insn_aux_data[idx].force_checkpoint = true;
}

static inline bool bpf_is_force_checkpoint(struct bpf_verifier_env *env, int insn_idx)
{
	return env->insn_aux_data[insn_idx].force_checkpoint;
}

static inline void mark_calls_callback(struct bpf_verifier_env *env, int idx)
{
	env->insn_aux_data[idx].calls_callback = true;
}

static inline bool bpf_calls_callback(struct bpf_verifier_env *env, int insn_idx)
{
	return env->insn_aux_data[insn_idx].calls_callback;
}

static inline void mark_jmp_point(struct bpf_verifier_env *env, int idx)
{
	env->insn_aux_data[idx].jmp_point = true;
}

static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
{
	struct bpf_verifier_state *cur = env->cur_state;

	return cur->frame[cur->curframe];
}

static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
{
	return cur_func(env)->regs;
}

int bpf_prog_offload_verifier_prep(struct bpf_prog *prog);
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx);
int bpf_prog_offload_finalize(struct bpf_verifier_env *env);
void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
			      struct bpf_insn *insn);
void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);

/* this lives here instead of in bpf.h because it needs to dereference tgt_prog */
static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog,
					     struct btf *btf, u32 btf_id)
{
	if (tgt_prog)
		return ((u64)tgt_prog->aux->id << 32) | btf_id;
	else
		return ((u64)btf_obj_id(btf) << 32) | 0x80000000 | btf_id;
}

/* unpack the IDs from the key as constructed above */
static inline void bpf_trampoline_unpack_key(u64 key, u32 *obj_id, u32 *btf_id)
{
	if (obj_id)
		*obj_id = key >> 32;
	if (btf_id)
		*btf_id = key & 0x7FFFFFFF;
}
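
/* Illustrative round trip (hypothetical values): with tgt_prog->aux->id == 7
 * and btf_id == 42, compute_key() yields 0x000000070000002a; unpack_key()
 * then recovers obj_id == 7 and btf_id == 42. In the !tgt_prog case the
 * 0x80000000 marker bit is masked back out of btf_id by the 0x7FFFFFFF mask.
 */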

int bpf_check_btf_info_early(struct bpf_verifier_env *env,
			     const union bpf_attr *attr, bpfptr_t uattr);
int bpf_check_btf_info(struct bpf_verifier_env *env,
		       const union bpf_attr *attr, bpfptr_t uattr);

int bpf_check_attach_target(struct bpf_verifier_log *log,
			    const struct bpf_prog *prog,
			    const struct bpf_prog *tgt_prog,
			    u32 btf_id,
			    struct bpf_attach_target_info *tgt_info);
void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab);

int mark_chain_precision(struct bpf_verifier_env *env, int regno);

int bpf_is_state_visited(struct bpf_verifier_env *env, int insn_idx);
int bpf_update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st);

void bpf_clear_jmp_history(struct bpf_verifier_state *state);
int bpf_copy_verifier_state(struct bpf_verifier_state *dst_state,
			    const struct bpf_verifier_state *src);
struct list_head *bpf_explored_state(struct bpf_verifier_env *env, int idx);
void bpf_free_verifier_state(struct bpf_verifier_state *state, bool free_self);
void bpf_free_backedges(struct bpf_scc_visit *visit);
int bpf_push_jmp_history(struct bpf_verifier_env *env, struct bpf_verifier_state *cur,
			 int insn_flags, u64 linked_regs);
void bpf_bt_sync_linked_regs(struct backtrack_state *bt, struct bpf_jmp_history_entry *hist);
void bpf_mark_reg_not_init(const struct bpf_verifier_env *env,
			   struct bpf_reg_state *reg);
void bpf_mark_reg_unknown_imprecise(struct bpf_reg_state *reg);
void bpf_mark_all_scalars_precise(struct bpf_verifier_env *env,
				  struct bpf_verifier_state *st);
void bpf_clear_singular_ids(struct bpf_verifier_env *env, struct bpf_verifier_state *st);
int bpf_mark_chain_precision(struct bpf_verifier_env *env,
			     struct bpf_verifier_state *starting_state,
			     int regno, bool *changed);

static inline int bpf_get_spi(s32 off)
{
	return (-off - 1) / BPF_REG_SIZE;
}
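
/* Worked example (informal): stack offsets are negative, relative to r10.
 * bpf_get_spi(-1) == bpf_get_spi(-8) == 0 (top 8-byte slot, fp-8..fp-1),
 * bpf_get_spi(-9) == bpf_get_spi(-16) == 1, and so on; this is the inverse
 * of the stack[] layout documented in struct bpf_func_state.
 */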

static inline struct bpf_func_state *bpf_func(struct bpf_verifier_env *env,
					      const struct bpf_reg_state *reg)
{
	struct bpf_verifier_state *cur = env->cur_state;

	return cur->frame[reg->frameno];
}

/* Return IP for a given frame in a call stack */
static inline u32 bpf_frame_insn_idx(struct bpf_verifier_state *st, u32 frame)
{
	return frame == st->curframe
	       ? st->insn_idx
	       : st->frame[frame + 1]->callsite;
}

static inline bool bpf_is_jmp_point(struct bpf_verifier_env *env, int insn_idx)
{
	return env->insn_aux_data[insn_idx].jmp_point;
}

static inline bool bpf_is_spilled_reg(const struct bpf_stack_state *stack)
{
	return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL;
}

static inline bool bpf_is_spilled_scalar_reg(const struct bpf_stack_state *stack)
{
	return bpf_is_spilled_reg(stack) && stack->spilled_ptr.type == SCALAR_VALUE;
}

static inline bool bpf_register_is_null(struct bpf_reg_state *reg)
{
	return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
}

static inline void bpf_bt_set_frame_reg(struct backtrack_state *bt, u32 frame, u32 reg)
{
	bt->reg_masks[frame] |= 1 << reg;
}

static inline void bpf_bt_set_frame_slot(struct backtrack_state *bt, u32 frame, u32 slot)
{
	bt->stack_masks[frame] |= 1ull << slot;
}

static inline bool bt_is_frame_reg_set(struct backtrack_state *bt, u32 frame, u32 reg)
{
	return bt->reg_masks[frame] & (1 << reg);
}

static inline bool bt_is_frame_slot_set(struct backtrack_state *bt, u32 frame, u32 slot)
{
	return bt->stack_masks[frame] & (1ull << slot);
}

bool bpf_map_is_rdonly(const struct bpf_map *map);
int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val,
			bool is_ldsx);

#define BPF_BASE_TYPE_MASK GENMASK(BPF_BASE_TYPE_BITS - 1, 0)

/* extract base type from bpf_{arg, return, reg}_type. */
static inline u32 base_type(u32 type)
{
	return type & BPF_BASE_TYPE_MASK;
}

/* extract flags from an extended type. See bpf_type_flag in bpf.h. */
static inline u32 type_flag(u32 type)
{
	return type & ~BPF_BASE_TYPE_MASK;
}
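
/* Illustrative decomposition (flag names from enum bpf_type_flag in bpf.h):
 * for an extended type such as PTR_TO_MAP_VALUE | PTR_MAYBE_NULL,
 * base_type() returns PTR_TO_MAP_VALUE and type_flag() returns
 * PTR_MAYBE_NULL, so callers can match on the base while checking the
 * modifier bits separately.
 */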

/* only use after check_attach_btf_id() */
static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog)
{
	return (prog->type == BPF_PROG_TYPE_EXT && prog->aux->saved_dst_prog_type) ?
	       prog->aux->saved_dst_prog_type : prog->type;
}

static inline bool bpf_prog_check_recur(const struct bpf_prog *prog)
{
	switch (resolve_prog_type(prog)) {
	case BPF_PROG_TYPE_TRACING:
		return prog->expected_attach_type != BPF_TRACE_ITER;
	case BPF_PROG_TYPE_STRUCT_OPS:
		return prog->aux->jits_use_priv_stack;
	case BPF_PROG_TYPE_LSM:
	case BPF_PROG_TYPE_SYSCALL:
		return false;
	default:
		return true;
	}
}

#define BPF_REG_TRUSTED_MODIFIERS (MEM_ALLOC | PTR_TRUSTED | NON_OWN_REF)

static inline bool bpf_type_has_unsafe_modifiers(u32 type)
{
	return type_flag(type) & ~BPF_REG_TRUSTED_MODIFIERS;
}

static inline bool type_is_ptr_alloc_obj(u32 type)
{
	return base_type(type) == PTR_TO_BTF_ID && type_flag(type) & MEM_ALLOC;
}

static inline bool type_is_non_owning_ref(u32 type)
{
	return type_is_ptr_alloc_obj(type) && type_flag(type) & NON_OWN_REF;
}

static inline bool type_is_pkt_pointer(enum bpf_reg_type type)
{
	type = base_type(type);
	return type == PTR_TO_PACKET ||
	       type == PTR_TO_PACKET_META;
}

static inline bool type_is_sk_pointer(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET ||
	       type == PTR_TO_SOCK_COMMON ||
	       type == PTR_TO_TCP_SOCK ||
	       type == PTR_TO_XDP_SOCK;
}

static inline bool type_may_be_null(u32 type)
{
	return type & PTR_MAYBE_NULL;
}

static inline void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno)
{
	env->scratched_regs |= 1U << regno;
}

static inline void mark_stack_slot_scratched(struct bpf_verifier_env *env, u32 spi)
{
	env->scratched_stack_slots |= 1ULL << spi;
}

static inline bool reg_scratched(const struct bpf_verifier_env *env, u32 regno)
{
	return (env->scratched_regs >> regno) & 1;
}

static inline bool stack_slot_scratched(const struct bpf_verifier_env *env, u64 regno)
{
	return (env->scratched_stack_slots >> regno) & 1;
}

static inline bool verifier_state_scratched(const struct bpf_verifier_env *env)
{
	return env->scratched_regs || env->scratched_stack_slots;
}

static inline void mark_verifier_state_clean(struct bpf_verifier_env *env)
{
	env->scratched_regs = 0U;
	env->scratched_stack_slots = 0ULL;
}

/* Used for printing the entire verifier state. */
static inline void mark_verifier_state_scratched(struct bpf_verifier_env *env)
{
	env->scratched_regs = ~0U;
	env->scratched_stack_slots = ~0ULL;
}

static inline bool bpf_stack_narrow_access_ok(int off, int fill_size, int spill_size)
{
#ifdef __BIG_ENDIAN
	off -= spill_size - fill_size;
#endif

	return !(off % BPF_REG_SIZE);
}
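
/* Informal example: a 4-byte fill of an 8-byte spill reads the value's least
 * significant bytes, which live at the start of the slot on little-endian
 * but at the end on big-endian. For a spill at off == -8, the narrow fill
 * must target off == -8 on LE, yet off == -4 on BE; the adjustment above
 * maps the BE case back to a slot-aligned offset before the alignment check.
 */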

static inline bool insn_is_gotox(struct bpf_insn *insn)
{
	return BPF_CLASS(insn->code) == BPF_JMP &&
	       BPF_OP(insn->code) == BPF_JA &&
	       BPF_SRC(insn->code) == BPF_X;
}

const char *reg_type_str(struct bpf_verifier_env *env, enum bpf_reg_type type);
const char *dynptr_type_str(enum bpf_dynptr_type type);
const char *iter_type_str(const struct btf *btf, u32 btf_id);
const char *iter_state_str(enum bpf_iter_state state);

void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate,
			  u32 frameno, bool print_all);
void print_insn_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate,
		      u32 frameno);
u32 bpf_vlog_alignment(u32 pos);

struct bpf_subprog_info *bpf_find_containing_subprog(struct bpf_verifier_env *env, int off);
int bpf_jmp_offset(struct bpf_insn *insn);
struct bpf_iarray *bpf_insn_successors(struct bpf_verifier_env *env, u32 idx);
void bpf_fmt_stack_mask(char *buf, ssize_t buf_sz, u64 stack_mask);
bool bpf_subprog_is_global(const struct bpf_verifier_env *env, int subprog);

int bpf_find_subprog(struct bpf_verifier_env *env, int off);
int bpf_compute_const_regs(struct bpf_verifier_env *env);
int bpf_prune_dead_branches(struct bpf_verifier_env *env);
int bpf_check_cfg(struct bpf_verifier_env *env);
int bpf_compute_postorder(struct bpf_verifier_env *env);
int bpf_compute_scc(struct bpf_verifier_env *env);

struct bpf_map_desc {
	struct bpf_map *ptr;
	int uid;
};

struct bpf_kfunc_call_arg_meta {
	/* In parameters */
	struct btf *btf;
	u32 func_id;
	u32 kfunc_flags;
	const struct btf_type *func_proto;
	const char *func_name;
	/* Out parameters */
	u32 ref_obj_id;
	u8 release_regno;
	bool r0_rdonly;
	u32 ret_btf_id;
	u64 r0_size;
	u32 subprogno;
	struct {
		u64 value;
		bool found;
	} arg_constant;

	/* arg_{btf,btf_id,owning_ref} are used by kfunc-specific handling,
	 * generally to pass info about user-defined local kptr types to later
	 * verification logic
	 *   bpf_obj_drop/bpf_percpu_obj_drop
	 *     Record the local kptr type to be drop'd
	 *   bpf_refcount_acquire (via KF_ARG_PTR_TO_REFCOUNTED_KPTR arg type)
	 *     Record the local kptr type to be refcount_incr'd and use
	 *     arg_owning_ref to determine whether refcount_acquire should be
	 *     fallible
	 */
	struct btf *arg_btf;
	u32 arg_btf_id;
	bool arg_owning_ref;
	bool arg_prog;

	struct {
		struct btf_field *field;
	} arg_list_head;
	struct {
		struct btf_field *field;
	} arg_rbtree_root;
	struct {
		enum bpf_dynptr_type type;
		u32 id;
		u32 ref_obj_id;
	} initialized_dynptr;
	struct {
		u8 spi;
		u8 frameno;
	} iter;
	struct bpf_map_desc map;
	u64 mem_size;
};

int bpf_get_helper_proto(struct bpf_verifier_env *env, int func_id,
			 const struct bpf_func_proto **ptr);
int bpf_fetch_kfunc_arg_meta(struct bpf_verifier_env *env, s32 func_id,
			     s16 offset, struct bpf_kfunc_call_arg_meta *meta);
bool bpf_is_async_callback_calling_insn(struct bpf_insn *insn);
bool bpf_is_sync_callback_calling_insn(struct bpf_insn *insn);

static inline bool bpf_is_iter_next_kfunc(struct bpf_kfunc_call_arg_meta *meta)
{
	return meta->kfunc_flags & KF_ITER_NEXT;
}

static inline bool bpf_is_kfunc_sleepable(struct bpf_kfunc_call_arg_meta *meta)
{
	return meta->kfunc_flags & KF_SLEEPABLE;
}

bool bpf_is_kfunc_pkt_changing(struct bpf_kfunc_call_arg_meta *meta);
struct bpf_iarray *bpf_iarray_realloc(struct bpf_iarray *old, size_t n_elem);
int bpf_copy_insn_array_uniq(struct bpf_map *map, u32 start, u32 end, u32 *off);
bool bpf_insn_is_cond_jump(u8 code);
bool bpf_is_may_goto_insn(struct bpf_insn *insn);

void bpf_verbose_insn(struct bpf_verifier_env *env, struct bpf_insn *insn);
bool bpf_get_call_summary(struct bpf_verifier_env *env, struct bpf_insn *call,
			  struct bpf_call_summary *cs);
s64 bpf_helper_stack_access_bytes(struct bpf_verifier_env *env,
				  struct bpf_insn *insn, int arg,
				  int insn_idx);
s64 bpf_kfunc_stack_access_bytes(struct bpf_verifier_env *env,
				 struct bpf_insn *insn, int arg,
				 int insn_idx);
int bpf_compute_subprog_arg_access(struct bpf_verifier_env *env);

int bpf_stack_liveness_init(struct bpf_verifier_env *env);
void bpf_stack_liveness_free(struct bpf_verifier_env *env);
int bpf_live_stack_query_init(struct bpf_verifier_env *env, struct bpf_verifier_state *st);
bool bpf_stack_slot_alive(struct bpf_verifier_env *env, u32 frameno, u32 spi);
int bpf_compute_live_registers(struct bpf_verifier_env *env);

#define BPF_MAP_KEY_POISON (1ULL << 63)
#define BPF_MAP_KEY_SEEN (1ULL << 62)

static inline bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
{
	return aux->map_ptr_state.poison;
}

static inline bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
{
	return aux->map_ptr_state.unpriv;
}

static inline bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
{
	return aux->map_key_state & BPF_MAP_KEY_POISON;
}

static inline bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux)
{
	return !(aux->map_key_state & BPF_MAP_KEY_SEEN);
}

static inline u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
{
	return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON);
}
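
/* Informal reading of the encoding: a constant key 5 observed at a call site
 * would be stored as 5 | BPF_MAP_KEY_SEEN, and bpf_map_key_immediate() masks
 * the SEEN and POISON bits back out to return 5. BPF_MAP_KEY_POISON marks a
 * key that cannot be treated as a single known constant.
 */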

#define MAX_PACKET_OFF 0xffff
#define CALLER_SAVED_REGS 6

enum bpf_reg_arg_type {
	SRC_OP,		/* register is used as source operand */
	DST_OP,		/* register is used as destination operand */
	DST_OP_NO_MARK	/* same as above, check only, don't mark */
};

#define MAX_KFUNC_DESCS 256

struct bpf_kfunc_desc {
	struct btf_func_model func_model;
	u32 func_id;
	s32 imm;
	u16 offset;
	unsigned long addr;
};

struct bpf_kfunc_desc_tab {
	/* Sorted by func_id (BTF ID) and offset (fd_array offset) during
	 * verification. JITs do lookups by bpf_insn, where func_id may not be
	 * available, therefore at the end of verification do_misc_fixups()
	 * sorts this by imm and offset.
	 */
	struct bpf_kfunc_desc descs[MAX_KFUNC_DESCS];
	u32 nr_descs;
};

/* Functions exported from verifier.c, used by fixups.c */
bool bpf_is_reg64(struct bpf_insn *insn, u32 regno, struct bpf_reg_state *reg,
		  enum bpf_reg_arg_type t);
void bpf_clear_insn_aux_data(struct bpf_verifier_env *env, int start, int len);
void bpf_mark_subprog_exc_cb(struct bpf_verifier_env *env, int subprog);
bool bpf_allow_tail_call_in_subprogs(struct bpf_verifier_env *env);
bool bpf_verifier_inlines_helper_call(struct bpf_verifier_env *env, s32 imm);
int bpf_add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, u16 offset);
int bpf_fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
			 struct bpf_insn *insn_buf, int insn_idx, int *cnt);

/* Functions in fixups.c, called from bpf_check() */
int bpf_remove_fastcall_spills_fills(struct bpf_verifier_env *env);
int bpf_optimize_bpf_loop(struct bpf_verifier_env *env);
void bpf_opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env);
int bpf_opt_remove_dead_code(struct bpf_verifier_env *env);
int bpf_opt_remove_nops(struct bpf_verifier_env *env);
int bpf_opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env, const union bpf_attr *attr);
int bpf_convert_ctx_accesses(struct bpf_verifier_env *env);
int bpf_jit_subprogs(struct bpf_verifier_env *env);
int bpf_fixup_call_args(struct bpf_verifier_env *env);
int bpf_do_misc_fixups(struct bpf_verifier_env *env);

#endif /* _LINUX_BPF_VERIFIER_H */