/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_VERIFIER_H
#define _LINUX_BPF_VERIFIER_H 1

#include <linux/bpf.h> /* for enum bpf_reg_type */
#include <linux/btf.h> /* for struct btf and btf_id() */
#include <linux/filter.h> /* for MAX_BPF_STACK */
#include <linux/tnum.h>

/* Maximum variable offset umax_value permitted when resolving memory accesses.
 * In practice this is far bigger than any realistic pointer offset; this limit
 * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
 */
#define BPF_MAX_VAR_OFF	(1 << 29)
/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures
 * that converting umax_value to int cannot overflow.
 */
#define BPF_MAX_VAR_SIZ	(1 << 29)
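/* A worked bound for the two limits above (our arithmetic, not from the
 * source): with umax_value <= 1 << 29 and off, size both (int)-valued,
 * umax_value + (int)off + (int)size < 2^29 + 2^31 + 2^31 < 2^33, far below
 * u64 overflow; and since 1 << 29 < INT_MAX, converting umax_value to int
 * for ARG_CONST_SIZE is safe as well.
 */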
/* size of tmp_str_buf in bpf_verifier_env.
 * We need at least 306 bytes to fit the full stack mask representation
 * (in the "-8,-16,...,-512" form).
 */
#define TMP_STR_BUF_LEN	320
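/* Derivation of the 306-byte figure (our arithmetic, not from the source):
 * the full mask names all 64 slots of the 512-byte stack: "-8" (2 chars),
 * "-16".."-96" (11 entries, 3 chars each) and "-104".."-512" (52 entries,
 * 4 chars each), i.e. 2 + 33 + 208 = 243 chars plus 63 commas = 306 bytes,
 * excluding the trailing NUL; hence the 320-byte buffer.
 */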
/* Patch buffer size */
#define INSN_BUF_SIZE	32

#define ITER_PREFIX "bpf_iter_"

enum bpf_iter_state {
	BPF_ITER_STATE_INVALID, /* for non-first slot */
	BPF_ITER_STATE_ACTIVE,
	BPF_ITER_STATE_DRAINED,
};
struct bpf_reg_state {
	/* Ordering of fields matters. See states_equal() */
	enum bpf_reg_type type;
	/*
	 * Fixed part of pointer offset, pointer types only.
	 * Or constant delta between "linked" scalars with the same ID.
	 */
	s32 off;
	union {
		/* valid when type == PTR_TO_PACKET */
		int range;

		/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
		 * PTR_TO_MAP_VALUE_OR_NULL
		 */
		struct {
			struct bpf_map *map_ptr;
			/* To distinguish lookups into an inner map from the
			 * outer map itself, map_uid is non-zero for registers
			 * pointing to inner maps.
			 */
			u32 map_uid;
		};

		/* for PTR_TO_BTF_ID */
		struct {
			struct btf *btf;
			u32 btf_id;
		};

		struct { /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */
			u32 mem_size;
			u32 dynptr_id; /* for dynptr slices */
		};

		/* For dynptr stack slots */
		struct {
			enum bpf_dynptr_type type;
			/* A dynptr is 16 bytes so it takes up 2 stack slots.
			 * We need to track which slot is the first slot
			 * to protect against cases where the user may try to
			 * pass in an address starting at the second slot of
			 * the dynptr.
			 */
			bool first_slot;
		} dynptr;

		/* For bpf_iter stack slots */
		struct {
			/* BTF container and BTF type ID describing
			 * struct bpf_iter_<type> of an iterator state
			 */
			struct btf *btf;
			u32 btf_id;
			/* pack the following two fields to fit iter state into 16 bytes */
			enum bpf_iter_state state:2;
			int depth:30;
		} iter;

		/* For irq stack slots */
		struct {
			enum {
				IRQ_NATIVE_KFUNC,
				IRQ_LOCK_KFUNC,
			} kfunc_class;
		} irq;

		/* Max size from any of the above. */
		struct {
			unsigned long raw1;
			unsigned long raw2;
		} raw;

		u32 subprogno; /* for PTR_TO_FUNC */
	};
	/* For scalar types (SCALAR_VALUE), this represents our knowledge of
	 * the actual value.
	 * For pointer types, this represents the variable part of the offset
	 * from the pointed-to object, and is shared with all bpf_reg_states
	 * with the same id as us.
	 */
	struct tnum var_off;
	/* Used to determine if any memory access using this register will
	 * result in a bad access.
	 * These refer to the same value as var_off, not necessarily the actual
	 * contents of the register.
	 */
	s64 smin_value; /* minimum possible (s64)value */
	s64 smax_value; /* maximum possible (s64)value */
	u64 umin_value; /* minimum possible (u64)value */
	u64 umax_value; /* maximum possible (u64)value */
	s32 s32_min_value; /* minimum possible (s32)value */
	s32 s32_max_value; /* maximum possible (s32)value */
	u32 u32_min_value; /* minimum possible (u32)value */
	u32 u32_max_value; /* maximum possible (u32)value */
	/* For PTR_TO_PACKET, used to find other pointers with the same variable
	 * offset, so they can share range knowledge.
	 * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
	 * came from, when one is tested for != NULL.
	 * For PTR_TO_MEM_OR_NULL this is used to identify memory allocation
	 * for the purpose of tracking that it's freed.
	 * For PTR_TO_SOCKET this is used to share which pointers retain the
	 * same reference to the socket, to determine proper reference freeing.
	 * For stack slots that are dynptrs, this is used to track references to
	 * the dynptr to determine proper reference freeing.
	 * Similarly to dynptrs, we use ID to track "belonging" of a reference
	 * to a specific instance of bpf_iter.
	 *
	 * The upper bits of the ID are used to remember the relationship
	 * between "linked" registers. Example:
	 *   r1 = r2;   both will have r1->id == r2->id == N
	 *   r1 += 10;  r1->id == N | BPF_ADD_CONST64 and r1->off == 10
	 *   r3 = r2;   both will have r3->id == r2->id == N
	 *   w3 += 10;  r3->id == N | BPF_ADD_CONST32 and r3->off == 10
	 */
#define BPF_ADD_CONST64	(1U << 31)
#define BPF_ADD_CONST32	(1U << 30)
#define BPF_ADD_CONST	(BPF_ADD_CONST64 | BPF_ADD_CONST32)
	u32 id;
	/* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
	 * from a pointer-cast helper, bpf_sk_fullsock() and
	 * bpf_tcp_sock().
	 *
	 * Consider the following where "sk" is a reference counted
	 * pointer returned from "sk = bpf_sk_lookup_tcp();":
	 *
	 * 1: sk = bpf_sk_lookup_tcp();
	 * 2: if (!sk) { return 0; }
	 * 3: fullsock = bpf_sk_fullsock(sk);
	 * 4: if (!fullsock) { bpf_sk_release(sk); return 0; }
	 * 5: tp = bpf_tcp_sock(fullsock);
	 * 6: if (!tp) { bpf_sk_release(sk); return 0; }
	 * 7: bpf_sk_release(sk);
	 * 8: snd_cwnd = tp->snd_cwnd; // verifier will complain
	 *
	 * After bpf_sk_release(sk) at line 7, both the "fullsock" ptr and
	 * the "tp" ptr should also be invalidated. In order to do that,
	 * the regs holding "fullsock" and "sk" need to remember
	 * the original refcounted ptr id (i.e. sk_reg->id) in ref_obj_id
	 * such that the verifier can reset all regs which have
	 * ref_obj_id matching the sk_reg->id.
	 *
	 * sk_reg->ref_obj_id is set to sk_reg->id at line 1.
	 * sk_reg->id is kept for NULL-marking purposes only.
	 * After NULL-marking is done, sk_reg->id can be reset to 0.
	 *
	 * After "fullsock = bpf_sk_fullsock(sk);" at line 3,
	 * fullsock_reg->ref_obj_id is set to sk_reg->ref_obj_id.
	 *
	 * After "tp = bpf_tcp_sock(fullsock);" at line 5,
	 * tp_reg->ref_obj_id is set to fullsock_reg->ref_obj_id
	 * which is the same as sk_reg->ref_obj_id.
	 *
	 * From the verifier's perspective, if sk, fullsock and tp
	 * are not NULL, they are the same ptr with different
	 * reg->type. In particular, bpf_sk_release(tp) is also
	 * allowed and has the same effect as bpf_sk_release(sk).
	 */
	u32 ref_obj_id;
	/* Inside the callee two registers can be both PTR_TO_STACK like
	 * R1=fp-8 and R2=fp-8, but one of them points to this function's stack
	 * while the other points to the caller's stack. To differentiate them
	 * 'frameno' is used, which is an index in the
	 * bpf_verifier_state->frame[] array pointing to bpf_func_state.
	 */
	u32 frameno;
	/* Tracks subreg definition. The stored value is the insn_idx of the
	 * writing insn. This is safe because subreg_def is used before any insn
	 * patching, which only happens after main verification has finished.
	 */
	s32 subreg_def;
	/* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */
	bool precise;
};
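
/* Illustrative sketch (ours, not verifier code): two scalars are "linked"
 * when their ids share the same base after stripping the BPF_ADD_CONST
 * marker bits; reg->off then holds the constant delta between them.
 */
static inline bool example_scalars_linked(const struct bpf_reg_state *a,
					  const struct bpf_reg_state *b)
{
	u32 base_a = a->id & ~BPF_ADD_CONST; /* drop CONST64/CONST32 bits */
	u32 base_b = b->id & ~BPF_ADD_CONST;

	/* id == 0 means "not linked to anything" */
	return base_a && base_a == base_b;
}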

enum bpf_stack_slot_type {
	STACK_INVALID,	/* nothing was stored in this stack slot */
	STACK_SPILL,	/* register spilled into stack */
	STACK_MISC,	/* BPF program wrote some data into this slot */
	STACK_ZERO,	/* BPF program wrote constant zero */
	/* A dynptr is stored in this stack slot. The type of dynptr
	 * is stored in bpf_stack_state->spilled_ptr.dynptr.type
	 */
	STACK_DYNPTR,
	STACK_ITER,
	STACK_IRQ_FLAG,
};

#define BPF_REG_SIZE 8	/* size of eBPF register in bytes */

#define BPF_REGMASK_ARGS ((1 << BPF_REG_1) | (1 << BPF_REG_2) | \
			  (1 << BPF_REG_3) | (1 << BPF_REG_4) | \
			  (1 << BPF_REG_5))

#define BPF_DYNPTR_SIZE		sizeof(struct bpf_dynptr_kern)
#define BPF_DYNPTR_NR_SLOTS	(BPF_DYNPTR_SIZE / BPF_REG_SIZE)

struct bpf_stack_state {
	struct bpf_reg_state spilled_ptr;
	u8 slot_type[BPF_REG_SIZE];
};

struct bpf_reference_state {
	/* Each reference object has a type. The enumerators form a bitmask
	 * so that all lock variants can be tested at once via
	 * REF_TYPE_LOCK_MASK.
	 */
	enum ref_state_type {
		REF_TYPE_PTR		= (1 << 1),
		REF_TYPE_IRQ		= (1 << 2),
		REF_TYPE_LOCK		= (1 << 3),
		REF_TYPE_RES_LOCK	= (1 << 4),
		REF_TYPE_RES_LOCK_IRQ	= (1 << 5),
		REF_TYPE_LOCK_MASK	= REF_TYPE_LOCK | REF_TYPE_RES_LOCK | REF_TYPE_RES_LOCK_IRQ,
	} type;
	/* Track each reference created with a unique id, even if the same
	 * instruction creates the reference multiple times (e.g., via CALL).
	 */
	int id;
	/* Instruction where the allocation of this reference occurred. This
	 * is used purely to inform the user of a reference leak.
	 */
	int insn_idx;
	/* Used to keep track of the source object of a lock, to ensure
	 * that it matches on unlock.
	 */
	void *ptr;
};

struct bpf_retval_range {
	s32 minval;
	s32 maxval;
};

/* state of the program:
 * type of all registers and stack info
 */
struct bpf_func_state {
	struct bpf_reg_state regs[MAX_BPF_REG];
	/* index of call instruction that called into this func */
	int callsite;
	/* stack frame number of this function state from pov of
	 * enclosing bpf_verifier_state.
	 * 0 = main function, 1 = first callee.
	 */
	u32 frameno;
	/* subprog number == index within subprog_info
	 * zero == main subprog
	 */
	u32 subprogno;
	/* Every bpf_timer_start will increment async_entry_cnt.
	 * It's used to distinguish:
	 * void foo(void) { for(;;); }
	 * void foo(void) { bpf_timer_set_callback(,foo); }
	 */
	u32 async_entry_cnt;
	struct bpf_retval_range callback_ret_range;
	bool in_callback_fn;
	bool in_async_callback_fn;
	bool in_exception_callback_fn;
	/* For callback-calling functions that limit the number of possible
	 * callback executions (e.g. bpf_loop), keeps track of the current
	 * simulated iteration number.
	 * The value in frame N refers to the number of times the callback with
	 * frame N+1 was simulated, e.g. for the following call:
	 *
	 * bpf_loop(..., fn, ...); | suppose current frame is N
	 *                         | fn would be simulated in frame N+1
	 *                         | number of simulations is tracked in frame N
	 */
	u32 callback_depth;

	/* The following fields should be last. See copy_func_state() */
	/* The state of the stack. Each element of the array describes BPF_REG_SIZE
	 * (i.e. 8) bytes worth of stack memory.
	 * stack[0] represents bytes [*(r10-8)..*(r10-1)]
	 * stack[1] represents bytes [*(r10-16)..*(r10-9)]
	 * ...
	 * stack[allocated_stack/8 - 1] represents [*(r10-allocated_stack)..*(r10-allocated_stack+7)]
	 */
	struct bpf_stack_state *stack;
	/* Size of the current stack, in bytes. The stack state is tracked above,
	 * in `stack`. allocated_stack is always a multiple of BPF_REG_SIZE.
	 */
	int allocated_stack;
};
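
/* A minimal sketch (our assumption, mirroring the layout comment above) of
 * how a negative frame-pointer offset maps to an index into 'stack': r10-8
 * yields slot 0, r10-16 yields slot 1, and so on.
 */
static inline int example_stack_slot_index(int off)
{
	/* 'off' is a negative offset from r10; each slot covers 8 bytes */
	return (-off - 1) / BPF_REG_SIZE;
}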

#define MAX_CALL_FRAMES 8

/* instruction history flags, used in bpf_jmp_history_entry.flags field */
enum {
	/* instruction references stack slot through PTR_TO_STACK register;
	 * we also store stack's frame number in lower 3 bits (MAX_CALL_FRAMES is 8)
	 * and accessed stack slot's index in next 6 bits (MAX_BPF_STACK is 512,
	 * 8 bytes per slot, so slot index (spi) is [0, 63])
	 */
	INSN_F_FRAMENO_MASK = 0x7, /* 3 bits */

	INSN_F_SPI_MASK = 0x3f, /* 6 bits */
	INSN_F_SPI_SHIFT = 3, /* shifted 3 bits to the left */

	INSN_F_STACK_ACCESS = BIT(9),

	INSN_F_DST_REG_STACK = BIT(10), /* dst_reg is PTR_TO_STACK */
	INSN_F_SRC_REG_STACK = BIT(11), /* src_reg is PTR_TO_STACK */
	/* total 12 bits are used now. */
};

static_assert(INSN_F_FRAMENO_MASK + 1 >= MAX_CALL_FRAMES);
static_assert(INSN_F_SPI_MASK + 1 >= MAX_BPF_STACK / 8);
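
/* Sketch (ours) of how the layout above packs a stack access into 'flags':
 * frame number in bits 0-2, slot index in bits 3-8, access marker in bit 9.
 */
static inline u32 example_pack_stack_access(u32 frameno, u32 spi)
{
	return (frameno & INSN_F_FRAMENO_MASK) |
	       ((spi & INSN_F_SPI_MASK) << INSN_F_SPI_SHIFT) |
	       INSN_F_STACK_ACCESS;
}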

struct bpf_jmp_history_entry {
	u32 idx;
	/* insn idx can't be bigger than 1 million */
	u32 prev_idx : 20;
	/* special INSN_F_xxx flags */
	u32 flags : 12;
	/* additional registers that need precision tracking when this
	 * jump is backtracked, vector of six 10-bit records
	 */
	u64 linked_regs;
};

/* Maximum number of register states that can exist at once */
#define BPF_ID_MAP_SIZE ((MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) * MAX_CALL_FRAMES)
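/* Numerically (our arithmetic): (11 registers + 512/8 = 64 stack slots) * 8
 * call frames = 600 entries.
 */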
struct bpf_verifier_state {
	/* call stack tracking */
	struct bpf_func_state *frame[MAX_CALL_FRAMES];
	struct bpf_verifier_state *parent;
	/* Acquired reference states */
	struct bpf_reference_state *refs;
	/*
	 * The 'branches' field is the number of branches left to explore:
	 * 0 - all possible paths from this state reached bpf_exit or
	 *     were safely pruned
	 * 1 - at least one path is being explored.
	 *     This state hasn't reached bpf_exit.
	 * 2 - at least two paths are being explored.
	 *     This state is an immediate parent of two children.
	 *     One is a fallthrough branch with branches==1 and another
	 *     state is pushed into the stack (to be explored later) also with
	 *     branches==1. The parent of this state has branches==1.
	 * The verifier state tree connected via the 'parent' pointer looks like:
	 * 1
	 * 1
	 * 2 -> 1 (first 'if' pushed into stack)
	 * 1
	 * 2 -> 1 (second 'if' pushed into stack)
	 * 1
	 * 1
	 * 1 bpf_exit.
	 *
	 * Once do_check() reaches bpf_exit, it calls update_branch_counts()
	 * and the verifier state tree will look like:
	 * 1
	 * 1
	 * 2 -> 1 (first 'if' pushed into stack)
	 * 1
	 * 1 -> 1 (second 'if' pushed into stack)
	 * 0
	 * 0
	 * 0 bpf_exit.
	 * After pop_stack() do_check() will resume at the second 'if'.
	 *
	 * If is_state_visited() sees a state with branches > 0 it means
	 * there is a loop. If such a state is exactly equal to the current
	 * state it's an infinite loop. Note that states_equal() checks for
	 * state equivalency, so two states being 'states_equal' does not mean
	 * an infinite loop. The exact comparison is provided by the
	 * states_maybe_looping() function. It's a stronger pre-check and
	 * much faster than states_equal().
	 *
	 * This algorithm may not find all possible infinite loops, or the
	 * loop iteration count may be too high.
	 * In such cases the BPF_COMPLEXITY_LIMIT_INSNS limit kicks in.
	 */
	u32 branches;
	u32 insn_idx;
	u32 curframe;

	u32 acquired_refs;
	u32 active_locks;
	u32 active_preempt_locks;
	u32 active_irq_id;
	u32 active_lock_id;
	void *active_lock_ptr;
	u32 active_rcu_locks;

	bool speculative;
	bool in_sleepable;
	bool cleaned;

	/* first and last insn idx of this verifier state */
	u32 first_insn_idx;
	u32 last_insn_idx;
	/* If this state is a backedge state then equal_state
	 * records the cached state to which this state is equal.
	 */
	struct bpf_verifier_state *equal_state;
	/* jmp history recorded from first to last.
	 * backtracking is using it to go from last to first.
	 * For most states jmp_history_cnt is [0-3].
	 * For loops it can go up to ~40.
	 */
	struct bpf_jmp_history_entry *jmp_history;
	u32 jmp_history_cnt;
	u32 dfs_depth;
	u32 callback_unroll_depth;
	u32 may_goto_depth;
};

#define bpf_get_spilled_reg(slot, frame, mask)				\
	(((slot < frame->allocated_stack / BPF_REG_SIZE) &&		\
	  ((1 << frame->stack[slot].slot_type[BPF_REG_SIZE - 1]) & (mask))) \
	 ? &frame->stack[slot].spilled_ptr : NULL)

/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */
#define bpf_for_each_spilled_reg(iter, frame, reg, mask)		\
	for (iter = 0, reg = bpf_get_spilled_reg(iter, frame, mask);	\
	     iter < frame->allocated_stack / BPF_REG_SIZE;		\
	     iter++, reg = bpf_get_spilled_reg(iter, frame, mask))
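
/* Usage sketch (ours): count how many stack slots of 'frame' currently hold
 * spilled registers.
 */
static inline int example_count_spills(struct bpf_func_state *frame)
{
	struct bpf_reg_state *reg;
	int i, n = 0;

	bpf_for_each_spilled_reg(i, frame, reg, 1 << STACK_SPILL) {
		if (reg)
			n++;
	}
	return n;
}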

#define bpf_for_each_reg_in_vstate_mask(__vst, __state, __reg, __mask, __expr)   \
	({								\
		struct bpf_verifier_state *___vstate = __vst;		\
		int ___i, ___j;						\
		for (___i = 0; ___i <= ___vstate->curframe; ___i++) {	\
			struct bpf_reg_state *___regs;			\
			__state = ___vstate->frame[___i];		\
			___regs = __state->regs;			\
			for (___j = 0; ___j < MAX_BPF_REG; ___j++) {	\
				__reg = &___regs[___j];			\
				(void)(__expr);				\
			}						\
			bpf_for_each_spilled_reg(___j, __state, __reg, __mask) { \
				if (!__reg)				\
					continue;			\
				(void)(__expr);				\
			}						\
		}							\
	})

/* Invoke __expr over registers in __vst, setting __state and __reg */
#define bpf_for_each_reg_in_vstate(__vst, __state, __reg, __expr) \
	bpf_for_each_reg_in_vstate_mask(__vst, __state, __reg, 1 << STACK_SPILL, __expr)
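
/* Usage sketch (ours): walk every register (including spills) in all frames
 * of a verifier state, counting the scalars. __expr is typically passed as a
 * statement expression, as the verifier itself does.
 */
static inline int example_count_scalars(struct bpf_verifier_state *vst)
{
	struct bpf_func_state *state;
	struct bpf_reg_state *reg;
	int n = 0;

	bpf_for_each_reg_in_vstate(vst, state, reg, ({
		if (reg->type == SCALAR_VALUE)
			n++;
	}));
	return n;
}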

/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
	struct bpf_verifier_state state;
	struct list_head node;
	u32 miss_cnt;
	u32 hit_cnt:31;
	u32 in_free_list:1;
};

struct bpf_loop_inline_state {
	unsigned int initialized:1; /* set to true upon first entry */
	unsigned int fit_for_inline:1; /* true if callback function is the same
					* at each call and flags are always zero
					*/
	u32 callback_subprogno; /* valid when fit_for_inline is true */
};

/* pointer and state for maps */
struct bpf_map_ptr_state {
	struct bpf_map *map_ptr;
	bool poison;
	bool unpriv;
};

/* Possible states for alu_state member. */
#define BPF_ALU_SANITIZE_SRC	(1U << 0)
#define BPF_ALU_SANITIZE_DST	(1U << 1)
#define BPF_ALU_NEG_VALUE	(1U << 2)
#define BPF_ALU_NON_POINTER	(1U << 3)
#define BPF_ALU_IMMEDIATE	(1U << 4)
#define BPF_ALU_SANITIZE	(BPF_ALU_SANITIZE_SRC | \
				 BPF_ALU_SANITIZE_DST)

/*
 * An array of BPF instruction indexes.
 * Primary usage: return value of bpf_insn_successors().
 */
struct bpf_iarray {
	int cnt;
	u32 items[];
};

struct bpf_insn_aux_data {
	union {
		enum bpf_reg_type ptr_type;	/* pointer type for load/store insns */
		struct bpf_map_ptr_state map_ptr_state;
		s32 call_imm;			/* saved imm field of call insn */
		u32 alu_limit;			/* limit for add/sub register with pointer */
		struct {
			u32 map_index;		/* index into used_maps[] */
			u32 map_off;		/* offset from value base address */
		};
		struct {
			enum bpf_reg_type reg_type;	/* type of pseudo_btf_id */
			union {
				struct {
					struct btf *btf;
					u32 btf_id;	/* btf_id for struct typed var */
				};
				u32 mem_size;	/* mem_size for non-struct typed var */
			};
		} btf_var;
		/* if instruction is a call to bpf_loop, this field tracks
		 * the state of the relevant registers to make a decision
		 * about inlining
		 */
		struct bpf_loop_inline_state loop_inline_state;
	};
	union {
		/* remember the size of the type passed to bpf_obj_new to rewrite R1 */
		u64 obj_new_size;
		/* remember the offset of the node field within the type to rewrite */
		u64 insert_off;
	};
	struct bpf_iarray *jt; /* jump table for gotox or bpf_tail_call instruction */
	struct btf_struct_meta *kptr_struct_meta;
	u64 map_key_state; /* constant (32 bit) key tracking for maps */
	int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
	u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
	bool nospec; /* do not execute this instruction speculatively */
	bool nospec_result; /* result is unsafe under speculation, nospec must follow */
	bool zext_dst; /* this insn zero extends dst reg */
	bool needs_zext; /* alu op needs to clear upper bits */
	bool non_sleepable; /* helper/kfunc may be called from non-sleepable context */
	bool is_iter_next; /* bpf_iter_<type>_next() kfunc call */
	bool call_with_percpu_alloc_ptr; /* {this,per}_cpu_ptr() with prog percpu alloc */
	u8 alu_state; /* used in combination with alu_limit */
	/* true if STX or LDX instruction is a part of a spill/fill
	 * pattern for a bpf_fastcall call.
	 */
	u8 fastcall_pattern:1;
	/* for CALL instructions, the number of spill/fill pairs in the
	 * bpf_fastcall pattern.
	 */
	u8 fastcall_spills_num:3;
	u8 arg_prog:4;

	/* below fields are initialized once */
	unsigned int orig_idx; /* original instruction index */
	bool jmp_point;
	bool prune_point;
	/* ensure we check state equivalence and save a state checkpoint at
	 * this instruction, regardless of any heuristics
	 */
	bool force_checkpoint;
	/* true if instruction is a call to a helper function that
	 * accepts a callback function as a parameter.
	 */
	bool calls_callback;
	/*
	 * CFG strongly connected component this instruction belongs to,
	 * zero if it is a singleton SCC.
	 */
	u32 scc;
	/* registers alive before this instruction. */
	u16 live_regs_before;
};

#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
#define MAX_USED_BTFS 64 /* max number of BTFs accessed by one BPF program */

#define BPF_VERIFIER_TMP_LOG_SIZE	1024

struct bpf_verifier_log {
	/* Logical start and end positions of a "log window" of the verifier log.
	 * start_pos == 0 means we haven't truncated anything.
	 * Once truncation starts to happen, start_pos + len_total == end_pos,
	 * except during log reset situations, in which (end_pos - start_pos)
	 * might get smaller than len_total (see bpf_vlog_reset()).
	 * Generally, (end_pos - start_pos) gives the number of useful bytes
	 * in the user log buffer.
	 */
	u64 start_pos;
	u64 end_pos;
	char __user *ubuf;
	u32 level;
	u32 len_total;
	u32 len_max;
	char kbuf[BPF_VERIFIER_TMP_LOG_SIZE];
};

#define BPF_LOG_LEVEL1	1
#define BPF_LOG_LEVEL2	2
#define BPF_LOG_STATS	4
#define BPF_LOG_FIXED	8
#define BPF_LOG_LEVEL	(BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2)
#define BPF_LOG_MASK	(BPF_LOG_LEVEL | BPF_LOG_STATS | BPF_LOG_FIXED)
#define BPF_LOG_KERNEL	(BPF_LOG_MASK + 1) /* kernel internal flag */
#define BPF_LOG_MIN_ALIGNMENT 8U
#define BPF_LOG_ALIGNMENT 40U

static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
{
	return log && log->level;
}

#define BPF_MAX_SUBPROGS 256

struct bpf_subprog_arg_info {
	enum bpf_arg_type arg_type;
	union {
		u32 mem_size;
		u32 btf_id;
	};
};

enum priv_stack_mode {
	PRIV_STACK_UNKNOWN,
	NO_PRIV_STACK,
	PRIV_STACK_ADAPTIVE,
};

struct bpf_subprog_info {
	/* 'start' has to be the first field otherwise find_subprog() won't work */
	u32 start; /* insn idx of function entry point */
	u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
	u32 postorder_start; /* The idx to the env->cfg.insn_postorder */
	u32 exit_idx; /* Index of one of the BPF_EXIT instructions in this subprogram */
	u16 stack_depth; /* max. stack depth used by this function */
	u16 stack_extra;
	/* offsets in range [stack_depth .. fastcall_stack_off)
	 * are used for bpf_fastcall spills and fills.
	 */
	s16 fastcall_stack_off;
	bool has_tail_call: 1;
	bool tail_call_reachable: 1;
	bool has_ld_abs: 1;
	bool is_cb: 1;
	bool is_async_cb: 1;
	bool is_exception_cb: 1;
	bool args_cached: 1;
	/* true if bpf_fastcall stack region is used by functions that can't be inlined */
	bool keep_fastcall_stack: 1;
	bool changes_pkt_data: 1;
	bool might_sleep: 1;
	u8 arg_cnt:3;

	enum priv_stack_mode priv_stack_mode;
	struct bpf_subprog_arg_info args[MAX_BPF_FUNC_REG_ARGS];
};

struct bpf_verifier_env;

struct backtrack_state {
	struct bpf_verifier_env *env;
	u32 frame;
	u32 reg_masks[MAX_CALL_FRAMES];
	u64 stack_masks[MAX_CALL_FRAMES];
};

struct bpf_id_pair {
	u32 old;
	u32 cur;
};

struct bpf_idmap {
	u32 tmp_id_gen;
	u32 cnt;
	struct bpf_id_pair map[BPF_ID_MAP_SIZE];
};

struct bpf_idset {
	u32 num_ids;
	struct {
		u32 id;
		u32 cnt;
	} entries[BPF_ID_MAP_SIZE];
};

/* see verifier.c:compute_scc_callchain() */
struct bpf_scc_callchain {
	/* call sites from bpf_verifier_state->frame[*]->callsite leading to this SCC */
	u32 callsites[MAX_CALL_FRAMES - 1];
	/* last frame in a chain is identified by SCC id */
	u32 scc;
};

/* verifier state waiting for propagate_backedges() */
struct bpf_scc_backedge {
	struct bpf_scc_backedge *next;
	struct bpf_verifier_state state;
};

struct bpf_scc_visit {
	struct bpf_scc_callchain callchain;
	/* first state in current verification path that entered SCC
	 * identified by the callchain
	 */
	struct bpf_verifier_state *entry_state;
	struct bpf_scc_backedge *backedges; /* list of backedges */
	u32 num_backedges;
};

/* An array of bpf_scc_visit structs sharing the same bpf_scc_callchain->scc
 * but having different bpf_scc_callchain->callsites.
 */
struct bpf_scc_info {
	u32 num_visits;
	struct bpf_scc_visit visits[];
};

struct bpf_liveness;

/* single container for all structs.
 * one verifier_env per bpf_check() call
 */
struct bpf_verifier_env {
	u32 insn_idx;
	u32 prev_insn_idx;
	struct bpf_prog *prog;		/* eBPF program being verified */
	const struct bpf_verifier_ops *ops;
	struct module *attach_btf_mod;	/* The owner module of prog->aux->attach_btf */
	struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
	int stack_size;			/* number of states to be processed */
	bool strict_alignment;		/* perform strict pointer alignment checks */
	bool test_state_freq;		/* test verifier with different pruning frequency */
	bool test_reg_invariants;	/* fail verification on register invariant violations */
	struct bpf_verifier_state *cur_state; /* current verifier state */
	/* Search pruning optimization, array of list_heads for
	 * lists of struct bpf_verifier_state_list.
	 */
	struct list_head *explored_states;
	struct list_head free_list;	/* list of struct bpf_verifier_state_list */
	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by eBPF program */
	struct btf_mod_pair used_btfs[MAX_USED_BTFS]; /* array of BTFs used by BPF program */
	struct bpf_map *insn_array_maps[MAX_USED_MAPS]; /* array of INSN_ARRAY maps to be relocated */
	u32 used_map_cnt;		/* number of used maps */
	u32 used_btf_cnt;		/* number of used BTF objects */
	u32 insn_array_map_cnt;		/* number of used maps of type BPF_MAP_TYPE_INSN_ARRAY */
	u32 id_gen;			/* used to generate unique reg IDs */
	u32 hidden_subprog_cnt;		/* number of hidden subprogs */
	int exception_callback_subprog;
	bool explore_alu_limits;
	bool allow_ptr_leaks;
	/* Allow access to uninitialized stack memory. Writes with fixed offset are
	 * always allowed, so this refers to reads (with fixed or variable offset),
	 * to writes with variable offset and to indirect (helper) accesses.
	 */
	bool allow_uninit_stack;
	bool bpf_capable;
	bool bypass_spec_v1;
	bool bypass_spec_v4;
	bool seen_direct_write;
	bool seen_exception;
	struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
	const struct bpf_line_info *prev_linfo;
	struct bpf_verifier_log log;
	struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 2]; /* max + 2 for the fake and exception subprogs */
	union {
		struct bpf_idmap idmap_scratch;
		struct bpf_idset idset_scratch;
	};
	struct {
		int *insn_state;
		int *insn_stack;
		/*
		 * vector of instruction indexes sorted in post-order, grouped by subprogram,
		 * see bpf_subprog_info->postorder_start.
		 */
		int *insn_postorder;
		int cur_stack;
		/* current position in the insn_postorder vector */
		int cur_postorder;
	} cfg;
	struct backtrack_state bt;
	struct bpf_jmp_history_entry *cur_hist_ent;
	u32 pass_cnt; /* number of times do_check() was called */
	u32 subprog_cnt;
	/* number of instructions analyzed by the verifier */
	u32 prev_insn_processed, insn_processed;
	/* number of jmps, calls, exits analyzed so far */
	u32 prev_jmps_processed, jmps_processed;
	/* total verification time */
	u64 verification_time;
	/* maximum number of verifier states kept in 'branching' instructions */
	u32 max_states_per_insn;
	/* total number of allocated verifier states */
	u32 total_states;
	/* Some states are freed during program analysis; this is the peak
	 * number of live states, which dominates kernel memory consumption
	 * during verification.
	 */
	u32 peak_states;
	/* longest register parentage chain walked for liveness marking */
	u32 longest_mark_read_walk;
	u32 free_list_size;
	u32 explored_states_size;
	u32 num_backedges;
	bpfptr_t fd_array;

	/* bit mask to keep track of whether a register has been accessed
	 * since the last time the function state was printed
	 */
	u32 scratched_regs;
	/* Same as scratched_regs but for stack slots */
	u64 scratched_stack_slots;
	u64 prev_log_pos, prev_insn_print_pos;
	/* buffer used to temporarily hold constants as scalar registers */
	struct bpf_reg_state fake_reg[2];
	/* buffer used to generate temporary string representations,
	 * e.g., in reg_type_str() to generate the reg_type string
	 */
	char tmp_str_buf[TMP_STR_BUF_LEN];
	struct bpf_insn insn_buf[INSN_BUF_SIZE];
	struct bpf_insn epilogue_buf[INSN_BUF_SIZE];
	struct bpf_scc_callchain callchain_buf;
	struct bpf_liveness *liveness;
	/* array of pointers to bpf_scc_info indexed by SCC id */
	struct bpf_scc_info **scc_info;
	u32 scc_cnt;
	struct bpf_iarray *succ;
	struct bpf_iarray *gotox_tmp_buf;
};

static inline struct bpf_func_info_aux *subprog_aux(struct bpf_verifier_env *env, int subprog)
{
	return &env->prog->aux->func_info_aux[subprog];
}

static inline struct bpf_subprog_info *subprog_info(struct bpf_verifier_env *env, int subprog)
{
	return &env->subprog_info[subprog];
}

__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
				      const char *fmt, va_list args);
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...);
__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
			    const char *fmt, ...);
int bpf_vlog_init(struct bpf_verifier_log *log, u32 log_level,
		  char __user *log_buf, u32 log_size);
void bpf_vlog_reset(struct bpf_verifier_log *log, u64 new_pos);
int bpf_vlog_finalize(struct bpf_verifier_log *log, u32 *log_size_actual);

__printf(3, 4) void verbose_linfo(struct bpf_verifier_env *env,
				  u32 insn_off,
				  const char *prefix_fmt, ...);

#define verifier_bug_if(cond, env, fmt, args...)			\
	({								\
		bool __cond = (cond);					\
		if (unlikely(__cond))					\
			verifier_bug(env, fmt " (" #cond ")", ##args);	\
		(__cond);						\
	})
#define verifier_bug(env, fmt, args...)					\
	({								\
		BPF_WARN_ONCE(1, "verifier bug: " fmt "\n", ##args);	\
		bpf_log(&env->log, "verifier bug: " fmt "\n", ##args);	\
	})
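
/* Usage sketch (ours): bail out of a verifier routine on an internal
 * inconsistency, reporting it both to the kernel log and the user-visible
 * verifier log. verifier_bug_if() evaluates to the condition, so it can
 * guard an early return:
 *
 *	if (verifier_bug_if(spi < 0, env, "invalid spi %d", spi))
 *		return -EFAULT;
 */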

static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
{
	struct bpf_verifier_state *cur = env->cur_state;

	return cur->frame[cur->curframe];
}

static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
{
	return cur_func(env)->regs;
}

int bpf_prog_offload_verifier_prep(struct bpf_prog *prog);
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx);
int bpf_prog_offload_finalize(struct bpf_verifier_env *env);
void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
			      struct bpf_insn *insn);
void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);

/* this lives here instead of in bpf.h because it needs to dereference tgt_prog */
static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog,
					     struct btf *btf, u32 btf_id)
{
	if (tgt_prog)
		return ((u64)tgt_prog->aux->id << 32) | btf_id;
	else
		return ((u64)btf_obj_id(btf) << 32) | 0x80000000 | btf_id;
}

/* unpack the IDs from the key as constructed above */
static inline void bpf_trampoline_unpack_key(u64 key, u32 *obj_id, u32 *btf_id)
{
	if (obj_id)
		*obj_id = key >> 32;
	if (btf_id)
		*btf_id = key & 0x7FFFFFFF;
}
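
/* Key layout sketch (ours): bits 63-32 carry the target prog id (or the BTF
 * object id), bit 31 flags the BTF-object case, bits 30-0 carry btf_id.
 * E.g. with tgt_prog->aux->id == 5 and btf_id == 42 the key is
 * (5ULL << 32) | 42, and unpacking yields obj_id == 5 and btf_id == 42.
 */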

int bpf_check_attach_target(struct bpf_verifier_log *log,
			    const struct bpf_prog *prog,
			    const struct bpf_prog *tgt_prog,
			    u32 btf_id,
			    struct bpf_attach_target_info *tgt_info);
void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab);

int mark_chain_precision(struct bpf_verifier_env *env, int regno);

#define BPF_BASE_TYPE_MASK	GENMASK(BPF_BASE_TYPE_BITS - 1, 0)

/* extract base type from bpf_{arg, return, reg}_type. */
static inline u32 base_type(u32 type)
{
	return type & BPF_BASE_TYPE_MASK;
}

/* extract flags from an extended type. See bpf_type_flag in bpf.h. */
static inline u32 type_flag(u32 type)
{
	return type & ~BPF_BASE_TYPE_MASK;
}
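
/* Example (ours): for a composed value such as
 * PTR_TO_MAP_VALUE | PTR_MAYBE_NULL, base_type() returns PTR_TO_MAP_VALUE
 * and type_flag() returns PTR_MAYBE_NULL, which is what helpers like
 * type_may_be_null() below rely on.
 */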

/* only use after check_attach_btf_id() */
static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog)
{
	return (prog->type == BPF_PROG_TYPE_EXT && prog->aux->saved_dst_prog_type) ?
		prog->aux->saved_dst_prog_type : prog->type;
}

static inline bool bpf_prog_check_recur(const struct bpf_prog *prog)
{
	switch (resolve_prog_type(prog)) {
	case BPF_PROG_TYPE_TRACING:
		return prog->expected_attach_type != BPF_TRACE_ITER;
	case BPF_PROG_TYPE_STRUCT_OPS:
		return prog->aux->jits_use_priv_stack;
	case BPF_PROG_TYPE_LSM:
	case BPF_PROG_TYPE_SYSCALL:
		return false;
	default:
		return true;
	}
}

#define BPF_REG_TRUSTED_MODIFIERS (MEM_ALLOC | PTR_TRUSTED | NON_OWN_REF)

static inline bool bpf_type_has_unsafe_modifiers(u32 type)
{
	return type_flag(type) & ~BPF_REG_TRUSTED_MODIFIERS;
}

static inline bool type_is_ptr_alloc_obj(u32 type)
{
	return base_type(type) == PTR_TO_BTF_ID && type_flag(type) & MEM_ALLOC;
}

static inline bool type_is_non_owning_ref(u32 type)
{
	return type_is_ptr_alloc_obj(type) && type_flag(type) & NON_OWN_REF;
}

static inline bool type_is_pkt_pointer(enum bpf_reg_type type)
{
	type = base_type(type);
	return type == PTR_TO_PACKET ||
	       type == PTR_TO_PACKET_META;
}

static inline bool type_is_sk_pointer(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET ||
	       type == PTR_TO_SOCK_COMMON ||
	       type == PTR_TO_TCP_SOCK ||
	       type == PTR_TO_XDP_SOCK;
}

static inline bool type_may_be_null(u32 type)
{
	return type & PTR_MAYBE_NULL;
}

static inline void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno)
{
	env->scratched_regs |= 1U << regno;
}

static inline void mark_stack_slot_scratched(struct bpf_verifier_env *env, u32 spi)
{
	env->scratched_stack_slots |= 1ULL << spi;
}

static inline bool reg_scratched(const struct bpf_verifier_env *env, u32 regno)
{
	return (env->scratched_regs >> regno) & 1;
}

static inline bool stack_slot_scratched(const struct bpf_verifier_env *env, u64 regno)
{
	return (env->scratched_stack_slots >> regno) & 1;
}

static inline bool verifier_state_scratched(const struct bpf_verifier_env *env)
{
	return env->scratched_regs || env->scratched_stack_slots;
}

static inline void mark_verifier_state_clean(struct bpf_verifier_env *env)
{
	env->scratched_regs = 0U;
	env->scratched_stack_slots = 0ULL;
}

/* Used for printing the entire verifier state. */
static inline void mark_verifier_state_scratched(struct bpf_verifier_env *env)
{
	env->scratched_regs = ~0U;
	env->scratched_stack_slots = ~0ULL;
}

static inline bool bpf_stack_narrow_access_ok(int off, int fill_size, int spill_size)
{
#ifdef __BIG_ENDIAN
	off -= spill_size - fill_size;
#endif

	return !(off % BPF_REG_SIZE);
}
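
/* Worked example (ours): on little-endian, a 4-byte fill of an 8-byte spill
 * at off == -8 reads the value's low bytes, and -8 % 8 == 0, so it is ok.
 * On big-endian the low bytes sit at off == -4, so the check first shifts
 * off by spill_size - fill_size == 4, arriving at the same -8.
 */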

static inline bool insn_is_gotox(struct bpf_insn *insn)
{
	return BPF_CLASS(insn->code) == BPF_JMP &&
	       BPF_OP(insn->code) == BPF_JA &&
	       BPF_SRC(insn->code) == BPF_X;
}

const char *reg_type_str(struct bpf_verifier_env *env, enum bpf_reg_type type);
const char *dynptr_type_str(enum bpf_dynptr_type type);
const char *iter_type_str(const struct btf *btf, u32 btf_id);
const char *iter_state_str(enum bpf_iter_state state);

void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate,
			  u32 frameno, bool print_all);
void print_insn_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate,
		      u32 frameno);

struct bpf_subprog_info *bpf_find_containing_subprog(struct bpf_verifier_env *env, int off);
int bpf_jmp_offset(struct bpf_insn *insn);
struct bpf_iarray *bpf_insn_successors(struct bpf_verifier_env *env, u32 idx);
void bpf_fmt_stack_mask(char *buf, ssize_t buf_sz, u64 stack_mask);
bool bpf_calls_callback(struct bpf_verifier_env *env, int insn_idx);

int bpf_stack_liveness_init(struct bpf_verifier_env *env);
void bpf_stack_liveness_free(struct bpf_verifier_env *env);
int bpf_update_live_stack(struct bpf_verifier_env *env);
int bpf_mark_stack_read(struct bpf_verifier_env *env, u32 frameno, u32 insn_idx, u64 mask);
void bpf_mark_stack_write(struct bpf_verifier_env *env, u32 frameno, u64 mask);
int bpf_reset_stack_write_marks(struct bpf_verifier_env *env, u32 insn_idx);
int bpf_commit_stack_write_marks(struct bpf_verifier_env *env);
int bpf_live_stack_query_init(struct bpf_verifier_env *env, struct bpf_verifier_state *st);
bool bpf_stack_slot_alive(struct bpf_verifier_env *env, u32 frameno, u32 spi);
void bpf_reset_live_stack_callchain(struct bpf_verifier_env *env);

#endif /* _LINUX_BPF_VERIFIER_H */