Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

bpf: Pass bpf_verifier_env to JIT

Pass bpf_verifier_env to bpf_int_jit_compile(). The follow-up patch will
use env->insn_aux_data in the JIT stage to detect indirect jump targets.

Since bpf_prog_select_runtime() can be called by cbpf and lib/test_bpf.c
code without the verifier, introduce the helper __bpf_prog_select_runtime()
to accept the env parameter.

Remove the call to bpf_prog_select_runtime() in bpf_prog_load(), and
switch to call __bpf_prog_select_runtime() in the verifier, with env
variable passed. The original bpf_prog_select_runtime() is preserved for
cbpf and lib/test_bpf.c, where env is NULL.

Now all constant blinding calls are moved into the verifier, except for
the cbpf and lib/test_bpf.c cases. The instruction arrays are adjusted
by the bpf_patch_insn_data() function for normal cases, so there is no need
to call adjust_insn_arrays() in bpf_jit_blind_constants(). Remove it.

Reviewed-by: Anton Protopopov <a.s.protopopov@gmail.com> # v8
Reviewed-by: Emil Tsalapatis <emil@etsalapatis.com> # v12
Acked-by: Hengqi Chen <hengqi.chen@gmail.com> # v14
Signed-off-by: Xu Kuohai <xukuohai@huawei.com>
Link: https://lore.kernel.org/r/20260416064341.151802-3-xukuohai@huaweicloud.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

authored by

Xu Kuohai and committed by
Alexei Starovoitov
d9ef13f7 d3e94522

+84 -71
+1 -1
arch/arc/net/bpf_jit_core.c
··· 1400 1400 * (re)locations involved that their addresses are not known 1401 1401 * during the first run. 1402 1402 */ 1403 - struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) 1403 + struct bpf_prog *bpf_int_jit_compile(struct bpf_verifier_env *env, struct bpf_prog *prog) 1404 1404 { 1405 1405 vm_dump(prog); 1406 1406
+1 -1
arch/arm/net/bpf_jit_32.c
··· 2142 2142 return true; 2143 2143 } 2144 2144 2145 - struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) 2145 + struct bpf_prog *bpf_int_jit_compile(struct bpf_verifier_env *env, struct bpf_prog *prog) 2146 2146 { 2147 2147 struct bpf_binary_header *header; 2148 2148 struct jit_ctx ctx;
+1 -1
arch/arm64/net/bpf_jit_comp.c
··· 2000 2000 struct jit_ctx ctx; 2001 2001 }; 2002 2002 2003 - struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) 2003 + struct bpf_prog *bpf_int_jit_compile(struct bpf_verifier_env *env, struct bpf_prog *prog) 2004 2004 { 2005 2005 int image_size, prog_size, extable_size, extable_align, extable_offset; 2006 2006 struct bpf_binary_header *header;
+1 -1
arch/loongarch/net/bpf_jit.c
··· 1920 1920 return ret < 0 ? ret : ret * LOONGARCH_INSN_SIZE; 1921 1921 } 1922 1922 1923 - struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) 1923 + struct bpf_prog *bpf_int_jit_compile(struct bpf_verifier_env *env, struct bpf_prog *prog) 1924 1924 { 1925 1925 bool extra_pass = false; 1926 1926 u8 *image_ptr, *ro_image_ptr;
+1 -1
arch/mips/net/bpf_jit_comp.c
··· 909 909 return true; 910 910 } 911 911 912 - struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) 912 + struct bpf_prog *bpf_int_jit_compile(struct bpf_verifier_env *env, struct bpf_prog *prog) 913 913 { 914 914 struct bpf_binary_header *header = NULL; 915 915 struct jit_context ctx;
+1 -1
arch/parisc/net/bpf_jit_core.c
··· 41 41 return true; 42 42 } 43 43 44 - struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) 44 + struct bpf_prog *bpf_int_jit_compile(struct bpf_verifier_env *env, struct bpf_prog *prog) 45 45 { 46 46 unsigned int prog_size = 0, extable_size = 0; 47 47 bool extra_pass = false;
+1 -1
arch/powerpc/net/bpf_jit_comp.c
··· 162 162 } 163 163 } 164 164 165 - struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) 165 + struct bpf_prog *bpf_int_jit_compile(struct bpf_verifier_env *env, struct bpf_prog *fp) 166 166 { 167 167 u32 proglen; 168 168 u32 alloclen;
+1 -1
arch/riscv/net/bpf_jit_core.c
··· 41 41 return true; 42 42 } 43 43 44 - struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) 44 + struct bpf_prog *bpf_int_jit_compile(struct bpf_verifier_env *env, struct bpf_prog *prog) 45 45 { 46 46 unsigned int prog_size = 0, extable_size = 0; 47 47 bool extra_pass = false;
+1 -1
arch/s390/net/bpf_jit_comp.c
··· 2312 2312 /* 2313 2313 * Compile eBPF program "fp" 2314 2314 */ 2315 - struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) 2315 + struct bpf_prog *bpf_int_jit_compile(struct bpf_verifier_env *env, struct bpf_prog *fp) 2316 2316 { 2317 2317 struct bpf_binary_header *header; 2318 2318 struct s390_jit_data *jit_data;
+1 -1
arch/sparc/net/bpf_jit_comp_64.c
··· 1477 1477 struct jit_ctx ctx; 1478 1478 }; 1479 1479 1480 - struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) 1480 + struct bpf_prog *bpf_int_jit_compile(struct bpf_verifier_env *env, struct bpf_prog *prog) 1481 1481 { 1482 1482 struct sparc64_jit_data *jit_data; 1483 1483 struct bpf_binary_header *header;
+1 -1
arch/x86/net/bpf_jit_comp.c
··· 3713 3713 #define MAX_PASSES 20 3714 3714 #define PADDING_PASSES (MAX_PASSES - 5) 3715 3715 3716 - struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) 3716 + struct bpf_prog *bpf_int_jit_compile(struct bpf_verifier_env *env, struct bpf_prog *prog) 3717 3717 { 3718 3718 struct bpf_binary_header *rw_header = NULL; 3719 3719 struct bpf_binary_header *header = NULL;
+1 -1
arch/x86/net/bpf_jit_comp32.c
··· 2518 2518 return true; 2519 2519 } 2520 2520 2521 - struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) 2521 + struct bpf_prog *bpf_int_jit_compile(struct bpf_verifier_env *env, struct bpf_prog *prog) 2522 2522 { 2523 2523 struct bpf_binary_header *header = NULL; 2524 2524 int proglen, oldproglen = 0;
+16 -1
include/linux/filter.h
··· 1108 1108 return sk_filter_trim_cap(sk, skb, 1); 1109 1109 } 1110 1110 1111 + struct bpf_prog *__bpf_prog_select_runtime(struct bpf_verifier_env *env, struct bpf_prog *fp, 1112 + int *err); 1111 1113 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err); 1112 1114 void bpf_prog_free(struct bpf_prog *fp); 1113 1115 ··· 1155 1153 ((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \ 1156 1154 (void *)__bpf_call_base) 1157 1155 1158 - struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog); 1156 + struct bpf_prog *bpf_int_jit_compile(struct bpf_verifier_env *env, struct bpf_prog *prog); 1159 1157 void bpf_jit_compile(struct bpf_prog *prog); 1160 1158 bool bpf_jit_needs_zext(void); 1161 1159 bool bpf_jit_inlines_helper_call(s32 imm); ··· 1190 1188 #ifdef CONFIG_BPF_SYSCALL 1191 1189 struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off, 1192 1190 const struct bpf_insn *patch, u32 len); 1191 + struct bpf_insn_aux_data *bpf_dup_insn_aux_data(struct bpf_verifier_env *env); 1192 + void bpf_restore_insn_aux_data(struct bpf_verifier_env *env, 1193 + struct bpf_insn_aux_data *orig_insn_aux); 1193 1194 #else 1194 1195 static inline struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off, 1195 1196 const struct bpf_insn *patch, u32 len) 1196 1197 { 1197 1198 return ERR_PTR(-ENOTSUPP); 1199 + } 1200 + 1201 + static inline struct bpf_insn_aux_data *bpf_dup_insn_aux_data(struct bpf_verifier_env *env) 1202 + { 1203 + return NULL; 1204 + } 1205 + 1206 + static inline void bpf_restore_insn_aux_data(struct bpf_verifier_env *env, 1207 + struct bpf_insn_aux_data *orig_insn_aux) 1208 + { 1198 1209 } 1199 1210 #endif /* CONFIG_BPF_SYSCALL */ 1200 1211
+43 -43
kernel/bpf/core.c
··· 1491 1491 bpf_prog_clone_free(fp_other); 1492 1492 } 1493 1493 1494 - static void adjust_insn_arrays(struct bpf_prog *prog, u32 off, u32 len) 1495 - { 1496 - #ifdef CONFIG_BPF_SYSCALL 1497 - struct bpf_map *map; 1498 - int i; 1499 - 1500 - if (len <= 1) 1501 - return; 1502 - 1503 - for (i = 0; i < prog->aux->used_map_cnt; i++) { 1504 - map = prog->aux->used_maps[i]; 1505 - if (map->map_type == BPF_MAP_TYPE_INSN_ARRAY) 1506 - bpf_insn_array_adjust(map, off, len); 1507 - } 1508 - #endif 1509 - } 1510 - 1511 1494 /* 1512 1495 * Now this function is used only to blind the main prog and must be invoked only when 1513 1496 * bpf_prog_need_blind() returns true. ··· 1563 1580 1564 1581 if (env) 1565 1582 env->prog = clone; 1566 - else 1567 - /* 1568 - * Instructions arrays must be updated using absolute xlated offsets. 1569 - * The arrays have already been adjusted by bpf_patch_insn_data() when 1570 - * env is not NULL. 1571 - */ 1572 - adjust_insn_arrays(clone, i, rewritten); 1573 1583 1574 1584 /* Walk new program and skip insns we just inserted. */ 1575 1585 insn = clone->insnsi + i + insn_delta; ··· 2531 2555 return select_interpreter; 2532 2556 } 2533 2557 2534 - static struct bpf_prog *bpf_prog_jit_compile(struct bpf_prog *prog) 2558 + static struct bpf_prog *bpf_prog_jit_compile(struct bpf_verifier_env *env, struct bpf_prog *prog) 2535 2559 { 2536 2560 #ifdef CONFIG_BPF_JIT 2537 2561 struct bpf_prog *orig_prog; 2562 + struct bpf_insn_aux_data *orig_insn_aux; 2538 2563 2539 2564 if (!bpf_prog_need_blind(prog)) 2540 - return bpf_int_jit_compile(prog); 2565 + return bpf_int_jit_compile(env, prog); 2566 + 2567 + if (env) { 2568 + /* 2569 + * If env is not NULL, we are called from the end of bpf_check(), at this 2570 + * point, only insn_aux_data is used after failure, so it should be restored 2571 + * on failure. 
2572 + */ 2573 + orig_insn_aux = bpf_dup_insn_aux_data(env); 2574 + if (!orig_insn_aux) 2575 + return prog; 2576 + } 2541 2577 2542 2578 orig_prog = prog; 2543 - prog = bpf_jit_blind_constants(NULL, prog); 2579 + prog = bpf_jit_blind_constants(env, prog); 2544 2580 /* 2545 2581 * If blinding was requested and we failed during blinding, we must fall 2546 2582 * back to the interpreter. 2547 2583 */ 2548 2584 if (IS_ERR(prog)) 2549 - return orig_prog; 2585 + goto out_restore; 2550 2586 2551 - prog = bpf_int_jit_compile(prog); 2587 + prog = bpf_int_jit_compile(env, prog); 2552 2588 if (prog->jited) { 2553 2589 bpf_jit_prog_release_other(prog, orig_prog); 2590 + if (env) 2591 + vfree(orig_insn_aux); 2554 2592 return prog; 2555 2593 } 2556 2594 2557 2595 bpf_jit_prog_release_other(orig_prog, prog); 2596 + 2597 + out_restore: 2558 2598 prog = orig_prog; 2599 + if (env) 2600 + bpf_restore_insn_aux_data(env, orig_insn_aux); 2559 2601 #endif 2560 2602 return prog; 2561 2603 } 2562 2604 2563 - /** 2564 - * bpf_prog_select_runtime - select exec runtime for BPF program 2565 - * @fp: bpf_prog populated with BPF program 2566 - * @err: pointer to error variable 2567 - * 2568 - * Try to JIT eBPF program, if JIT is not available, use interpreter. 2569 - * The BPF program will be executed via bpf_prog_run() function. 2570 - * 2571 - * Return: the &fp argument along with &err set to 0 for success or 2572 - * a negative errno code on failure 2573 - */ 2574 - struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err) 2605 + struct bpf_prog *__bpf_prog_select_runtime(struct bpf_verifier_env *env, struct bpf_prog *fp, 2606 + int *err) 2575 2607 { 2576 2608 /* In case of BPF to BPF calls, verifier did all the prep 2577 2609 * work with regards to JITing, etc. 
··· 2607 2623 if (*err) 2608 2624 return fp; 2609 2625 2610 - fp = bpf_prog_jit_compile(fp); 2626 + fp = bpf_prog_jit_compile(env, fp); 2611 2627 bpf_prog_jit_attempt_done(fp); 2612 2628 if (!fp->jited && jit_needed) { 2613 2629 *err = -ENOTSUPP; ··· 2632 2648 *err = bpf_check_tail_call(fp); 2633 2649 2634 2650 return fp; 2651 + } 2652 + 2653 + /** 2654 + * bpf_prog_select_runtime - select exec runtime for BPF program 2655 + * @fp: bpf_prog populated with BPF program 2656 + * @err: pointer to error variable 2657 + * 2658 + * Try to JIT eBPF program, if JIT is not available, use interpreter. 2659 + * The BPF program will be executed via bpf_prog_run() function. 2660 + * 2661 + * Return: the &fp argument along with &err set to 0 for success or 2662 + * a negative errno code on failure 2663 + */ 2664 + struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err) 2665 + { 2666 + return __bpf_prog_select_runtime(NULL, fp, err); 2635 2667 } 2636 2668 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime); 2637 2669 ··· 3136 3136 * It is encouraged to implement bpf_int_jit_compile() instead, so that 3137 3137 * eBPF and implicitly also cBPF can get JITed! 3138 3138 */ 3139 - struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog) 3139 + struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_verifier_env *env, struct bpf_prog *prog) 3140 3140 { 3141 3141 return prog; 3142 3142 }
+5 -5
kernel/bpf/fixups.c
··· 993 993 env->subprog_info[env->subprog_cnt].start = env->prog->len; 994 994 } 995 995 996 - static struct bpf_insn_aux_data *bpf_dup_insn_aux_data(struct bpf_verifier_env *env) 996 + struct bpf_insn_aux_data *bpf_dup_insn_aux_data(struct bpf_verifier_env *env) 997 997 { 998 998 size_t size; 999 999 void *new_aux; ··· 1005 1005 return new_aux; 1006 1006 } 1007 1007 1008 - static void bpf_restore_insn_aux_data(struct bpf_verifier_env *env, 1009 - struct bpf_insn_aux_data *orig_insn_aux) 1008 + void bpf_restore_insn_aux_data(struct bpf_verifier_env *env, 1009 + struct bpf_insn_aux_data *orig_insn_aux) 1010 1010 { 1011 1011 /* the expanded elements are zero-filled, so no special handling is required */ 1012 1012 vfree(env->insn_aux_data); ··· 1150 1150 func[i]->aux->token = prog->aux->token; 1151 1151 if (!i) 1152 1152 func[i]->aux->exception_boundary = env->seen_exception; 1153 - func[i] = bpf_int_jit_compile(func[i]); 1153 + func[i] = bpf_int_jit_compile(env, func[i]); 1154 1154 if (!func[i]->jited) { 1155 1155 err = -ENOTSUPP; 1156 1156 goto out_free; ··· 1194 1194 } 1195 1195 for (i = 0; i < env->subprog_cnt; i++) { 1196 1196 old_bpf_func = func[i]->bpf_func; 1197 - tmp = bpf_int_jit_compile(func[i]); 1197 + tmp = bpf_int_jit_compile(env, func[i]); 1198 1198 if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) { 1199 1199 verbose(env, "JIT doesn't support bpf-to-bpf calls\n"); 1200 1200 err = -ENOTSUPP;
-4
kernel/bpf/syscall.c
··· 3083 3083 if (err < 0) 3084 3084 goto free_used_maps; 3085 3085 3086 - prog = bpf_prog_select_runtime(prog, &err); 3087 - if (err < 0) 3088 - goto free_used_maps; 3089 - 3090 3086 err = bpf_prog_mark_insn_arrays_ready(prog); 3091 3087 if (err < 0) 3092 3088 goto free_used_maps;
+8 -6
kernel/bpf/verifier.c
··· 20155 20155 20156 20156 adjust_btf_func(env); 20157 20157 20158 + /* extension progs temporarily inherit the attach_type of their targets 20159 + for verification purposes, so set it back to zero before returning 20160 + */ 20161 + if (env->prog->type == BPF_PROG_TYPE_EXT) 20162 + env->prog->expected_attach_type = 0; 20163 + 20164 + env->prog = __bpf_prog_select_runtime(env, env->prog, &ret); 20165 + 20158 20166 err_release_maps: 20159 20167 if (ret) 20160 20168 release_insn_arrays(env); ··· 20173 20165 release_maps(env); 20174 20166 if (!env->prog->aux->used_btfs) 20175 20167 release_btfs(env); 20176 - 20177 - /* extension progs temporarily inherit the attach_type of their targets 20178 - for verification purposes, so set it back to zero before returning 20179 - */ 20180 - if (env->prog->type == BPF_PROG_TYPE_EXT) 20181 - env->prog->expected_attach_type = 0; 20182 20168 20183 20169 *prog = env->prog; 20184 20170