1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Linux Socket Filter Data Structures
4 */
5#ifndef __LINUX_FILTER_H__
6#define __LINUX_FILTER_H__
7
8#include <linux/atomic.h>
9#include <linux/bpf.h>
10#include <linux/refcount.h>
11#include <linux/compat.h>
12#include <linux/skbuff.h>
13#include <linux/linkage.h>
14#include <linux/printk.h>
15#include <linux/workqueue.h>
16#include <linux/sched.h>
17#include <linux/sched/clock.h>
18#include <linux/capability.h>
19#include <linux/set_memory.h>
20#include <linux/kallsyms.h>
21#include <linux/if_vlan.h>
22#include <linux/vmalloc.h>
23#include <linux/sockptr.h>
24#include <linux/u64_stats_sync.h>
25
26#include <net/sch_generic.h>
27
28#include <asm/byteorder.h>
29#include <uapi/linux/filter.h>
30
31struct sk_buff;
32struct sock;
33struct seccomp_data;
34struct bpf_prog_aux;
35struct xdp_rxq_info;
36struct xdp_buff;
37struct sock_reuseport;
38struct ctl_table;
39struct ctl_table_header;
40
41/* ArgX, context and stack frame pointer register positions. Note,
42 * Arg1, Arg2, Arg3, etc. are used as argument mappings of function
43 * calls in the BPF_CALL instruction.
44 */
45#define BPF_REG_ARG1 BPF_REG_1
46#define BPF_REG_ARG2 BPF_REG_2
47#define BPF_REG_ARG3 BPF_REG_3
48#define BPF_REG_ARG4 BPF_REG_4
49#define BPF_REG_ARG5 BPF_REG_5
50#define BPF_REG_CTX BPF_REG_6
51#define BPF_REG_FP BPF_REG_10
52
53/* Additional register mappings for converted user programs. */
54#define BPF_REG_A BPF_REG_0
55#define BPF_REG_X BPF_REG_7
56#define BPF_REG_TMP BPF_REG_2 /* scratch reg */
57#define BPF_REG_D BPF_REG_8 /* data, callee-saved */
58#define BPF_REG_H BPF_REG_9 /* hlen, callee-saved */
59
60/* Kernel hidden auxiliary/helper register. */
61#define BPF_REG_AX MAX_BPF_REG
62#define MAX_BPF_EXT_REG (MAX_BPF_REG + 1)
63#define MAX_BPF_JIT_REG MAX_BPF_EXT_REG
64
65/* unused opcode to mark special call to bpf_tail_call() helper */
66#define BPF_TAIL_CALL 0xf0
67
68/* unused opcode to mark special load instruction. Same as BPF_ABS */
69#define BPF_PROBE_MEM 0x20
70
71/* unused opcode to mark special ldsx instruction. Same as BPF_IND */
72#define BPF_PROBE_MEMSX 0x40
73
74/* unused opcode to mark special load instruction. Same as BPF_MSH */
75#define BPF_PROBE_MEM32 0xa0
76
77/* unused opcode to mark special atomic instruction */
78#define BPF_PROBE_ATOMIC 0xe0
79
80/* unused opcode to mark special ldsx instruction. Same as BPF_NOSPEC */
81#define BPF_PROBE_MEM32SX 0xc0
82
83/* unused opcode to mark call to interpreter with arguments */
84#define BPF_CALL_ARGS 0xe0
85
86/* unused opcode to mark speculation barrier for mitigating
87 * Spectre v1 and v4
88 */
89#define BPF_NOSPEC 0xc0
90
91/* As per nm, we expose JITed images as text (code) section for
92 * kallsyms. That way, tools like perf can find it to match
93 * addresses.
94 */
95#define BPF_SYM_ELF_TYPE 't'
96
97/* BPF program can access up to 512 bytes of stack space. */
98#define MAX_BPF_STACK 512
99
100/* Helper macros for filter block array initializers. */
101
102/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
103
104#define BPF_ALU64_REG_OFF(OP, DST, SRC, OFF) \
105 ((struct bpf_insn) { \
106 .code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \
107 .dst_reg = DST, \
108 .src_reg = SRC, \
109 .off = OFF, \
110 .imm = 0 })
111
112#define BPF_ALU64_REG(OP, DST, SRC) \
113 BPF_ALU64_REG_OFF(OP, DST, SRC, 0)
114
115#define BPF_ALU32_REG_OFF(OP, DST, SRC, OFF) \
116 ((struct bpf_insn) { \
117 .code = BPF_ALU | BPF_OP(OP) | BPF_X, \
118 .dst_reg = DST, \
119 .src_reg = SRC, \
120 .off = OFF, \
121 .imm = 0 })
122
123#define BPF_ALU32_REG(OP, DST, SRC) \
124 BPF_ALU32_REG_OFF(OP, DST, SRC, 0)
125
126/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */
127
128#define BPF_ALU64_IMM_OFF(OP, DST, IMM, OFF) \
129 ((struct bpf_insn) { \
130 .code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \
131 .dst_reg = DST, \
132 .src_reg = 0, \
133 .off = OFF, \
134 .imm = IMM })
135#define BPF_ALU64_IMM(OP, DST, IMM) \
136 BPF_ALU64_IMM_OFF(OP, DST, IMM, 0)
137
138#define BPF_ALU32_IMM_OFF(OP, DST, IMM, OFF) \
139 ((struct bpf_insn) { \
140 .code = BPF_ALU | BPF_OP(OP) | BPF_K, \
141 .dst_reg = DST, \
142 .src_reg = 0, \
143 .off = OFF, \
144 .imm = IMM })
145#define BPF_ALU32_IMM(OP, DST, IMM) \
146 BPF_ALU32_IMM_OFF(OP, DST, IMM, 0)
147
148/* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */
149
150#define BPF_ENDIAN(TYPE, DST, LEN) \
151 ((struct bpf_insn) { \
152 .code = BPF_ALU | BPF_END | BPF_SRC(TYPE), \
153 .dst_reg = DST, \
154 .src_reg = 0, \
155 .off = 0, \
156 .imm = LEN })
157
158/* Byte Swap, bswap16/32/64 */
159
160#define BPF_BSWAP(DST, LEN) \
161 ((struct bpf_insn) { \
162 .code = BPF_ALU64 | BPF_END | BPF_SRC(BPF_TO_LE), \
163 .dst_reg = DST, \
164 .src_reg = 0, \
165 .off = 0, \
166 .imm = LEN })
167
168/* Short form of mov, dst_reg = src_reg */
169
170#define BPF_MOV64_REG(DST, SRC) \
171 ((struct bpf_insn) { \
172 .code = BPF_ALU64 | BPF_MOV | BPF_X, \
173 .dst_reg = DST, \
174 .src_reg = SRC, \
175 .off = 0, \
176 .imm = 0 })
177
178#define BPF_MOV32_REG(DST, SRC) \
179 ((struct bpf_insn) { \
180 .code = BPF_ALU | BPF_MOV | BPF_X, \
181 .dst_reg = DST, \
182 .src_reg = SRC, \
183 .off = 0, \
184 .imm = 0 })
185
186/* Special (internal-only) form of mov, used to resolve per-CPU addrs:
187 * dst_reg = src_reg + <percpu_base_off>
188 * BPF_ADDR_PERCPU is used as a special insn->off value.
189 */
190#define BPF_ADDR_PERCPU (-1)
191
192#define BPF_MOV64_PERCPU_REG(DST, SRC) \
193 ((struct bpf_insn) { \
194 .code = BPF_ALU64 | BPF_MOV | BPF_X, \
195 .dst_reg = DST, \
196 .src_reg = SRC, \
197 .off = BPF_ADDR_PERCPU, \
198 .imm = 0 })
199
200static inline bool insn_is_mov_percpu_addr(const struct bpf_insn *insn)
201{
202 return insn->code == (BPF_ALU64 | BPF_MOV | BPF_X) && insn->off == BPF_ADDR_PERCPU;
203}
204
205/* Short form of mov, dst_reg = imm32 */
206
207#define BPF_MOV64_IMM(DST, IMM) \
208 ((struct bpf_insn) { \
209 .code = BPF_ALU64 | BPF_MOV | BPF_K, \
210 .dst_reg = DST, \
211 .src_reg = 0, \
212 .off = 0, \
213 .imm = IMM })
214
215#define BPF_MOV32_IMM(DST, IMM) \
216 ((struct bpf_insn) { \
217 .code = BPF_ALU | BPF_MOV | BPF_K, \
218 .dst_reg = DST, \
219 .src_reg = 0, \
220 .off = 0, \
221 .imm = IMM })
222
223/* Short form of movsx, dst_reg = (s8,s16,s32)src_reg */
224
225#define BPF_MOVSX64_REG(DST, SRC, OFF) \
226 ((struct bpf_insn) { \
227 .code = BPF_ALU64 | BPF_MOV | BPF_X, \
228 .dst_reg = DST, \
229 .src_reg = SRC, \
230 .off = OFF, \
231 .imm = 0 })
232
233#define BPF_MOVSX32_REG(DST, SRC, OFF) \
234 ((struct bpf_insn) { \
235 .code = BPF_ALU | BPF_MOV | BPF_X, \
236 .dst_reg = DST, \
237 .src_reg = SRC, \
238 .off = OFF, \
239 .imm = 0 })
240
241/* Special form of mov32, used for doing explicit zero extension on dst. */
242#define BPF_ZEXT_REG(DST) \
243 ((struct bpf_insn) { \
244 .code = BPF_ALU | BPF_MOV | BPF_X, \
245 .dst_reg = DST, \
246 .src_reg = DST, \
247 .off = 0, \
248 .imm = 1 })
249
250static inline bool insn_is_zext(const struct bpf_insn *insn)
251{
252 return insn->code == (BPF_ALU | BPF_MOV | BPF_X) && insn->imm == 1;
253}
254
255/* addr_space_cast from as(0) to as(1) is for converting bpf arena pointers
256 * to pointers in user vma.
257 */
258static inline bool insn_is_cast_user(const struct bpf_insn *insn)
259{
260 return insn->code == (BPF_ALU64 | BPF_MOV | BPF_X) &&
261 insn->off == BPF_ADDR_SPACE_CAST &&
262 insn->imm == 1U << 16;
263}
264
265/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
266#define BPF_LD_IMM64(DST, IMM) \
267 BPF_LD_IMM64_RAW(DST, 0, IMM)
268
269#define BPF_LD_IMM64_RAW(DST, SRC, IMM) \
270 ((struct bpf_insn) { \
271 .code = BPF_LD | BPF_DW | BPF_IMM, \
272 .dst_reg = DST, \
273 .src_reg = SRC, \
274 .off = 0, \
275 .imm = (__u32) (IMM) }), \
276 ((struct bpf_insn) { \
277 .code = 0, /* zero is reserved opcode */ \
278 .dst_reg = 0, \
279 .src_reg = 0, \
280 .off = 0, \
281 .imm = ((__u64) (IMM)) >> 32 })
282
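/* Note (added for clarity, not part of the original header): BPF_LD_IMM64*()
 * expands to two struct bpf_insn compound literals separated by a comma, with
 * the second one carrying the upper 32 bits of the immediate. It is therefore
 * intended for use inside array initializers, where the comma produces two
 * consecutive array elements.
 */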
283/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
284#define BPF_LD_MAP_FD(DST, MAP_FD) \
285 BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)
286
287/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */
288
289#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM) \
290 ((struct bpf_insn) { \
291 .code = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE), \
292 .dst_reg = DST, \
293 .src_reg = SRC, \
294 .off = 0, \
295 .imm = IMM })
296
297#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM) \
298 ((struct bpf_insn) { \
299 .code = BPF_ALU | BPF_MOV | BPF_SRC(TYPE), \
300 .dst_reg = DST, \
301 .src_reg = SRC, \
302 .off = 0, \
303 .imm = IMM })
304
305/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */
306
307#define BPF_LD_ABS(SIZE, IMM) \
308 ((struct bpf_insn) { \
309 .code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \
310 .dst_reg = 0, \
311 .src_reg = 0, \
312 .off = 0, \
313 .imm = IMM })
314
315/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */
316
317#define BPF_LD_IND(SIZE, SRC, IMM) \
318 ((struct bpf_insn) { \
319 .code = BPF_LD | BPF_SIZE(SIZE) | BPF_IND, \
320 .dst_reg = 0, \
321 .src_reg = SRC, \
322 .off = 0, \
323 .imm = IMM })
324
325/* Memory load, dst_reg = *(uint *) (src_reg + off16) */
326
327#define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \
328 ((struct bpf_insn) { \
329 .code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \
330 .dst_reg = DST, \
331 .src_reg = SRC, \
332 .off = OFF, \
333 .imm = 0 })
334
335/* Memory load, dst_reg = *(signed size *) (src_reg + off16) */
336
337#define BPF_LDX_MEMSX(SIZE, DST, SRC, OFF) \
338 ((struct bpf_insn) { \
339 .code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEMSX, \
340 .dst_reg = DST, \
341 .src_reg = SRC, \
342 .off = OFF, \
343 .imm = 0 })
344
345/* Memory store, *(uint *) (dst_reg + off16) = src_reg */
346
347#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \
348 ((struct bpf_insn) { \
349 .code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \
350 .dst_reg = DST, \
351 .src_reg = SRC, \
352 .off = OFF, \
353 .imm = 0 })
354
355
356/*
357 * Atomic operations:
358 *
359 * BPF_ADD *(uint *) (dst_reg + off16) += src_reg
360 * BPF_AND *(uint *) (dst_reg + off16) &= src_reg
361 * BPF_OR *(uint *) (dst_reg + off16) |= src_reg
362 * BPF_XOR *(uint *) (dst_reg + off16) ^= src_reg
363 * BPF_ADD | BPF_FETCH src_reg = atomic_fetch_add(dst_reg + off16, src_reg);
364 * BPF_AND | BPF_FETCH src_reg = atomic_fetch_and(dst_reg + off16, src_reg);
365 * BPF_OR | BPF_FETCH src_reg = atomic_fetch_or(dst_reg + off16, src_reg);
366 * BPF_XOR | BPF_FETCH src_reg = atomic_fetch_xor(dst_reg + off16, src_reg);
367 * BPF_XCHG src_reg = atomic_xchg(dst_reg + off16, src_reg)
368 * BPF_CMPXCHG r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg)
369 * BPF_LOAD_ACQ dst_reg = smp_load_acquire(src_reg + off16)
370 * BPF_STORE_REL smp_store_release(dst_reg + off16, src_reg)
371 */
372
373#define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF) \
374 ((struct bpf_insn) { \
375 .code = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC, \
376 .dst_reg = DST, \
377 .src_reg = SRC, \
378 .off = OFF, \
379 .imm = OP })
380
381/* Legacy alias */
382#define BPF_STX_XADD(SIZE, DST, SRC, OFF) BPF_ATOMIC_OP(SIZE, BPF_ADD, DST, SRC, OFF)
383
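/* Illustrative example (added, not part of the original header): a 64-bit
 * fetch-and-add such as
 *
 *	src_reg = atomic_fetch_add(dst_reg + off16, src_reg)
 *
 * can be emitted with the macro above as
 *
 *	BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_1, BPF_REG_2, 0)
 *
 * i.e. the atomic sub-operation is carried in the immediate field.
 */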
384/* Memory store, *(uint *) (dst_reg + off16) = imm32 */
385
386#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \
387 ((struct bpf_insn) { \
388 .code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \
389 .dst_reg = DST, \
390 .src_reg = 0, \
391 .off = OFF, \
392 .imm = IMM })
393
394/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */
395
396#define BPF_JMP_REG(OP, DST, SRC, OFF) \
397 ((struct bpf_insn) { \
398 .code = BPF_JMP | BPF_OP(OP) | BPF_X, \
399 .dst_reg = DST, \
400 .src_reg = SRC, \
401 .off = OFF, \
402 .imm = 0 })
403
404/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */
405
406#define BPF_JMP_IMM(OP, DST, IMM, OFF) \
407 ((struct bpf_insn) { \
408 .code = BPF_JMP | BPF_OP(OP) | BPF_K, \
409 .dst_reg = DST, \
410 .src_reg = 0, \
411 .off = OFF, \
412 .imm = IMM })
413
414/* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */
415
416#define BPF_JMP32_REG(OP, DST, SRC, OFF) \
417 ((struct bpf_insn) { \
418 .code = BPF_JMP32 | BPF_OP(OP) | BPF_X, \
419 .dst_reg = DST, \
420 .src_reg = SRC, \
421 .off = OFF, \
422 .imm = 0 })
423
424/* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */
425
426#define BPF_JMP32_IMM(OP, DST, IMM, OFF) \
427 ((struct bpf_insn) { \
428 .code = BPF_JMP32 | BPF_OP(OP) | BPF_K, \
429 .dst_reg = DST, \
430 .src_reg = 0, \
431 .off = OFF, \
432 .imm = IMM })
433
434/* Unconditional jumps, goto pc + off16 */
435
436#define BPF_JMP_A(OFF) \
437 ((struct bpf_insn) { \
438 .code = BPF_JMP | BPF_JA, \
439 .dst_reg = 0, \
440 .src_reg = 0, \
441 .off = OFF, \
442 .imm = 0 })
443
444/* Unconditional jumps, gotol pc + imm32 */
445
446#define BPF_JMP32_A(IMM) \
447 ((struct bpf_insn) { \
448 .code = BPF_JMP32 | BPF_JA, \
449 .dst_reg = 0, \
450 .src_reg = 0, \
451 .off = 0, \
452 .imm = IMM })
453
454/* Relative call */
455
456#define BPF_CALL_REL(TGT) \
457 ((struct bpf_insn) { \
458 .code = BPF_JMP | BPF_CALL, \
459 .dst_reg = 0, \
460 .src_reg = BPF_PSEUDO_CALL, \
461 .off = 0, \
462 .imm = TGT })
463
464/* Convert function address to BPF immediate */
465
466#define BPF_CALL_IMM(x) ((void *)(x) - (void *)__bpf_call_base)
467
468#define BPF_EMIT_CALL(FUNC) \
469 ((struct bpf_insn) { \
470 .code = BPF_JMP | BPF_CALL, \
471 .dst_reg = 0, \
472 .src_reg = 0, \
473 .off = 0, \
474 .imm = BPF_CALL_IMM(FUNC) })
475
476/* Kfunc call */
477
478#define BPF_CALL_KFUNC(OFF, IMM) \
479 ((struct bpf_insn) { \
480 .code = BPF_JMP | BPF_CALL, \
481 .dst_reg = 0, \
482 .src_reg = BPF_PSEUDO_KFUNC_CALL, \
483 .off = OFF, \
484 .imm = IMM })
485
486/* Raw code statement block */
487
488#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \
489 ((struct bpf_insn) { \
490 .code = CODE, \
491 .dst_reg = DST, \
492 .src_reg = SRC, \
493 .off = OFF, \
494 .imm = IMM })
495
496/* Program exit */
497
498#define BPF_EXIT_INSN() \
499 ((struct bpf_insn) { \
500 .code = BPF_JMP | BPF_EXIT, \
501 .dst_reg = 0, \
502 .src_reg = 0, \
503 .off = 0, \
504 .imm = 0 })
505
506/* Speculation barrier */
507
508#define BPF_ST_NOSPEC() \
509 ((struct bpf_insn) { \
510 .code = BPF_ST | BPF_NOSPEC, \
511 .dst_reg = 0, \
512 .src_reg = 0, \
513 .off = 0, \
514 .imm = 0 })
515
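/* Illustrative example (added, not part of the original header): the
 * initializer macros above can be combined to hand-craft a minimal eBPF
 * program, e.g. one that simply returns the constant 1 in R0:
 *
 *	static const struct bpf_insn prog[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 1),
 *		BPF_EXIT_INSN(),
 *	};
 *
 * R0 holds the return value when BPF_EXIT is reached; this is the style
 * used by the in-kernel code that builds converted/internal filters.
 */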
516/* Internal classic blocks for direct assignment */
517
518#define __BPF_STMT(CODE, K) \
519 ((struct sock_filter) BPF_STMT(CODE, K))
520
521#define __BPF_JUMP(CODE, K, JT, JF) \
522 ((struct sock_filter) BPF_JUMP(CODE, K, JT, JF))
523
524#define bytes_to_bpf_size(bytes) \
525({ \
526 int bpf_size = -EINVAL; \
527 \
528 if (bytes == sizeof(u8)) \
529 bpf_size = BPF_B; \
530 else if (bytes == sizeof(u16)) \
531 bpf_size = BPF_H; \
532 else if (bytes == sizeof(u32)) \
533 bpf_size = BPF_W; \
534 else if (bytes == sizeof(u64)) \
535 bpf_size = BPF_DW; \
536 \
537 bpf_size; \
538})
539
540#define bpf_size_to_bytes(bpf_size) \
541({ \
542 int bytes = -EINVAL; \
543 \
544 if (bpf_size == BPF_B) \
545 bytes = sizeof(u8); \
546 else if (bpf_size == BPF_H) \
547 bytes = sizeof(u16); \
548 else if (bpf_size == BPF_W) \
549 bytes = sizeof(u32); \
550 else if (bpf_size == BPF_DW) \
551 bytes = sizeof(u64); \
552 \
553 bytes; \
554})
555
556#define BPF_SIZEOF(type) \
557 ({ \
558 const int __size = bytes_to_bpf_size(sizeof(type)); \
559 BUILD_BUG_ON(__size < 0); \
560 __size; \
561 })
562
563#define BPF_FIELD_SIZEOF(type, field) \
564 ({ \
565 const int __size = bytes_to_bpf_size(sizeof_field(type, field)); \
566 BUILD_BUG_ON(__size < 0); \
567 __size; \
568 })
569
570#define BPF_LDST_BYTES(insn) \
571 ({ \
572 const int __size = bpf_size_to_bytes(BPF_SIZE((insn)->code)); \
573 WARN_ON(__size < 0); \
574 __size; \
575 })
576
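/* Illustrative example (added, not part of the original header):
 * BPF_SIZEOF(u64) evaluates to BPF_DW and BPF_FIELD_SIZEOF(struct sk_buff,
 * len) evaluates to BPF_W, while BPF_LDST_BYTES() goes the other way and
 * recovers the access width in bytes from an insn's size field.
 */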
577#define __BPF_MAP_0(m, v, ...) v
578#define __BPF_MAP_1(m, v, t, a, ...) m(t, a)
579#define __BPF_MAP_2(m, v, t, a, ...) m(t, a), __BPF_MAP_1(m, v, __VA_ARGS__)
580#define __BPF_MAP_3(m, v, t, a, ...) m(t, a), __BPF_MAP_2(m, v, __VA_ARGS__)
581#define __BPF_MAP_4(m, v, t, a, ...) m(t, a), __BPF_MAP_3(m, v, __VA_ARGS__)
582#define __BPF_MAP_5(m, v, t, a, ...) m(t, a), __BPF_MAP_4(m, v, __VA_ARGS__)
583
584#define __BPF_REG_0(...) __BPF_PAD(5)
585#define __BPF_REG_1(...) __BPF_MAP(1, __VA_ARGS__), __BPF_PAD(4)
586#define __BPF_REG_2(...) __BPF_MAP(2, __VA_ARGS__), __BPF_PAD(3)
587#define __BPF_REG_3(...) __BPF_MAP(3, __VA_ARGS__), __BPF_PAD(2)
588#define __BPF_REG_4(...) __BPF_MAP(4, __VA_ARGS__), __BPF_PAD(1)
589#define __BPF_REG_5(...) __BPF_MAP(5, __VA_ARGS__)
590
591#define __BPF_MAP(n, ...) __BPF_MAP_##n(__VA_ARGS__)
592#define __BPF_REG(n, ...) __BPF_REG_##n(__VA_ARGS__)
593
594#define __BPF_CAST(t, a) \
595 (__force t) \
596 (__force \
597 typeof(__builtin_choose_expr(sizeof(t) == sizeof(unsigned long), \
598 (unsigned long)0, (t)0))) a
599#define __BPF_V void
600#define __BPF_N
601
602#define __BPF_DECL_ARGS(t, a) t a
603#define __BPF_DECL_REGS(t, a) u64 a
604
605#define __BPF_PAD(n) \
606 __BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2, \
607 u64, __ur_3, u64, __ur_4, u64, __ur_5)
608
609#define BPF_CALL_x(x, attr, name, ...) \
610 static __always_inline \
611 u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
612 typedef u64 (*btf_##name)(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
613 attr u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \
614 attr u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)) \
615 { \
616 return ((btf_##name)____##name)(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
617 } \
618 static __always_inline \
619 u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))
620
621#define __NOATTR
622#define BPF_CALL_0(name, ...) BPF_CALL_x(0, __NOATTR, name, __VA_ARGS__)
623#define BPF_CALL_1(name, ...) BPF_CALL_x(1, __NOATTR, name, __VA_ARGS__)
624#define BPF_CALL_2(name, ...) BPF_CALL_x(2, __NOATTR, name, __VA_ARGS__)
625#define BPF_CALL_3(name, ...) BPF_CALL_x(3, __NOATTR, name, __VA_ARGS__)
626#define BPF_CALL_4(name, ...) BPF_CALL_x(4, __NOATTR, name, __VA_ARGS__)
627#define BPF_CALL_5(name, ...) BPF_CALL_x(5, __NOATTR, name, __VA_ARGS__)
628
629#define NOTRACE_BPF_CALL_1(name, ...) BPF_CALL_x(1, notrace, name, __VA_ARGS__)
630
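/* Illustrative example (added, not part of the original header): helpers are
 * defined through these wrappers so the BPF calling convention (up to five
 * u64 arguments) maps onto an ordinary C function, roughly:
 *
 *	BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
 *	{
 *		return (unsigned long)map->ops->map_lookup_elem(map, key);
 *	}
 */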
631#define bpf_ctx_range(TYPE, MEMBER) \
632 offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
633#define bpf_ctx_range_till(TYPE, MEMBER1, MEMBER2) \
634 offsetof(TYPE, MEMBER1) ... offsetofend(TYPE, MEMBER2) - 1
635#if BITS_PER_LONG == 64
636# define bpf_ctx_range_ptr(TYPE, MEMBER) \
637 offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
638#else
639# define bpf_ctx_range_ptr(TYPE, MEMBER) \
640 offsetof(TYPE, MEMBER) ... offsetof(TYPE, MEMBER) + 8 - 1
641#endif /* BITS_PER_LONG == 64 */
642
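/* Illustrative example (added, not part of the original header): these range
 * helpers are typically used as GCC/Clang case ranges when validating context
 * accesses, e.g.:
 *
 *	switch (off) {
 *	case bpf_ctx_range(struct __sk_buff, data):
 *		...
 *	}
 */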
643#define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE) \
644 ({ \
645 BUILD_BUG_ON(sizeof_field(TYPE, MEMBER) != (SIZE)); \
646 *(PTR_SIZE) = (SIZE); \
647 offsetof(TYPE, MEMBER); \
648 })
649
650/* A struct sock_filter is architecture independent. */
651struct compat_sock_fprog {
652 u16 len;
653 compat_uptr_t filter; /* struct sock_filter * */
654};
655
656struct sock_fprog_kern {
657 u16 len;
658 struct sock_filter *filter;
659};
660
661/* Some arches need doubleword alignment for their instructions and/or data */
662#define BPF_IMAGE_ALIGNMENT 8
663
664struct bpf_binary_header {
665 u32 size;
666 u8 image[] __aligned(BPF_IMAGE_ALIGNMENT);
667};
668
669struct bpf_prog_stats {
670 u64_stats_t cnt;
671 u64_stats_t nsecs;
672 u64_stats_t misses;
673 struct u64_stats_sync syncp;
674} __aligned(2 * sizeof(u64));
675
676struct bpf_timed_may_goto {
677 u64 count;
678 u64 timestamp;
679};
680
681struct sk_filter {
682 refcount_t refcnt;
683 struct rcu_head rcu;
684 struct bpf_prog *prog;
685};
686
687DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
688
689extern struct mutex nf_conn_btf_access_lock;
690extern int (*nfct_btf_struct_access)(struct bpf_verifier_log *log,
691 const struct bpf_reg_state *reg,
692 int off, int size);
693
694typedef unsigned int (*bpf_dispatcher_fn)(const void *ctx,
695 const struct bpf_insn *insnsi,
696 unsigned int (*bpf_func)(const void *,
697 const struct bpf_insn *));
698
699static __always_inline u32 __bpf_prog_run(const struct bpf_prog *prog,
700 const void *ctx,
701 bpf_dispatcher_fn dfunc)
702{
703 u32 ret;
704
705 cant_migrate();
706 if (static_branch_unlikely(&bpf_stats_enabled_key)) {
707 struct bpf_prog_stats *stats;
708 u64 duration, start = sched_clock();
709 unsigned long flags;
710
711 ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
712
713 duration = sched_clock() - start;
714 if (likely(prog->stats)) {
715 stats = this_cpu_ptr(prog->stats);
716 flags = u64_stats_update_begin_irqsave(&stats->syncp);
717 u64_stats_inc(&stats->cnt);
718 u64_stats_add(&stats->nsecs, duration);
719 u64_stats_update_end_irqrestore(&stats->syncp, flags);
720 }
721 } else {
722 ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
723 }
724 return ret;
725}
726
727static __always_inline u32 bpf_prog_run(const struct bpf_prog *prog, const void *ctx)
728{
729 return __bpf_prog_run(prog, ctx, bpf_dispatcher_nop_func);
730}
731
732/*
733 * Use in preemptible and therefore migratable context to make sure that
734 * the execution of the BPF program runs on one CPU.
735 *
736 * This uses migrate_disable/enable() explicitly to document that the
737 * invocation of a BPF program does not require reentrancy protection
738 * against a BPF program which is invoked from a preempting task.
739 */
740static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog,
741 const void *ctx)
742{
743 u32 ret;
744
745 migrate_disable();
746 ret = bpf_prog_run(prog, ctx);
747 migrate_enable();
748 return ret;
749}
750
751#define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN
752
753struct bpf_skb_data_end {
754 struct qdisc_skb_cb qdisc_cb;
755 void *data_meta;
756 void *data_end;
757};
758
759struct bpf_nh_params {
760 u32 nh_family;
761 union {
762 u32 ipv4_nh;
763 struct in6_addr ipv6_nh;
764 };
765};
766
767/* flags for bpf_redirect_info kern_flags */
768#define BPF_RI_F_RF_NO_DIRECT BIT(0) /* no napi_direct on return_frame */
769#define BPF_RI_F_RI_INIT BIT(1)
770#define BPF_RI_F_CPU_MAP_INIT BIT(2)
771#define BPF_RI_F_DEV_MAP_INIT BIT(3)
772#define BPF_RI_F_XSK_MAP_INIT BIT(4)
773
774struct bpf_redirect_info {
775 u64 tgt_index;
776 void *tgt_value;
777 struct bpf_map *map;
778 u32 flags;
779 u32 map_id;
780 enum bpf_map_type map_type;
781 struct bpf_nh_params nh;
782 u32 kern_flags;
783};
784
785struct bpf_net_context {
786 struct bpf_redirect_info ri;
787 struct list_head cpu_map_flush_list;
788 struct list_head dev_map_flush_list;
789 struct list_head xskmap_map_flush_list;
790};
791
792static inline struct bpf_net_context *bpf_net_ctx_set(struct bpf_net_context *bpf_net_ctx)
793{
794 struct task_struct *tsk = current;
795
796 if (tsk->bpf_net_context != NULL)
797 return NULL;
798 bpf_net_ctx->ri.kern_flags = 0;
799
800 tsk->bpf_net_context = bpf_net_ctx;
801 return bpf_net_ctx;
802}
803
804static inline void bpf_net_ctx_clear(struct bpf_net_context *bpf_net_ctx)
805{
806 if (bpf_net_ctx)
807 current->bpf_net_context = NULL;
808}
809
810static inline struct bpf_net_context *bpf_net_ctx_get(void)
811{
812 return current->bpf_net_context;
813}
814
815static inline struct bpf_redirect_info *bpf_net_ctx_get_ri(void)
816{
817 struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
818
819 if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_RI_INIT)) {
820 memset(&bpf_net_ctx->ri, 0, offsetof(struct bpf_net_context, ri.nh));
821 bpf_net_ctx->ri.kern_flags |= BPF_RI_F_RI_INIT;
822 }
823
824 return &bpf_net_ctx->ri;
825}
826
827static inline struct list_head *bpf_net_ctx_get_cpu_map_flush_list(void)
828{
829 struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
830
831 if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_CPU_MAP_INIT)) {
832 INIT_LIST_HEAD(&bpf_net_ctx->cpu_map_flush_list);
833 bpf_net_ctx->ri.kern_flags |= BPF_RI_F_CPU_MAP_INIT;
834 }
835
836 return &bpf_net_ctx->cpu_map_flush_list;
837}
838
839static inline struct list_head *bpf_net_ctx_get_dev_flush_list(void)
840{
841 struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
842
843 if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_DEV_MAP_INIT)) {
844 INIT_LIST_HEAD(&bpf_net_ctx->dev_map_flush_list);
845 bpf_net_ctx->ri.kern_flags |= BPF_RI_F_DEV_MAP_INIT;
846 }
847
848 return &bpf_net_ctx->dev_map_flush_list;
849}
850
851static inline struct list_head *bpf_net_ctx_get_xskmap_flush_list(void)
852{
853 struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
854
855 if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_XSK_MAP_INIT)) {
856 INIT_LIST_HEAD(&bpf_net_ctx->xskmap_map_flush_list);
857 bpf_net_ctx->ri.kern_flags |= BPF_RI_F_XSK_MAP_INIT;
858 }
859
860 return &bpf_net_ctx->xskmap_map_flush_list;
861}
862
863static inline void bpf_net_ctx_get_all_used_flush_lists(struct list_head **lh_map,
864 struct list_head **lh_dev,
865 struct list_head **lh_xsk)
866{
867 struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
868 u32 kern_flags = bpf_net_ctx->ri.kern_flags;
869 struct list_head *lh;
870
871 *lh_map = *lh_dev = *lh_xsk = NULL;
872
873 if (!IS_ENABLED(CONFIG_BPF_SYSCALL))
874 return;
875
876 lh = &bpf_net_ctx->dev_map_flush_list;
877 if (kern_flags & BPF_RI_F_DEV_MAP_INIT && !list_empty(lh))
878 *lh_dev = lh;
879
880 lh = &bpf_net_ctx->cpu_map_flush_list;
881 if (kern_flags & BPF_RI_F_CPU_MAP_INIT && !list_empty(lh))
882 *lh_map = lh;
883
884 lh = &bpf_net_ctx->xskmap_map_flush_list;
885 if (IS_ENABLED(CONFIG_XDP_SOCKETS) &&
886 kern_flags & BPF_RI_F_XSK_MAP_INIT && !list_empty(lh))
887 *lh_xsk = lh;
888}
889
890/* Compute the linear packet data range [data, data_end) which
891 * will be accessed by various program types (cls_bpf, act_bpf,
892 * lwt, ...). Subsystems allowing direct data access must (!)
893 * ensure that cb[] area can be written to when BPF program is
894 * invoked (otherwise cb[] save/restore is necessary).
895 */
896static inline void bpf_compute_data_pointers(struct sk_buff *skb)
897{
898 struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
899
900 BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb));
901 cb->data_meta = skb->data - skb_metadata_len(skb);
902 cb->data_end = skb->data + skb_headlen(skb);
903}
904
905static inline int bpf_prog_run_data_pointers(
906 const struct bpf_prog *prog,
907 struct sk_buff *skb)
908{
909 struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
910 void *save_data_meta, *save_data_end;
911 int res;
912
913 save_data_meta = cb->data_meta;
914 save_data_end = cb->data_end;
915
916 bpf_compute_data_pointers(skb);
917 res = bpf_prog_run(prog, skb);
918
919 cb->data_meta = save_data_meta;
920 cb->data_end = save_data_end;
921
922 return res;
923}
924
925/* Similar to bpf_compute_data_pointers(), except that the original
926 * cb->data_end is saved in *saved_data_end so that it can be restored later.
927 */
928static inline void bpf_compute_and_save_data_end(
929 struct sk_buff *skb, void **saved_data_end)
930{
931 struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
932
933 *saved_data_end = cb->data_end;
934 cb->data_end = skb->data + skb_headlen(skb);
935}
936
937/* Restore data saved by bpf_compute_and_save_data_end(). */
938static inline void bpf_restore_data_end(
939 struct sk_buff *skb, void *saved_data_end)
940{
941 struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
942
943 cb->data_end = saved_data_end;
944}
945
946static inline u8 *bpf_skb_cb(const struct sk_buff *skb)
947{
948 /* eBPF programs may read/write skb->cb[] area to transfer meta
949 * data between tail calls. Since this also needs to work with
950 * tc, that scratch memory is mapped to qdisc_skb_cb's data area.
951 *
952 * In some socket filter cases, the cb unfortunately needs to be
953 * saved/restored so that protocol specific skb->cb[] data won't
954 * be lost. In any case, due to unprivileged eBPF programs
955 * attached to sockets, we need to clear the bpf_skb_cb() area
956 * to not leak previous contents to user space.
957 */
958 BUILD_BUG_ON(sizeof_field(struct __sk_buff, cb) != BPF_SKB_CB_LEN);
959 BUILD_BUG_ON(sizeof_field(struct __sk_buff, cb) !=
960 sizeof_field(struct qdisc_skb_cb, data));
961
962 return qdisc_skb_cb(skb)->data;
963}
964
965/* Must be invoked with migration disabled */
966static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
967 const void *ctx)
968{
969 const struct sk_buff *skb = ctx;
970 u8 *cb_data = bpf_skb_cb(skb);
971 u8 cb_saved[BPF_SKB_CB_LEN];
972 u32 res;
973
974 if (unlikely(prog->cb_access)) {
975 memcpy(cb_saved, cb_data, sizeof(cb_saved));
976 memset(cb_data, 0, sizeof(cb_saved));
977 }
978
979 res = bpf_prog_run(prog, skb);
980
981 if (unlikely(prog->cb_access))
982 memcpy(cb_data, cb_saved, sizeof(cb_saved));
983
984 return res;
985}
986
987static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
988 struct sk_buff *skb)
989{
990 u32 res;
991
992 migrate_disable();
993 res = __bpf_prog_run_save_cb(prog, skb);
994 migrate_enable();
995 return res;
996}
997
998static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
999 struct sk_buff *skb)
1000{
1001 u8 *cb_data = bpf_skb_cb(skb);
1002 u32 res;
1003
1004 if (unlikely(prog->cb_access))
1005 memset(cb_data, 0, BPF_SKB_CB_LEN);
1006
1007 res = bpf_prog_run_pin_on_cpu(prog, skb);
1008 return res;
1009}
1010
1011DECLARE_BPF_DISPATCHER(xdp)
1012
1013DECLARE_STATIC_KEY_FALSE(bpf_master_redirect_enabled_key);
1014
1015u32 xdp_master_redirect(struct xdp_buff *xdp);
1016
1017void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog);
1018
1019static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
1020{
1021 return prog->len * sizeof(struct bpf_insn);
1022}
1023
1024static inline unsigned int bpf_prog_size(unsigned int proglen)
1025{
1026 return max(sizeof(struct bpf_prog),
1027 offsetof(struct bpf_prog, insns[proglen]));
1028}
1029
1030static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
1031{
1032 /* When classic BPF programs have been loaded and the arch
1033 * does not have a classic BPF JIT (anymore), they have been
1034 * converted via bpf_migrate_filter() to eBPF and thus always
1035 * have an unspec program type.
1036 */
1037 return prog->type == BPF_PROG_TYPE_UNSPEC;
1038}
1039
1040static inline u32 bpf_ctx_off_adjust_machine(u32 size)
1041{
1042 const u32 size_machine = sizeof(unsigned long);
1043
1044 if (size > size_machine && size % size_machine == 0)
1045 size = size_machine;
1046
1047 return size;
1048}
1049
1050static inline bool
1051bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
1052{
1053 return size <= size_default && (size & (size - 1)) == 0;
1054}
1055
1056static inline u8
1057bpf_ctx_narrow_access_offset(u32 off, u32 size, u32 size_default)
1058{
1059 u8 access_off = off & (size_default - 1);
1060
1061#ifdef __LITTLE_ENDIAN
1062 return access_off;
1063#else
1064 return size_default - (access_off + size);
1065#endif
1066}
1067
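/* Illustrative example (added, not part of the original header): a 1-byte
 * narrow load at off 2 within a 4-byte (size_default) field gives
 * access_off = 2, so the adjusted offset is 2 on little-endian and
 * 4 - (2 + 1) = 1 on big-endian hosts.
 */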
1068#define bpf_ctx_wide_access_ok(off, size, type, field) \
1069 (size == sizeof(__u64) && \
1070 off >= offsetof(type, field) && \
1071 off + sizeof(__u64) <= offsetofend(type, field) && \
1072 off % sizeof(__u64) == 0)
1073
1074#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
1075
1076static inline int __must_check bpf_prog_lock_ro(struct bpf_prog *fp)
1077{
1078#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1079 if (!fp->jited) {
1080 set_vm_flush_reset_perms(fp);
1081 return set_memory_ro((unsigned long)fp, fp->pages);
1082 }
1083#endif
1084 return 0;
1085}
1086
1087static inline int __must_check
1088bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
1089{
1090 set_vm_flush_reset_perms(hdr);
1091 return set_memory_rox((unsigned long)hdr, hdr->size >> PAGE_SHIFT);
1092}
1093
1094enum skb_drop_reason
1095sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
1096
1097static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
1098{
1099 enum skb_drop_reason drop_reason;
1100
1101 drop_reason = sk_filter_trim_cap(sk, skb, 1);
1102 return drop_reason ? -EPERM : 0;
1103}
1104
1105static inline enum skb_drop_reason
1106sk_filter_reason(struct sock *sk, struct sk_buff *skb)
1107{
1108 return sk_filter_trim_cap(sk, skb, 1);
1109}
1110
1111struct bpf_prog *__bpf_prog_select_runtime(struct bpf_verifier_env *env, struct bpf_prog *fp,
1112 int *err);
1113struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
1114void bpf_prog_free(struct bpf_prog *fp);
1115
1116bool bpf_opcode_in_insntable(u8 code);
1117
1118void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
1119 const u32 *insn_to_jit_off);
1120int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog);
1121void bpf_prog_jit_attempt_done(struct bpf_prog *prog);
1122
1123struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
1124struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags);
1125struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
1126 gfp_t gfp_extra_flags);
1127void __bpf_prog_free(struct bpf_prog *fp);
1128
1129static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
1130{
1131 __bpf_prog_free(fp);
1132}
1133
1134typedef int (*bpf_aux_classic_check_t)(struct sock_filter *filter,
1135 unsigned int flen);
1136
1137int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
1138int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
1139 bpf_aux_classic_check_t trans, bool save_orig);
1140void bpf_prog_destroy(struct bpf_prog *fp);
1141
1142int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
1143int sk_attach_bpf(u32 ufd, struct sock *sk);
1144int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
1145int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
1146void sk_reuseport_prog_free(struct bpf_prog *prog);
1147int sk_detach_filter(struct sock *sk);
1148int sk_get_filter(struct sock *sk, sockptr_t optval, unsigned int len);
1149
1150bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
1151void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
1152
1153u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
1154#define __bpf_call_base_args \
1155 ((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \
1156 (void *)__bpf_call_base)
1157
1158struct bpf_prog *bpf_int_jit_compile(struct bpf_verifier_env *env, struct bpf_prog *prog);
1159void bpf_jit_compile(struct bpf_prog *prog);
1160bool bpf_jit_needs_zext(void);
1161bool bpf_jit_inlines_helper_call(s32 imm);
1162bool bpf_jit_supports_subprog_tailcalls(void);
1163bool bpf_jit_supports_percpu_insn(void);
1164bool bpf_jit_supports_kfunc_call(void);
1165bool bpf_jit_supports_far_kfunc_call(void);
1166bool bpf_jit_supports_exceptions(void);
1167bool bpf_jit_supports_ptr_xchg(void);
1168bool bpf_jit_supports_arena(void);
1169bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena);
1170bool bpf_jit_supports_private_stack(void);
1171bool bpf_jit_supports_timed_may_goto(void);
1172bool bpf_jit_supports_fsession(void);
1173u64 bpf_arch_uaddress_limit(void);
1174void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie);
1175u64 arch_bpf_timed_may_goto(void);
1176u64 bpf_check_timed_may_goto(struct bpf_timed_may_goto *);
1177bool bpf_helper_changes_pkt_data(enum bpf_func_id func_id);
1178
1179static inline bool bpf_dump_raw_ok(const struct cred *cred)
1180{
1181 /* Reconstruction of call-sites is dependent on kallsyms,
1182 * thus the dump is subject to the same restriction.
1183 */
1184 return kallsyms_show_value(cred);
1185}
1186
1187struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
1188 const struct bpf_insn *patch, u32 len);
1189
1190#ifdef CONFIG_BPF_SYSCALL
1191struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
1192 const struct bpf_insn *patch, u32 len);
1193struct bpf_insn_aux_data *bpf_dup_insn_aux_data(struct bpf_verifier_env *env);
1194void bpf_restore_insn_aux_data(struct bpf_verifier_env *env,
1195 struct bpf_insn_aux_data *orig_insn_aux);
1196#else
1197static inline struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
1198 const struct bpf_insn *patch, u32 len)
1199{
1200 return ERR_PTR(-ENOTSUPP);
1201}
1202
1203static inline struct bpf_insn_aux_data *bpf_dup_insn_aux_data(struct bpf_verifier_env *env)
1204{
1205 return NULL;
1206}
1207
1208static inline void bpf_restore_insn_aux_data(struct bpf_verifier_env *env,
1209 struct bpf_insn_aux_data *orig_insn_aux)
1210{
1211}
1212#endif /* CONFIG_BPF_SYSCALL */
1213
1214int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt);
1215
1216static inline bool xdp_return_frame_no_direct(void)
1217{
1218 struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
1219
1220 return ri->kern_flags & BPF_RI_F_RF_NO_DIRECT;
1221}
1222
1223static inline void xdp_set_return_frame_no_direct(void)
1224{
1225 struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
1226
1227 ri->kern_flags |= BPF_RI_F_RF_NO_DIRECT;
1228}
1229
1230static inline void xdp_clear_return_frame_no_direct(void)
1231{
1232 struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
1233
1234 ri->kern_flags &= ~BPF_RI_F_RF_NO_DIRECT;
1235}
1236
1237static inline int xdp_ok_fwd_dev(const struct net_device *fwd,
1238 unsigned int pktlen)
1239{
1240 unsigned int len;
1241
1242 if (unlikely(!(fwd->flags & IFF_UP)))
1243 return -ENETDOWN;
1244
1245 len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
1246 if (pktlen > len)
1247 return -EMSGSIZE;
1248
1249 return 0;
1250}
1251
1252/* The pair of xdp_do_redirect and xdp_do_flush MUST be called in the
1253 * same CPU context. Further, for best results, no more than a single map
1254 * should be used for the do_redirect/do_flush pair. This limitation is
1255 * because we only track one map and force a flush when the map changes.
1256 * This does not appear to be a real limitation for existing software.
1257 */
1258int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
1259 struct xdp_buff *xdp, const struct bpf_prog *prog);
1260int xdp_do_redirect(struct net_device *dev,
1261 struct xdp_buff *xdp,
1262 const struct bpf_prog *prog);
1263int xdp_do_redirect_frame(struct net_device *dev,
1264 struct xdp_buff *xdp,
1265 struct xdp_frame *xdpf,
1266 const struct bpf_prog *prog);
1267void xdp_do_flush(void);
1268
1269void bpf_warn_invalid_xdp_action(const struct net_device *dev,
1270 const struct bpf_prog *prog, u32 act);
1271
1272#ifdef CONFIG_INET
1273struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
1274 struct bpf_prog *prog, struct sk_buff *skb,
1275 struct sock *migrating_sk,
1276 u32 hash);
1277#else
1278static inline struct sock *
1279bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
1280 struct bpf_prog *prog, struct sk_buff *skb,
1281 struct sock *migrating_sk,
1282 u32 hash)
1283{
1284 return NULL;
1285}
1286#endif
1287
1288#ifdef CONFIG_BPF_JIT
1289extern int bpf_jit_enable;
1290extern int bpf_jit_harden;
1291extern int bpf_jit_kallsyms;
1292extern long bpf_jit_limit;
1293extern long bpf_jit_limit_max;
1294
1295typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
1296
1297void bpf_jit_fill_hole_with_zero(void *area, unsigned int size);
1298
1299struct bpf_binary_header *
1300bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
1301 unsigned int alignment,
1302 bpf_jit_fill_hole_t bpf_fill_ill_insns);
1303void bpf_jit_binary_free(struct bpf_binary_header *hdr);
1304u64 bpf_jit_alloc_exec_limit(void);
1305void *bpf_jit_alloc_exec(unsigned long size);
1306void bpf_jit_free_exec(void *addr);
1307void bpf_jit_free(struct bpf_prog *fp);
1308struct bpf_binary_header *
1309bpf_jit_binary_pack_hdr(const struct bpf_prog *fp);
1310
1311void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns);
1312void bpf_prog_pack_free(void *ptr, u32 size);
1313
1314static inline bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
1315{
1316 return list_empty(&fp->aux->ksym.lnode) ||
1317 fp->aux->ksym.lnode.prev == LIST_POISON2;
1318}
1319
1320struct bpf_binary_header *
1321bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **ro_image,
1322 unsigned int alignment,
1323 struct bpf_binary_header **rw_hdr,
1324 u8 **rw_image,
1325 bpf_jit_fill_hole_t bpf_fill_ill_insns);
1326int bpf_jit_binary_pack_finalize(struct bpf_binary_header *ro_header,
1327 struct bpf_binary_header *rw_header);
1328void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
1329 struct bpf_binary_header *rw_header);
1330
1331int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
1332 struct bpf_jit_poke_descriptor *poke);
1333
1334int bpf_jit_get_func_addr(const struct bpf_prog *prog,
1335 const struct bpf_insn *insn, bool extra_pass,
1336 u64 *func_addr, bool *func_addr_fixed);
1337
1338const char *bpf_jit_get_prog_name(struct bpf_prog *prog);
1339
1340struct bpf_prog *bpf_jit_blind_constants(struct bpf_verifier_env *env, struct bpf_prog *prog);
1341void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);
1342
1343static inline bool bpf_prog_need_blind(const struct bpf_prog *prog)
1344{
1345 return prog->blinding_requested && !prog->blinded;
1346}
1347
1348static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
1349 u32 pass, void *image)
1350{
1351 pr_err("flen=%u proglen=%u pass=%u image=%p from=%s pid=%d\n", flen,
1352 proglen, pass, image, current->comm, task_pid_nr(current));
1353
1354 if (image)
1355 print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
1356 16, 1, image, proglen, false);
1357}
1358
1359static inline bool bpf_jit_is_ebpf(void)
1360{
1361# ifdef CONFIG_HAVE_EBPF_JIT
1362 return true;
1363# else
1364 return false;
1365# endif
1366}
1367
1368static inline bool ebpf_jit_enabled(void)
1369{
1370 return bpf_jit_enable && bpf_jit_is_ebpf();
1371}
1372
1373static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
1374{
1375 return fp->jited && bpf_jit_is_ebpf();
1376}
1377
1378static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
1379{
1380 /* These are the prerequisites; should someone ever have the
1381 * idea to call blinding outside of them, we make sure to
1382 * bail out.
1383 */
1384 if (!bpf_jit_is_ebpf())
1385 return false;
1386 if (!prog->jit_requested)
1387 return false;
1388 if (!bpf_jit_harden)
1389 return false;
1390 if (bpf_jit_harden == 1 && bpf_token_capable(prog->aux->token, CAP_BPF))
1391 return false;
1392
1393 return true;
1394}
1395
1396static inline bool bpf_jit_kallsyms_enabled(void)
1397{
1398 /* There are a couple of corner cases where kallsyms should
1399 * not be enabled f.e. on hardening.
1400 */
1401 if (bpf_jit_harden)
1402 return false;
1403 if (!bpf_jit_kallsyms)
1404 return false;
1405 if (bpf_jit_kallsyms == 1)
1406 return true;
1407
1408 return false;
1409}
1410
1411int bpf_address_lookup(unsigned long addr, unsigned long *size,
1412 unsigned long *off, char *sym);
1413bool is_bpf_text_address(unsigned long addr);
1414int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
1415 char *sym);
1416struct bpf_prog *bpf_prog_ksym_find(unsigned long addr);
1417
1418void bpf_prog_kallsyms_add(struct bpf_prog *fp);
1419void bpf_prog_kallsyms_del(struct bpf_prog *fp);
1420
1421#else /* CONFIG_BPF_JIT */
1422
1423static inline bool ebpf_jit_enabled(void)
1424{
1425 return false;
1426}
1427
1428static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
1429{
1430 return false;
1431}
1432
1433static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
1434{
1435 return false;
1436}
1437
1438static inline int
1439bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
1440 struct bpf_jit_poke_descriptor *poke)
1441{
1442 return -ENOTSUPP;
1443}
1444
1445static inline void bpf_jit_free(struct bpf_prog *fp)
1446{
1447 bpf_prog_unlock_free(fp);
1448}
1449
1450static inline bool bpf_jit_kallsyms_enabled(void)
1451{
1452 return false;
1453}
1454
1455static inline int
1456bpf_address_lookup(unsigned long addr, unsigned long *size,
1457 unsigned long *off, char *sym)
1458{
1459 return 0;
1460}
1461
1462static inline bool is_bpf_text_address(unsigned long addr)
1463{
1464 return false;
1465}
1466
1467static inline int bpf_get_kallsym(unsigned int symnum, unsigned long *value,
1468 char *type, char *sym)
1469{
1470 return -ERANGE;
1471}
1472
1473static inline struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
1474{
1475 return NULL;
1476}
1477
1478static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp)
1479{
1480}
1481
1482static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp)
1483{
1484}
1485
1486static inline bool bpf_prog_need_blind(const struct bpf_prog *prog)
1487{
1488 return false;
1489}
1490
1491static inline
1492struct bpf_prog *bpf_jit_blind_constants(struct bpf_verifier_env *env, struct bpf_prog *prog)
1493{
1494 return prog;
1495}
1496
1497static inline void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
1498{
1499}
1500#endif /* CONFIG_BPF_JIT */
1501
1502void bpf_prog_kallsyms_del_all(struct bpf_prog *fp);
1503
1504#define BPF_ANC BIT(15)
1505
1506static inline bool bpf_needs_clear_a(const struct sock_filter *first)
1507{
1508 switch (first->code) {
1509 case BPF_RET | BPF_K:
1510 case BPF_LD | BPF_W | BPF_LEN:
1511 return false;
1512
1513 case BPF_LD | BPF_W | BPF_ABS:
1514 case BPF_LD | BPF_H | BPF_ABS:
1515 case BPF_LD | BPF_B | BPF_ABS:
1516 if (first->k == SKF_AD_OFF + SKF_AD_ALU_XOR_X)
1517 return true;
1518 return false;
1519
1520 default:
1521 return true;
1522 }
1523}
1524
1525static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
1526{
1527 BUG_ON(ftest->code & BPF_ANC);
1528
1529 switch (ftest->code) {
1530 case BPF_LD | BPF_W | BPF_ABS:
1531 case BPF_LD | BPF_H | BPF_ABS:
1532 case BPF_LD | BPF_B | BPF_ABS:
1533#define BPF_ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE: \
1534 return BPF_ANC | SKF_AD_##CODE
1535 switch (ftest->k) {
1536 BPF_ANCILLARY(PROTOCOL);
1537 BPF_ANCILLARY(PKTTYPE);
1538 BPF_ANCILLARY(IFINDEX);
1539 BPF_ANCILLARY(NLATTR);
1540 BPF_ANCILLARY(NLATTR_NEST);
1541 BPF_ANCILLARY(MARK);
1542 BPF_ANCILLARY(QUEUE);
1543 BPF_ANCILLARY(HATYPE);
1544 BPF_ANCILLARY(RXHASH);
1545 BPF_ANCILLARY(CPU);
1546 BPF_ANCILLARY(ALU_XOR_X);
1547 BPF_ANCILLARY(VLAN_TAG);
1548 BPF_ANCILLARY(VLAN_TAG_PRESENT);
1549 BPF_ANCILLARY(PAY_OFFSET);
1550 BPF_ANCILLARY(RANDOM);
1551 BPF_ANCILLARY(VLAN_TPID);
1552 }
1553 fallthrough;
1554 default:
1555 return ftest->code;
1556 }
1557}
1558
1559void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
1560 int k, unsigned int size);
1561
1562static inline int bpf_tell_extensions(void)
1563{
1564 return SKF_AD_MAX;
1565}
1566
1567struct bpf_sock_addr_kern {
1568 struct sock *sk;
1569 struct sockaddr_unsized *uaddr;
1570 /* Temporary "register" to make indirect stores to nested structures
1571 * defined above. We need three registers to make such a store, but
1572 * only two (src and dst) are available at convert_ctx_access time
1573 */
1574 u64 tmp_reg;
1575 void *t_ctx; /* Attach type specific context. */
1576 u32 uaddrlen;
1577};
1578
1579struct bpf_sock_ops_kern {
1580 struct sock *sk;
1581 union {
1582 u32 args[4];
1583 u32 reply;
1584 u32 replylong[4];
1585 };
1586 struct sk_buff *syn_skb;
1587 struct sk_buff *skb;
1588 void *skb_data_end;
1589 u8 op;
1590 u8 is_fullsock;
1591 u8 is_locked_tcp_sock;
1592 u8 remaining_opt_len;
1593 u64 temp; /* temp and everything after is not
1594 * initialized to 0 before calling
1595 * the BPF program. New fields that
1596 * should be initialized to 0 should
1597 * be inserted before temp.
1598 * temp is scratch storage used by
1599 * sock_ops_convert_ctx_access
1600 * as temporary storage of a register.
1601 */
1602};
1603
1604struct bpf_sysctl_kern {
1605 struct ctl_table_header *head;
1606 const struct ctl_table *table;
1607 void *cur_val;
1608 size_t cur_len;
1609 void *new_val;
1610 size_t new_len;
1611 int new_updated;
1612 int write;
1613 loff_t *ppos;
1614 /* Temporary "register" for indirect stores to ppos. */
1615 u64 tmp_reg;
1616};
1617
1618#define BPF_SOCKOPT_KERN_BUF_SIZE 32
1619struct bpf_sockopt_buf {
1620 u8 data[BPF_SOCKOPT_KERN_BUF_SIZE];
1621};
1622
1623struct bpf_sockopt_kern {
1624 struct sock *sk;
1625 u8 *optval;
1626 u8 *optval_end;
1627 s32 level;
1628 s32 optname;
1629 s32 optlen;
1630 /* for retval in struct bpf_cg_run_ctx */
1631 struct task_struct *current_task;
1632 /* Temporary "register" for indirect stores to ppos. */
1633 u64 tmp_reg;
1634};
1635
1636int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len);
1637
1638struct bpf_sk_lookup_kern {
1639 u16 family;
1640 u16 protocol;
1641 __be16 sport;
1642 u16 dport;
1643 struct {
1644 __be32 saddr;
1645 __be32 daddr;
1646 } v4;
1647 struct {
1648 const struct in6_addr *saddr;
1649 const struct in6_addr *daddr;
1650 } v6;
1651 struct sock *selected_sk;
1652 u32 ingress_ifindex;
1653 bool no_reuseport;
1654};
1655
1656extern struct static_key_false bpf_sk_lookup_enabled;
1657
1658/* Runners for BPF_SK_LOOKUP programs to invoke on socket lookup.
1659 *
1660 * Allowed return values for a BPF SK_LOOKUP program are SK_PASS and
1661 * SK_DROP. Their meaning is as follows:
1662 *
1663 * SK_PASS && ctx.selected_sk != NULL: use selected_sk as lookup result
1664 * SK_PASS && ctx.selected_sk == NULL: continue to htable-based socket lookup
1665 * SK_DROP : terminate lookup with -ECONNREFUSED
1666 *
1667 * This macro aggregates return values and selected sockets from
1668 * multiple BPF programs according to the following rules, in order:
1669 *
1670 * 1. If any program returned SK_PASS and a non-NULL ctx.selected_sk,
1671 * macro result is SK_PASS and last ctx.selected_sk is used.
1672 * 2. If any program returned SK_DROP,
1673 * macro result is SK_DROP.
1674 * 3. Otherwise result is SK_PASS and ctx.selected_sk is NULL.
1675 *
1676 * Caller must ensure that the prog array is non-NULL, and that the
1677 * array as well as the programs it contains remain valid.
1678 */
1679#define BPF_PROG_SK_LOOKUP_RUN_ARRAY(array, ctx, func) \
1680 ({ \
1681 struct bpf_sk_lookup_kern *_ctx = &(ctx); \
1682 struct bpf_prog_array_item *_item; \
1683 struct sock *_selected_sk = NULL; \
1684 bool _no_reuseport = false; \
1685 struct bpf_prog *_prog; \
1686 bool _all_pass = true; \
1687 u32 _ret; \
1688 \
1689 migrate_disable(); \
1690 _item = &(array)->items[0]; \
1691 while ((_prog = READ_ONCE(_item->prog))) { \
1692 /* restore most recent selection */ \
1693 _ctx->selected_sk = _selected_sk; \
1694 _ctx->no_reuseport = _no_reuseport; \
1695 \
1696 _ret = func(_prog, _ctx); \
1697 if (_ret == SK_PASS && _ctx->selected_sk) { \
1698 /* remember last non-NULL socket */ \
1699 _selected_sk = _ctx->selected_sk; \
1700 _no_reuseport = _ctx->no_reuseport; \
1701 } else if (_ret == SK_DROP && _all_pass) { \
1702 _all_pass = false; \
1703 } \
1704 _item++; \
1705 } \
1706 _ctx->selected_sk = _selected_sk; \
1707 _ctx->no_reuseport = _no_reuseport; \
1708 migrate_enable(); \
1709 _all_pass || _selected_sk ? SK_PASS : SK_DROP; \
1710 })
1711
1712static inline bool bpf_sk_lookup_run_v4(const struct net *net, int protocol,
1713 const __be32 saddr, const __be16 sport,
1714 const __be32 daddr, const u16 dport,
1715 const int ifindex, struct sock **psk)
1716{
1717 struct bpf_prog_array *run_array;
1718 struct sock *selected_sk = NULL;
1719 bool no_reuseport = false;
1720
1721 rcu_read_lock();
1722 run_array = rcu_dereference(net->bpf.run_array[NETNS_BPF_SK_LOOKUP]);
1723 if (run_array) {
1724 struct bpf_sk_lookup_kern ctx = {
1725 .family = AF_INET,
1726 .protocol = protocol,
1727 .v4.saddr = saddr,
1728 .v4.daddr = daddr,
1729 .sport = sport,
1730 .dport = dport,
1731 .ingress_ifindex = ifindex,
1732 };
1733 u32 act;
1734
1735 act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, bpf_prog_run);
1736 if (act == SK_PASS) {
1737 selected_sk = ctx.selected_sk;
1738 no_reuseport = ctx.no_reuseport;
1739 } else {
1740 selected_sk = ERR_PTR(-ECONNREFUSED);
1741 }
1742 }
1743 rcu_read_unlock();
1744 *psk = selected_sk;
1745 return no_reuseport;
1746}
1747
1748#if IS_ENABLED(CONFIG_IPV6)
1749static inline bool bpf_sk_lookup_run_v6(const struct net *net, int protocol,
1750 const struct in6_addr *saddr,
1751 const __be16 sport,
1752 const struct in6_addr *daddr,
1753 const u16 dport,
1754 const int ifindex, struct sock **psk)
1755{
1756 struct bpf_prog_array *run_array;
1757 struct sock *selected_sk = NULL;
1758 bool no_reuseport = false;
1759
1760 rcu_read_lock();
1761 run_array = rcu_dereference(net->bpf.run_array[NETNS_BPF_SK_LOOKUP]);
1762 if (run_array) {
1763 struct bpf_sk_lookup_kern ctx = {
1764 .family = AF_INET6,
1765 .protocol = protocol,
1766 .v6.saddr = saddr,
1767 .v6.daddr = daddr,
1768 .sport = sport,
1769 .dport = dport,
1770 .ingress_ifindex = ifindex,
1771 };
1772 u32 act;
1773
1774 act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, bpf_prog_run);
1775 if (act == SK_PASS) {
1776 selected_sk = ctx.selected_sk;
1777 no_reuseport = ctx.no_reuseport;
1778 } else {
1779 selected_sk = ERR_PTR(-ECONNREFUSED);
1780 }
1781 }
1782 rcu_read_unlock();
1783 *psk = selected_sk;
1784 return no_reuseport;
1785}
1786#endif /* IS_ENABLED(CONFIG_IPV6) */
1787
1788static __always_inline long __bpf_xdp_redirect_map(struct bpf_map *map, u64 index,
1789 u64 flags, const u64 flag_mask,
1790 void *lookup_elem(struct bpf_map *map, u32 key))
1791{
1792 struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
1793 const u64 action_mask = XDP_ABORTED | XDP_DROP | XDP_PASS | XDP_TX;
1794
1795 /* Lower bits of the flags are used as return code on lookup failure */
1796 if (unlikely(flags & ~(action_mask | flag_mask)))
1797 return XDP_ABORTED;
1798
1799 ri->tgt_value = lookup_elem(map, index);
1800 if (unlikely(!ri->tgt_value) && !(flags & BPF_F_BROADCAST)) {
1801 /* If the lookup fails we want to clear out the state in the
1802 * redirect_info struct completely, so that if an eBPF program
1803 * performs multiple lookups, the last one always takes
1804 * precedence.
1805 */
1806 ri->map_id = INT_MAX; /* Valid map id idr range: [1,INT_MAX[ */
1807 ri->map_type = BPF_MAP_TYPE_UNSPEC;
1808 return flags & action_mask;
1809 }
1810
1811 ri->tgt_index = index;
1812 ri->map_id = map->id;
1813 ri->map_type = map->map_type;
1814
1815 if (flags & BPF_F_BROADCAST) {
1816 WRITE_ONCE(ri->map, map);
1817 ri->flags = flags;
1818 } else {
1819 WRITE_ONCE(ri->map, NULL);
1820 ri->flags = 0;
1821 }
1822
1823 return XDP_REDIRECT;
1824}
1825
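/* Sketch (added, not part of the original header; exact names may differ):
 * the per-map-type ->map_redirect() callbacks are thin wrappers around the
 * helper above, e.g. for a devmap:
 *
 *	static long dev_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
 *	{
 *		return __bpf_xdp_redirect_map(map, ifindex, flags,
 *					      BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
 *					      __dev_map_lookup_elem);
 *	}
 */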
1826#ifdef CONFIG_NET
1827int __bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset, void *to, u32 len);
1828int __bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from,
1829 u32 len, u64 flags);
1830int __bpf_xdp_load_bytes(struct xdp_buff *xdp, u32 offset, void *buf, u32 len);
1831int __bpf_xdp_store_bytes(struct xdp_buff *xdp, u32 offset, void *buf, u32 len);
1832void *bpf_xdp_pointer(struct xdp_buff *xdp, u32 offset, u32 len);
1833void bpf_xdp_copy_buf(struct xdp_buff *xdp, unsigned long off,
1834 void *buf, unsigned long len, bool flush);
1835int __bpf_skb_meta_store_bytes(struct sk_buff *skb, u32 offset,
1836 const void *from, u32 len, u64 flags);
1837void *bpf_skb_meta_pointer(struct sk_buff *skb, u32 offset);
1838#else /* CONFIG_NET */
1839static inline int __bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset,
1840 void *to, u32 len)
1841{
1842 return -EOPNOTSUPP;
1843}
1844
1845static inline int __bpf_skb_store_bytes(struct sk_buff *skb, u32 offset,
1846 const void *from, u32 len, u64 flags)
1847{
1848 return -EOPNOTSUPP;
1849}
1850
1851static inline int __bpf_xdp_load_bytes(struct xdp_buff *xdp, u32 offset,
1852 void *buf, u32 len)
1853{
1854 return -EOPNOTSUPP;
1855}
1856
1857static inline int __bpf_xdp_store_bytes(struct xdp_buff *xdp, u32 offset,
1858 void *buf, u32 len)
1859{
1860 return -EOPNOTSUPP;
1861}
1862
1863static inline void *bpf_xdp_pointer(struct xdp_buff *xdp, u32 offset, u32 len)
1864{
1865 return NULL;
1866}
1867
1868static inline void bpf_xdp_copy_buf(struct xdp_buff *xdp, unsigned long off, void *buf,
1869 unsigned long len, bool flush)
1870{
1871}
1872
1873static inline int __bpf_skb_meta_store_bytes(struct sk_buff *skb, u32 offset,
1874 const void *from, u32 len,
1875 u64 flags)
1876{
1877 return -EOPNOTSUPP;
1878}
1879
1880static inline void *bpf_skb_meta_pointer(struct sk_buff *skb, u32 offset)
1881{
1882 return ERR_PTR(-EOPNOTSUPP);
1883}
1884#endif /* CONFIG_NET */
1885
1886#endif /* __LINUX_FILTER_H__ */