/*
 * Source: Linux kernel mirror (for testing)
 * git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
 * Tags: kernel, os, linux
 */
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#pragma once

/*
 * PAGE_SIZE is not defined for BPF programs; fall back to the toolchain's
 * __PAGE_SIZE (NOTE(review): presumably provided by the BPF build
 * environment — confirm against the including Makefile/toolchain).
 */
#ifndef PAGE_SIZE
#define PAGE_SIZE __PAGE_SIZE
/*
 * for older kernels try sizeof(struct genradix_node)
 * or flexible:
 * static inline long __bpf_page_size(void) {
 *   return bpf_core_enum_value(enum page_size_enum___l, __PAGE_SIZE___l) ?: sizeof(struct genradix_node);
 * }
 * but generated code is not great.
 */
#endif
16
/*
 * Arena pointer handling.
 *
 * When the compiler supports address-space casts (and asm fallback is not
 * forced), arena pointers are tagged with address_space(1) and the compiler
 * emits the cast instructions itself, so cast_kern()/cast_user() are no-ops.
 * Otherwise the casts are emitted by hand below.
 */
#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) && !defined(BPF_ARENA_FORCE_ASM)
#ifndef __arena
#define __arena __attribute__((address_space(1)))
#endif
#define __arena_global __attribute__((address_space(1)))
#define cast_kern(ptr) /* nop for bpf prog. emitted by LLVM */
#define cast_user(ptr) /* nop for bpf prog. emitted by LLVM */
#else

/* emit instruction:
 * rX = rX .off = BPF_ADDR_SPACE_CAST .imm32 = (dst_as << 16) | src_as
 *
 * This is a workaround for LLVM compiler versions without
 * __BPF_FEATURE_ADDR_SPACE_CAST that do not automatically cast between arena
 * pointers and native kernel/userspace ones. In this case we explicitly do so
 * with cast_kern() and cast_user(). E.g., in the Linux kernel tree,
 * tools/testing/selftests/bpf includes tests that use these macros to implement
 * linked lists and hashtables backed by arena memory. In sched_ext, we use
 * cast_kern() and cast_user() for compatibility with older LLVM toolchains.
 *
 * 0xBF is the BPF_ALU64 | BPF_MOV | BPF_X opcode; the .ifc directives pick
 * the src/dst register byte matching whichever register the compiler
 * allocated for 'var' (same register in both nibbles, e.g. 0x11 for r1 = r1).
 */
#ifndef bpf_addr_space_cast
#define bpf_addr_space_cast(var, dst_as, src_as)\
	asm volatile(".byte 0xBF;		\
		     .ifc %[reg], r0;		\
		     .byte 0x00;		\
		     .endif;			\
		     .ifc %[reg], r1;		\
		     .byte 0x11;		\
		     .endif;			\
		     .ifc %[reg], r2;		\
		     .byte 0x22;		\
		     .endif;			\
		     .ifc %[reg], r3;		\
		     .byte 0x33;		\
		     .endif;			\
		     .ifc %[reg], r4;		\
		     .byte 0x44;		\
		     .endif;			\
		     .ifc %[reg], r5;		\
		     .byte 0x55;		\
		     .endif;			\
		     .ifc %[reg], r6;		\
		     .byte 0x66;		\
		     .endif;			\
		     .ifc %[reg], r7;		\
		     .byte 0x77;		\
		     .endif;			\
		     .ifc %[reg], r8;		\
		     .byte 0x88;		\
		     .endif;			\
		     .ifc %[reg], r9;		\
		     .byte 0x99;		\
		     .endif;			\
		     .short %[off];		\
		     .long %[as]"		\
		     : [reg]"+r"(var)		\
		     : [off]"i"(BPF_ADDR_SPACE_CAST)	\
		     , [as]"i"((dst_as << 16) | src_as));
#endif

#define __arena
/* Without compiler support, arena globals are placed via an explicit section. */
#define __arena_global SEC(".addr_space.1")
#define cast_kern(ptr) bpf_addr_space_cast(ptr, 0, 1)
#define cast_user(ptr) bpf_addr_space_cast(ptr, 1, 0)
#endif
82
83void __arena* bpf_arena_alloc_pages(void *map, void __arena *addr, __u32 page_cnt,
84 int node_id, __u64 flags) __ksym __weak;
85void bpf_arena_free_pages(void *map, void __arena *ptr, __u32 page_cnt) __ksym __weak;
86int bpf_arena_reserve_pages(void *map, void __arena *ptr, __u32 page_cnt) __ksym __weak;
87
/*
 * Note that cond_break can only be portably used in the body of a breakable
 * construct, whereas can_loop can be used anywhere.
 *
 * can_loop evaluates to true until the verifier's loop budget is exhausted,
 * at which point the may_goto instruction jumps to l_break and the statement
 * expression yields false. __cond_break(expr) executes 'expr' (e.g. 'break'
 * or 'goto label') at that same point.
 */
#ifdef SCX_BPF_UNITTEST
/* Unit tests run without the BPF verifier: no loop bounding needed. */
#define can_loop true
#define __cond_break(expr) expr
#else
#ifdef __BPF_FEATURE_MAY_GOTO
/* Compiler knows the may_goto mnemonic; let it emit the instruction. */
#define can_loop					\
	({ __label__ l_break, l_continue;		\
	bool ret = true;				\
	asm volatile goto("may_goto %l[l_break]"	\
			  :::: l_break);		\
	goto l_continue;				\
	l_break: ret = false;				\
	l_continue:;					\
	ret;						\
	})

#define __cond_break(expr)				\
	({ __label__ l_break, l_continue;		\
	asm volatile goto("may_goto %l[l_break]"	\
			  :::: l_break);		\
	goto l_continue;				\
	l_break: expr;					\
	l_continue:;					\
	})
#else
/*
 * Older LLVM without the may_goto mnemonic: hand-assemble the instruction
 * (opcode byte 0xe5) with the branch offset, in units of 8-byte insns,
 * packed into the 16-bit off field. The field's position within the .long
 * depends on host byte order, hence the two variants below.
 */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define can_loop					\
	({ __label__ l_break, l_continue;		\
	bool ret = true;				\
	asm volatile goto("1:.byte 0xe5;			\
		      .byte 0;					\
		      .long ((%l[l_break] - 1b - 8) / 8) & 0xffff;	\
		      .short 0"					\
		      :::: l_break);			\
	goto l_continue;				\
	l_break: ret = false;				\
	l_continue:;					\
	ret;						\
	})

#define __cond_break(expr)				\
	({ __label__ l_break, l_continue;		\
	asm volatile goto("1:.byte 0xe5;			\
		      .byte 0;					\
		      .long ((%l[l_break] - 1b - 8) / 8) & 0xffff;	\
		      .short 0"					\
		      :::: l_break);			\
	goto l_continue;				\
	l_break: expr;					\
	l_continue:;					\
	})
#else
#define can_loop					\
	({ __label__ l_break, l_continue;		\
	bool ret = true;				\
	asm volatile goto("1:.byte 0xe5;			\
		      .byte 0;					\
		      .long (((%l[l_break] - 1b - 8) / 8) & 0xffff) << 16;	\
		      .short 0"					\
		      :::: l_break);			\
	goto l_continue;				\
	l_break: ret = false;				\
	l_continue:;					\
	ret;						\
	})

#define __cond_break(expr)				\
	({ __label__ l_break, l_continue;		\
	asm volatile goto("1:.byte 0xe5;			\
		      .byte 0;					\
		      .long (((%l[l_break] - 1b - 8) / 8) & 0xffff) << 16;	\
		      .short 0"					\
		      :::: l_break);			\
	goto l_continue;				\
	l_break: expr;					\
	l_continue:;					\
	})
#endif /* __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ */
#endif /* __BPF_FEATURE_MAY_GOTO */
#endif /* SCX_BPF_UNITTEST */

/* Break out of the innermost loop when the verifier's loop budget runs out. */
#define cond_break __cond_break(break)
/* Same, but jump to an arbitrary label instead of breaking. */
#define cond_break_label(label) __cond_break(goto label)
175
176
177void bpf_preempt_disable(void) __weak __ksym;
178void bpf_preempt_enable(void) __weak __ksym;
179ssize_t bpf_arena_mapping_nr_pages(void *p__map) __weak __ksym;