Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2#include <vmlinux.h>
3#include <bpf/bpf_tracing.h>
4#include <bpf/bpf_helpers.h>
5#include <bpf/bpf_core_read.h>
6#include <bpf/bpf_endian.h>
7#include "bpf_misc.h"
8#include "bpf_experimental.h"
9
10#ifndef ETH_P_IP
11#define ETH_P_IP 0x0800
12#endif
13
/* Program array used as a tail call target table for the tail-call tests
 * below.  NOTE(review): presumably the test harness installs
 * exception_tail_call_target at index 0 from userspace — not visible here.
 */
struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 4);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
20
/* Static subprog that unconditionally throws a BPF exception with cookie 32.
 * The return statement is unreachable at runtime and only satisfies the C
 * type system; the verifier is expected to treat everything after bpf_throw()
 * as dead.
 */
static __noinline int static_func(u64 i)
{
	bpf_throw(32);
	return i;
}
26
/* Global subprog calling into a static subprog that always throws.  The
 * "return i - 1" is dead code once the verifier knows static_func() never
 * returns; this exercises global->static calls where the caller's tail is
 * eliminated.
 */
__noinline int global2static_simple(u64 i)
{
	static_func(i + 2);
	return i - 1;
}
32
/* Global subprog that either throws directly (cookie 16, when the argument
 * equals ETH_P_IP) or falls through into a static subprog that always throws
 * (cookie 32).  Both paths end in an exception.
 */
__noinline int global2static(u64 i)
{
	if (i == ETH_P_IP)
		bpf_throw(16);
	return static_func(i);
}
39
/* Static subprog calling a global subprog, exercising exception unwinding
 * across a static->global call boundary.  The addition after the call is
 * unreachable since global2static() always throws.
 */
static __noinline int static2global(u64 i)
{
	return global2static(i) + i;
}
44
/* Entry point that throws unconditionally from the main program itself;
 * expected program return value is the cookie (64) via the default
 * exception callback.
 */
SEC("tc")
int exception_throw_always_1(struct __sk_buff *ctx)
{
	bpf_throw(64);
	return 0;
}
51
/* In this case, the global func will never be seen executing after call to
 * static subprog, hence verifier will DCE the remaining instructions. Ensure we
 * are resilient to that.
 */
SEC("tc")
int exception_throw_always_2(struct __sk_buff *ctx)
{
	return global2static_simple(ctx->protocol);
}
61
/* Unwind across static->global->static call chain; with an IPv4 skb the
 * ntohs(protocol) equals ETH_P_IP, so global2static() throws cookie 16.
 */
SEC("tc")
int exception_throw_unwind_1(struct __sk_buff *ctx)
{
	return static2global(bpf_ntohs(ctx->protocol));
}
67
/* Same call chain as exception_throw_unwind_1, but the "- 1" makes the
 * ETH_P_IP comparison fail, so the throw happens deeper, in static_func()
 * (cookie 32) instead.
 */
SEC("tc")
int exception_throw_unwind_2(struct __sk_buff *ctx)
{
	return static2global(bpf_ntohs(ctx->protocol) - 1);
}
73
/* Throw with cookie 0; checks the default exception callback's return value
 * for a zero cookie (the "return 1" is dead code).
 */
SEC("tc")
int exception_throw_default(struct __sk_buff *ctx)
{
	bpf_throw(0);
	return 1;
}
80
/* Throw with a non-zero cookie (5); the default exception callback is
 * expected to propagate the cookie as the program's return value.
 */
SEC("tc")
int exception_throw_default_value(struct __sk_buff *ctx)
{
	bpf_throw(5);
	return 1;
}
87
/* Program meant to be installed in jmp_table and reached via tail call; it
 * throws cookie 16, testing that exceptions work after a tail call replaced
 * the original program.
 */
SEC("tc")
int exception_tail_call_target(struct __sk_buff *ctx)
{
	bpf_throw(16);
	return 0;
}
94
/* Subprog performing the tail call.  On success the tail call never returns
 * here; "ret" (volatile to keep the store/load from being optimized away) is
 * only returned if the tail call fails (e.g. empty map slot).
 */
static __noinline
int exception_tail_call_subprog(struct __sk_buff *ctx)
{
	volatile int ret = 10;

	bpf_tail_call_static(ctx, &jmp_table, 0);
	return ret;
}
103
/* Entry point: tail-calls through a static subprog.  If the tail call target
 * throws, the +8 here must not execute; if the tail call misses, returns
 * 10 + 8.  volatile prevents the compiler from folding the call chain.
 */
SEC("tc")
int exception_tail_call(struct __sk_buff *ctx) {
	volatile int ret = 0;

	ret = exception_tail_call_subprog(ctx);
	return ret + 8;
}
111
/* Weak (global linkage) void function that throws cookie 11; exercises
 * throwing from a void global subprog, where there is no return value for
 * the verifier to track.
 */
__weak
void throw_11(void)
{
	bpf_throw(11);
}
117
/* Entry point calling a void global subprog that always throws; expected
 * program return is the cookie (11), not 0.
 */
SEC("tc")
int exception_throw_from_void_global(struct __sk_buff *ctx)
{
	throw_11();

	return 0;
}
125
/* Global subprog returning 0; NOTE(review): presumably serves as an freplace
 * (extension program) attachment point in the userspace test — confirm
 * against the test harness.  volatile keeps the function body non-trivial.
 */
__noinline int exception_ext_global(struct __sk_buff *ctx)
{
	volatile int ret = 0;

	return ret;
}

/* Static subprog wrapping the extensible global, so the extension point sits
 * below a static call in the call graph.
 */
static __noinline int exception_ext_static(struct __sk_buff *ctx)
{
	return exception_ext_global(ctx);
}

/* Entry point: main -> static -> global(extensible). */
SEC("tc")
int exception_ext(struct __sk_buff *ctx)
{
	return exception_ext_static(ctx);
}
143
/* Dummy global subprog called from the exception callback; returns 0 unless
 * replaced by an extension program at runtime.
 */
__noinline int exception_cb_mod_global(u64 cookie)
{
	volatile int ret = 0;

	return ret;
}

/* Example of how the exception callback supplied during verification can still
 * introduce extensions by calling to dummy global functions, and alter runtime
 * behavior.
 *
 * Right now we don't allow freplace attachment to exception callback itself,
 * but if the need arises this restriction is technically feasible to relax in
 * the future.
 */
__noinline int exception_cb_mod(u64 cookie)
{
	return exception_cb_mod_global(cookie) + cookie + 10;
}

/* Entry point with a custom exception callback (exception_cb_mod) instead of
 * the default one; throwing cookie 25 should yield 25 + 10 = 35 unless the
 * dummy global inside the callback is extended.
 */
SEC("tc")
__exception_cb(exception_cb_mod)
int exception_ext_mod_cb_runtime(struct __sk_buff *ctx)
{
	bpf_throw(25);
	return 0;
}
171
/* Non-throwing static subprog — baseline for the dispatch test below. */
__noinline static int subprog(struct __sk_buff *ctx)
{
	return bpf_ktime_get_ns();
}

/* Static subprog that throws only on a runtime condition (non-zero skb
 * timestamp), so the verifier must keep both the throwing and returning
 * paths.
 */
__noinline static int throwing_subprog(struct __sk_buff *ctx)
{
	if (ctx->tstamp)
		bpf_throw(0);
	return bpf_ktime_get_ns();
}

/* Non-throwing global subprog — global-linkage counterpart of subprog(). */
__noinline int global_subprog(struct __sk_buff *ctx)
{
	return bpf_ktime_get_ns();
}

/* Global subprog with a conditional throw — global-linkage counterpart of
 * throwing_subprog().
 */
__noinline int throwing_global_subprog(struct __sk_buff *ctx)
{
	if (ctx->tstamp)
		bpf_throw(0);
	return bpf_ktime_get_ns();
}
195
/* Dispatch over all four subprog flavors (static/global x throwing/plain)
 * based on skb->protocol, with an unconditional throw (cookie 1) on the
 * fall-through path.  Exercises mixing throwing and non-throwing callees in
 * one program.
 */
SEC("tc")
int exception_throw_subprog(struct __sk_buff *ctx)
{
	switch (ctx->protocol) {
	case 1:
		return subprog(ctx);
	case 2:
		return global_subprog(ctx);
	case 3:
		return throwing_subprog(ctx);
	case 4:
		return throwing_global_subprog(ctx);
	default:
		break;
	}
	bpf_throw(1);
	return 0;
}
214
/* Assertion subprogs: each copies its argument into a volatile local (so the
 * compiler cannot constant-fold the condition away) and bpf_assert()s one
 * relation on it.  If the assertion fails, a BPF exception is thrown;
 * otherwise the subprog returns 0.
 */

/* Assert cookie != 0. */
__noinline int assert_nz_gfunc(u64 c)
{
	volatile u64 cookie = c;

	bpf_assert(cookie != 0);
	return 0;
}

/* Assert cookie == 0 (bpf_cmp_unlikely marks the taken branch unlikely). */
__noinline int assert_zero_gfunc(u64 c)
{
	volatile u64 cookie = c;

	bpf_assert(bpf_cmp_unlikely(cookie, ==, 0));
	return 0;
}

/* Assert cookie < 0 (signed). */
__noinline int assert_neg_gfunc(s64 c)
{
	volatile s64 cookie = c;

	bpf_assert(bpf_cmp_unlikely(cookie, <, 0));
	return 0;
}

/* Assert cookie > 0 (signed). */
__noinline int assert_pos_gfunc(s64 c)
{
	volatile s64 cookie = c;

	bpf_assert(bpf_cmp_unlikely(cookie, >, 0));
	return 0;
}

/* Assert cookie <= -1 (signed). */
__noinline int assert_negeq_gfunc(s64 c)
{
	volatile s64 cookie = c;

	bpf_assert(bpf_cmp_unlikely(cookie, <=, -1));
	return 0;
}

/* Assert cookie >= 1 (signed). */
__noinline int assert_poseq_gfunc(s64 c)
{
	volatile s64 cookie = c;

	bpf_assert(bpf_cmp_unlikely(cookie, >=, 1));
	return 0;
}
262
/* Same assertions as above, but using bpf_assert_with(): on failure the
 * exception is thrown with the supplied value (cookie + 100) instead of the
 * default, so the test can observe which assertion fired.
 */

/* Assert cookie != 0, throwing cookie + 100 on failure. */
__noinline int assert_nz_gfunc_with(u64 c)
{
	volatile u64 cookie = c;

	bpf_assert_with(cookie != 0, cookie + 100);
	return 0;
}

/* Assert cookie == 0, throwing cookie + 100 on failure. */
__noinline int assert_zero_gfunc_with(u64 c)
{
	volatile u64 cookie = c;

	bpf_assert_with(bpf_cmp_unlikely(cookie, ==, 0), cookie + 100);
	return 0;
}

/* Assert cookie < 0 (signed), throwing cookie + 100 on failure. */
__noinline int assert_neg_gfunc_with(s64 c)
{
	volatile s64 cookie = c;

	bpf_assert_with(bpf_cmp_unlikely(cookie, <, 0), cookie + 100);
	return 0;
}

/* Assert cookie > 0 (signed), throwing cookie + 100 on failure. */
__noinline int assert_pos_gfunc_with(s64 c)
{
	volatile s64 cookie = c;

	bpf_assert_with(bpf_cmp_unlikely(cookie, >, 0), cookie + 100);
	return 0;
}

/* Assert cookie <= -1 (signed), throwing cookie + 100 on failure. */
__noinline int assert_negeq_gfunc_with(s64 c)
{
	volatile s64 cookie = c;

	bpf_assert_with(bpf_cmp_unlikely(cookie, <=, -1), cookie + 100);
	return 0;
}

/* Assert cookie >= 1 (signed), throwing cookie + 100 on failure. */
__noinline int assert_poseq_gfunc_with(s64 c)
{
	volatile s64 cookie = c;

	bpf_assert_with(bpf_cmp_unlikely(cookie, >=, 1), cookie + 100);
	return 0;
}
310
/* Template for a TC entry point named exception<tag><name> that invokes the
 * given assertion subprog with a fixed cookie and returns its result + 1
 * (so a non-thrown path yields 1).
 */
#define check_assert(name, cookie, tag) \
SEC("tc") \
int exception##tag##name(struct __sk_buff *ctx) \
{ \
	return name(cookie) + 1; \
}
317
/* Instantiate the test entry points.  The first two groups pass cookies that
 * satisfy each assertion (programs return normally); the _bad_ groups pass
 * violating cookies, so the assertion throws instead.
 */
check_assert(assert_nz_gfunc, 5, _);
check_assert(assert_zero_gfunc, 0, _);
check_assert(assert_neg_gfunc, -100, _);
check_assert(assert_pos_gfunc, 100, _);
check_assert(assert_negeq_gfunc, -1, _);
check_assert(assert_poseq_gfunc, 1, _);

check_assert(assert_nz_gfunc_with, 5, _);
check_assert(assert_zero_gfunc_with, 0, _);
check_assert(assert_neg_gfunc_with, -100, _);
check_assert(assert_pos_gfunc_with, 100, _);
check_assert(assert_negeq_gfunc_with, -1, _);
check_assert(assert_poseq_gfunc_with, 1, _);

check_assert(assert_nz_gfunc, 0, _bad_);
check_assert(assert_zero_gfunc, 5, _bad_);
check_assert(assert_neg_gfunc, 100, _bad_);
check_assert(assert_pos_gfunc, -100, _bad_);
check_assert(assert_negeq_gfunc, 1, _bad_);
check_assert(assert_poseq_gfunc, -1, _bad_);

check_assert(assert_nz_gfunc_with, 0, _bad_);
check_assert(assert_zero_gfunc_with, 5, _bad_);
check_assert(assert_neg_gfunc_with, 100, _bad_);
check_assert(assert_pos_gfunc_with, -100, _bad_);
check_assert(assert_negeq_gfunc_with, 1, _bad_);
check_assert(assert_poseq_gfunc_with, -1, _bad_);
345
/* Range assertion over the full u64 domain — always satisfiable, so the
 * program returns 1.
 */
SEC("tc")
int exception_assert_range(struct __sk_buff *ctx)
{
	u64 time = bpf_ktime_get_ns();

	bpf_assert_range(time, 0, ~0ULL);
	return 1;
}

/* Same full-domain range assertion, but throwing cookie 10 on failure. */
SEC("tc")
int exception_assert_range_with(struct __sk_buff *ctx)
{
	u64 time = bpf_ktime_get_ns();

	bpf_assert_range_with(time, 0, ~0ULL, 10);
	return 1;
}

/* "Bad" range: a signed [-100, 100] bound applied to an unsigned value.
 * NOTE(review): intentionally ill-formed usage — presumably expected to be
 * rejected or to throw; confirm expectations against the userspace test.
 */
SEC("tc")
int exception_bad_assert_range(struct __sk_buff *ctx)
{
	u64 time = bpf_ktime_get_ns();

	bpf_assert_range(time, -100, 100);
	return 1;
}

/* Same "bad" signed-range usage, with an explicit throw cookie of 10. */
SEC("tc")
int exception_bad_assert_range_with(struct __sk_buff *ctx)
{
	u64 time = bpf_ktime_get_ns();

	bpf_assert_range_with(time, -1000, 1000, 10);
	return 1;
}
381
/* GPL license declaration, required for GPL-only helpers/kfuncs. */
char _license[] SEC("license") = "GPL";