// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */

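/*
 * BPF-side programs for the bpf_timer selftests: timers embedded in array,
 * LRU, prealloc and non-prealloc hash map elements, absolute and CPU-pinned
 * timers, plus callbacks that re-arm, cancel, or update their own element.
 */
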
#include <vmlinux.h>
#include <stdbool.h>
#include <errno.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

#define CLOCK_MONOTONIC 1
#define CLOCK_BOOTTIME 7

char _license[] SEC("license") = "GPL";

struct hmap_elem {
	int counter;
	struct bpf_timer timer;
	struct bpf_spin_lock lock; /* unused */
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1000);
	__type(key, int);
	__type(value, struct hmap_elem);
} hmap SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__uint(max_entries, 1000);
	__type(key, int);
	__type(value, struct hmap_elem);
} hmap_malloc SEC(".maps");

struct elem {
	struct bpf_timer t;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 2);
	__type(key, int);
	__type(value, struct elem);
} array SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__uint(max_entries, 4);
	__type(key, int);
	__type(value, struct elem);
} lru SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} abs_timer SEC(".maps"), soft_timer_pinned SEC(".maps"), abs_timer_pinned SEC(".maps"),
	race_array SEC(".maps");

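/* Result flags and counters checked by the user-space side of the test */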
__u64 bss_data;
__u64 abs_data;
__u64 err;
__u64 ok;
__u64 test_hits;
__u64 update_hits;
__u64 cancel_hits;
__u64 callback_check = 52;
__u64 callback2_check = 52;
__u64 pinned_callback_check;
__s32 pinned_cpu;
bool async_cancel = false;

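/* Element keys; each also tags which map a shared callback was invoked for */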
#define ARRAY 1
#define HTAB 2
#define HTAB_MALLOC 3
#define LRU 4

/* callback for array and lru timers */
static int timer_cb1(void *map, int *key, struct bpf_timer *timer)
{
	/* increment bss variable twice.
	 * Once via array timer callback and once via lru timer callback
	 */
	bss_data += 5;

	/* *key == 1 - the callback was called for the array timer.
	 * *key == 4 - the callback was called for the lru timer.
	 */
	if (*key == ARRAY) {
		struct bpf_timer *lru_timer;
		int lru_key = LRU;

		/* rearm array timer to be called again in ~35 seconds */
		if (bpf_timer_start(timer, 1ull << 35, 0) != 0)
			err |= 1;

		lru_timer = bpf_map_lookup_elem(&lru, &lru_key);
		if (!lru_timer)
			return 0;
		bpf_timer_set_callback(lru_timer, timer_cb1);
		if (bpf_timer_start(lru_timer, 0, 0) != 0)
			err |= 2;
	} else if (*key == LRU) {
		int lru_key, i;

		for (i = LRU + 1;
		     i <= 100 /* for current LRU eviction algorithm this number
			       * should be larger than ~ lru->max_entries * 2
			       */;
		     i++) {
			struct elem init = {};

			/* lru_key cannot be used as loop induction variable
			 * otherwise the loop will be unbounded.
			 */
			lru_key = i;

			/* add more elements into lru map to push out current
			 * element and force deletion of this timer
			 */
			bpf_map_update_elem(map, &lru_key, &init, 0);
			/* look it up to bump it into active list */
			bpf_map_lookup_elem(map, &lru_key);

			/* keep adding until *key changes underneath,
			 * which means that key/timer memory was reused
			 */
			if (*key != LRU)
				break;
		}

		/* check that the timer was removed */
		if (bpf_timer_cancel(timer) != -EINVAL)
			err |= 4;
		ok |= 1;
	}
	return 0;
}

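/* Arm the array timer with timer_cb1; the callback then chains into the
 * lru timer (see timer_cb1 above).
 */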
SEC("fentry/bpf_fentry_test1")
int BPF_PROG2(test1, int, a)
{
	struct bpf_timer *arr_timer, *lru_timer;
	struct elem init = {};
	int lru_key = LRU;
	int array_key = ARRAY;

	arr_timer = bpf_map_lookup_elem(&array, &array_key);
	if (!arr_timer)
		return 0;
	bpf_timer_init(arr_timer, &array, CLOCK_MONOTONIC);

	bpf_map_update_elem(&lru, &lru_key, &init, 0);
	lru_timer = bpf_map_lookup_elem(&lru, &lru_key);
	if (!lru_timer)
		return 0;
	bpf_timer_init(lru_timer, &lru, CLOCK_MONOTONIC);

	bpf_timer_set_callback(arr_timer, timer_cb1);
	bpf_timer_start(arr_timer, 0 /* call timer_cb1 asap */, 0);

	/* init more timers to check that array destruction
	 * doesn't leak timer memory.
	 */
	array_key = 0;
	arr_timer = bpf_map_lookup_elem(&array, &array_key);
	if (!arr_timer)
		return 0;
	bpf_timer_init(arr_timer, &array, CLOCK_MONOTONIC);
	return 0;
}

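/* Callback that must never fire; err = 42 marks the failure if it does */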
static int timer_error(void *map, int *key, struct bpf_timer *timer)
{
	err = 42;
	return 0;
}

SEC("syscall")
int test_async_cancel_succeed(void *ctx)
{
	struct bpf_timer *arr_timer;
	int array_key = ARRAY;

	arr_timer = bpf_map_lookup_elem(&array, &array_key);
	if (!arr_timer)
		return 0;
	bpf_timer_init(arr_timer, &array, CLOCK_MONOTONIC);
	bpf_timer_set_callback(arr_timer, timer_error);
	bpf_timer_start(arr_timer, 100000 /* 100us */, 0);
	bpf_timer_cancel_async(arr_timer);
	ok = 7;
	return 0;
}

/* callback for prealloc and non-prealloc hashtab timers */
static int timer_cb2(void *map, int *key, struct hmap_elem *val)
{
	if (*key == HTAB)
		callback_check--;
	else
		callback2_check--;
	if (val->counter > 0 && --val->counter) {
		/* re-arm the timer again to execute after 1 usec */
		bpf_timer_start(&val->timer, 1000, 0);
	} else if (*key == HTAB) {
		struct bpf_timer *arr_timer;
		int array_key = ARRAY;

		/* cancel arr_timer otherwise bpf_fentry_test1 prog
		 * will stay alive forever.
		 */
		arr_timer = bpf_map_lookup_elem(&array, &array_key);
		if (!arr_timer)
			return 0;
		if (bpf_timer_cancel(arr_timer) != 1)
			/* bpf_timer_cancel should return 1 to indicate
			 * that arr_timer was active at this time
			 */
			err |= 8;

		/* try to cancel ourself. It shouldn't deadlock. */
		if (bpf_timer_cancel(&val->timer) != -EDEADLK)
			err |= 16;

		/* delete this key and this timer anyway.
		 * It shouldn't deadlock either.
		 */
		bpf_map_delete_elem(map, key);

		/* in preallocated hashmap both 'key' and 'val' could have been
		 * reused to store another map element (like in LRU above),
		 * but in a controlled test environment the below test works.
		 * It's not a use-after-free. The memory is owned by the map.
		 */
		if (bpf_timer_start(&val->timer, 1000, 0) != -EINVAL)
			err |= 32;
		ok |= 2;
	} else {
		if (*key != HTAB_MALLOC)
			err |= 64;

		/* try to cancel ourself. It shouldn't deadlock. */
		if (bpf_timer_cancel(&val->timer) != -EDEADLK)
			err |= 128;

		/* delete this key and this timer anyway.
		 * It shouldn't deadlock either.
		 */
		bpf_map_delete_elem(map, key);

		ok |= 4;
	}
	return 0;
}

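/* Arm one timer in the prealloc and one in the non-prealloc hash map,
 * both driven by timer_cb2 and started with a 1 usec expiry.
 */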
int bpf_timer_test(void)
{
	struct hmap_elem *val;
	int key = HTAB, key_malloc = HTAB_MALLOC;

	val = bpf_map_lookup_elem(&hmap, &key);
	if (val) {
		if (bpf_timer_init(&val->timer, &hmap, CLOCK_BOOTTIME) != 0)
			err |= 512;
		bpf_timer_set_callback(&val->timer, timer_cb2);
		bpf_timer_start(&val->timer, 1000, 0);
	}
	val = bpf_map_lookup_elem(&hmap_malloc, &key_malloc);
	if (val) {
		if (bpf_timer_init(&val->timer, &hmap_malloc, CLOCK_BOOTTIME) != 0)
			err |= 1024;
		bpf_timer_set_callback(&val->timer, timer_cb2);
		bpf_timer_start(&val->timer, 1000, 0);
	}
	return 0;
}

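/* Exercise timer init followed by map update/delete (which frees the timer)
 * on both hash maps, then arm them for real via bpf_timer_test().
 */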
SEC("fentry/bpf_fentry_test2")
int BPF_PROG2(test2, int, a, int, b)
{
	struct hmap_elem init = {}, *val;
	int key = HTAB, key_malloc = HTAB_MALLOC;

	init.counter = 10; /* number of times to trigger timer_cb2 */
	bpf_map_update_elem(&hmap, &key, &init, 0);
	val = bpf_map_lookup_elem(&hmap, &key);
	if (val)
		bpf_timer_init(&val->timer, &hmap, CLOCK_BOOTTIME);
	/* update the same key to free the timer */
	bpf_map_update_elem(&hmap, &key, &init, 0);

	bpf_map_update_elem(&hmap_malloc, &key_malloc, &init, 0);
	val = bpf_map_lookup_elem(&hmap_malloc, &key_malloc);
	if (val)
		bpf_timer_init(&val->timer, &hmap_malloc, CLOCK_BOOTTIME);
	/* update the same key to free the timer */
	bpf_map_update_elem(&hmap_malloc, &key_malloc, &init, 0);

	/* init more timers to check that htab operations
	 * don't leak timer memory.
	 */
	key = 0;
	bpf_map_update_elem(&hmap, &key, &init, 0);
	val = bpf_map_lookup_elem(&hmap, &key);
	if (val)
		bpf_timer_init(&val->timer, &hmap, CLOCK_BOOTTIME);
	bpf_map_delete_elem(&hmap, &key);
	bpf_map_update_elem(&hmap, &key, &init, 0);
	val = bpf_map_lookup_elem(&hmap, &key);
	if (val)
		bpf_timer_init(&val->timer, &hmap, CLOCK_BOOTTIME);

	/* and with non-prealloc htab */
	key_malloc = 0;
	bpf_map_update_elem(&hmap_malloc, &key_malloc, &init, 0);
	val = bpf_map_lookup_elem(&hmap_malloc, &key_malloc);
	if (val)
		bpf_timer_init(&val->timer, &hmap_malloc, CLOCK_BOOTTIME);
	bpf_map_delete_elem(&hmap_malloc, &key_malloc);
	bpf_map_update_elem(&hmap_malloc, &key_malloc, &init, 0);
	val = bpf_map_lookup_elem(&hmap_malloc, &key_malloc);
	if (val)
		bpf_timer_init(&val->timer, &hmap_malloc, CLOCK_BOOTTIME);

	return bpf_timer_test();
}

/* callback for absolute timer */
static int timer_cb3(void *map, int *key, struct bpf_timer *timer)
{
	abs_data += 6;

	if (abs_data < 12) {
		bpf_timer_start(timer, bpf_ktime_get_boot_ns() + 1000,
				BPF_F_TIMER_ABS);
	} else {
		/* Re-arm timer ~35 seconds in future */
		bpf_timer_start(timer, bpf_ktime_get_boot_ns() + (1ull << 35),
				BPF_F_TIMER_ABS);
	}

	return 0;
}

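/* Arm the absolute timer; timer_cb3 keeps re-arming it until abs_data
 * reaches 12, then pushes the next expiry ~35 seconds out.
 */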
SEC("fentry/bpf_fentry_test3")
int BPF_PROG2(test3, int, a)
{
	int key = 0;
	struct bpf_timer *timer;

	bpf_printk("test3");

	timer = bpf_map_lookup_elem(&abs_timer, &key);
	if (timer) {
		if (bpf_timer_init(timer, &abs_timer, CLOCK_BOOTTIME) != 0)
			err |= 2048;
		bpf_timer_set_callback(timer, timer_cb3);
		bpf_timer_start(timer, bpf_ktime_get_boot_ns() + 1000,
				BPF_F_TIMER_ABS);
	}

	return 0;
}

/* callback for pinned timer */
static int timer_cb_pinned(void *map, int *key, struct bpf_timer *timer)
{
	__s32 cpu = bpf_get_smp_processor_id();

	if (cpu != pinned_cpu)
		err |= 16384;

	pinned_callback_check++;
	return 0;
}

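/* Arm a timer pinned to the current CPU, either with a relative ("soft")
 * expiry or an absolute CLOCK_BOOTTIME expiry.
 */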
static void test_pinned_timer(bool soft)
{
	int key = 0;
	void *map;
	struct bpf_timer *timer;
	__u64 flags = BPF_F_TIMER_CPU_PIN;
	__u64 start_time;

	if (soft) {
		map = &soft_timer_pinned;
		start_time = 0;
	} else {
		map = &abs_timer_pinned;
		start_time = bpf_ktime_get_boot_ns();
		flags |= BPF_F_TIMER_ABS;
	}

	timer = bpf_map_lookup_elem(map, &key);
	if (timer) {
		if (bpf_timer_init(timer, map, CLOCK_BOOTTIME) != 0)
			err |= 4096;
		bpf_timer_set_callback(timer, timer_cb_pinned);
		pinned_cpu = bpf_get_smp_processor_id();
		bpf_timer_start(timer, start_time + 1000, flags);
	} else {
		err |= 8192;
	}
}

SEC("fentry/bpf_fentry_test4")
int BPF_PROG2(test4, int, a)
{
	bpf_printk("test4");
	test_pinned_timer(true);

	return 0;
}

SEC("fentry/bpf_fentry_test5")
int BPF_PROG2(test5, int, a)
{
	bpf_printk("test5");
	test_pinned_timer(false);

	return 0;
}

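/* Callback that immediately re-arms itself (1 msec out) so that repeated
 * test runs race timer execution against cancellation from program side.
 */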
static int race_timer_callback(void *race_array, int *race_key, struct bpf_timer *timer)
{
	bpf_timer_start(timer, 1000000, 0);
	return 0;
}

/* Callback that updates its own map element */
static int update_self_callback(void *map, int *key, struct bpf_timer *timer)
{
	struct elem init = {};

	bpf_map_update_elem(map, key, &init, BPF_ANY);
	__sync_fetch_and_add(&update_hits, 1);
	return 0;
}

/* Callback that cancels itself using async cancel */
static int cancel_self_callback(void *map, int *key, struct bpf_timer *timer)
{
	bpf_timer_cancel_async(timer);
	__sync_fetch_and_add(&cancel_hits, 1);
	return 0;
}

enum test_mode {
	TEST_RACE_SYNC,
	TEST_RACE_ASYNC,
	TEST_UPDATE,
	TEST_CANCEL,
};

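/* Shared body for the race/update/cancel tests: (re)create the element,
 * init and start its timer with the callback selected by mode, and for the
 * race modes cancel it again right away.
 */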
static __always_inline int test_common(enum test_mode mode)
{
	struct bpf_timer *timer;
	struct elem init;
	int ret, key = 0;

	__builtin_memset(&init, 0, sizeof(struct elem));

	bpf_map_update_elem(&race_array, &key, &init, BPF_ANY);
	timer = bpf_map_lookup_elem(&race_array, &key);
	if (!timer)
		return 0;

	ret = bpf_timer_init(timer, &race_array, CLOCK_MONOTONIC);
	if (ret && ret != -EBUSY)
		return 0;

	if (mode == TEST_RACE_SYNC || mode == TEST_RACE_ASYNC)
		bpf_timer_set_callback(timer, race_timer_callback);
	else if (mode == TEST_UPDATE)
		bpf_timer_set_callback(timer, update_self_callback);
	else
		bpf_timer_set_callback(timer, cancel_self_callback);

	bpf_timer_start(timer, 0, 0);

	if (mode == TEST_RACE_ASYNC)
		bpf_timer_cancel_async(timer);
	else if (mode == TEST_RACE_SYNC)
		bpf_timer_cancel(timer);

	return 0;
}

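/* Entry points: "race" is run directly from user space, while the nmi_*
 * programs are attached to a perf event so test_common() also runs from
 * NMI-like context.
 */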
SEC("syscall")
int race(void *ctx)
{
	return test_common(async_cancel ? TEST_RACE_ASYNC : TEST_RACE_SYNC);
}

SEC("perf_event")
int nmi_race(void *ctx)
{
	__sync_fetch_and_add(&test_hits, 1);
	return test_common(TEST_RACE_ASYNC);
}

SEC("perf_event")
int nmi_update(void *ctx)
{
	__sync_fetch_and_add(&test_hits, 1);
	return test_common(TEST_UPDATE);
}

SEC("perf_event")
int nmi_cancel(void *ctx)
{
	__sync_fetch_and_add(&test_hits, 1);
	return test_common(TEST_CANCEL);
}