// SPDX-License-Identifier: GPL-2.0
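/* Tests for BPF per-CPU allocation and for the BPF_F_CPU / BPF_F_ALL_CPUS
 * map flags, driven through the percpu_alloc_* skeletons.
 */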
#include <test_progs.h>
#include "cgroup_helpers.h"
#include "percpu_alloc_array.skel.h"
#include "percpu_alloc_cgrp_local_storage.skel.h"
#include "percpu_alloc_fail.skel.h"

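/* Load test_array_map_1..4 from the percpu_alloc_array skeleton, trigger
 * test_array_map_1 with a test run, and verify the per-CPU results the
 * programs publish through bss.
 */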
static void test_array(void)
{
	struct percpu_alloc_array *skel;
	int err, prog_fd;
	LIBBPF_OPTS(bpf_test_run_opts, topts);

	skel = percpu_alloc_array__open();
	if (!ASSERT_OK_PTR(skel, "percpu_alloc_array__open"))
		return;

	bpf_program__set_autoload(skel->progs.test_array_map_1, true);
	bpf_program__set_autoload(skel->progs.test_array_map_2, true);
	bpf_program__set_autoload(skel->progs.test_array_map_3, true);
	bpf_program__set_autoload(skel->progs.test_array_map_4, true);

	skel->bss->my_pid = getpid();
	skel->rodata->nr_cpus = libbpf_num_possible_cpus();

	err = percpu_alloc_array__load(skel);
	if (!ASSERT_OK(err, "percpu_alloc_array__load"))
		goto out;

	err = percpu_alloc_array__attach(skel);
	if (!ASSERT_OK(err, "percpu_alloc_array__attach"))
		goto out;

	prog_fd = bpf_program__fd(skel->progs.test_array_map_1);
	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "test_run array_map 1-4");
	ASSERT_EQ(topts.retval, 0, "test_run array_map 1-4");
	ASSERT_EQ(skel->bss->cpu0_field_d, 2, "cpu0_field_d");
	ASSERT_EQ(skel->bss->sum_field_c, 1, "sum_field_c");
out:
	percpu_alloc_array__destroy(skel);
}

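/* Same flow as test_array(), but for the sleepable program
 * test_array_map_10.
 */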
static void test_array_sleepable(void)
{
	struct percpu_alloc_array *skel;
	int err, prog_fd;
	LIBBPF_OPTS(bpf_test_run_opts, topts);

	skel = percpu_alloc_array__open();
	if (!ASSERT_OK_PTR(skel, "percpu_alloc_array__open"))
		return;

	bpf_program__set_autoload(skel->progs.test_array_map_10, true);

	skel->bss->my_pid = getpid();
	skel->rodata->nr_cpus = libbpf_num_possible_cpus();

	err = percpu_alloc_array__load(skel);
	if (!ASSERT_OK(err, "percpu_alloc_array__load"))
		goto out;

	err = percpu_alloc_array__attach(skel);
	if (!ASSERT_OK(err, "percpu_alloc_array__attach"))
		goto out;

	prog_fd = bpf_program__fd(skel->progs.test_array_map_10);
	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "test_run array_map_10");
	ASSERT_EQ(topts.retval, 0, "test_run array_map_10");
	ASSERT_EQ(skel->bss->cpu0_field_d, 2, "cpu0_field_d");
	ASSERT_EQ(skel->bss->sum_field_c, 1, "sum_field_c");
out:
	percpu_alloc_array__destroy(skel);
}

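/* Per-CPU allocation stored in cgroup local storage: join a dedicated
 * cgroup, run test_cgrp_local_storage_1, and check the per-CPU fields.
 */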
static void test_cgrp_local_storage(void)
{
	struct percpu_alloc_cgrp_local_storage *skel;
	int err, cgroup_fd, prog_fd;
	LIBBPF_OPTS(bpf_test_run_opts, topts);

	cgroup_fd = test__join_cgroup("/percpu_alloc");
	if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /percpu_alloc"))
		return;

	skel = percpu_alloc_cgrp_local_storage__open();
	if (!ASSERT_OK_PTR(skel, "percpu_alloc_cgrp_local_storage__open"))
		goto close_fd;

	skel->bss->my_pid = getpid();
	skel->rodata->nr_cpus = libbpf_num_possible_cpus();

	err = percpu_alloc_cgrp_local_storage__load(skel);
	if (!ASSERT_OK(err, "percpu_alloc_cgrp_local_storage__load"))
		goto destroy_skel;

	err = percpu_alloc_cgrp_local_storage__attach(skel);
	if (!ASSERT_OK(err, "percpu_alloc_cgrp_local_storage__attach"))
		goto destroy_skel;

	prog_fd = bpf_program__fd(skel->progs.test_cgrp_local_storage_1);
	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "test_run cgrp_local_storage 1-3");
	ASSERT_EQ(topts.retval, 0, "test_run cgrp_local_storage 1-3");
	ASSERT_EQ(skel->bss->cpu0_field_d, 2, "cpu0_field_d");
	ASSERT_EQ(skel->bss->sum_field_c, 1, "sum_field_c");

destroy_skel:
	percpu_alloc_cgrp_local_storage__destroy(skel);
close_fd:
	close(cgroup_fd);
}

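/* Negative cases: RUN_TESTS() loads each program in percpu_alloc_fail and
 * matches the result against the expectations annotated in the BPF source.
 */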
static void test_failure(void)
{
	RUN_TESTS(percpu_alloc_fail);
}

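/* Common checks for the BPF_F_CPU / BPF_F_ALL_CPUS flags on per-CPU maps.
 * BPF_F_CPU targets the CPU encoded in the upper 32 bits of the flags;
 * BPF_F_ALL_CPUS spreads one value across every CPU. @keys points at
 * @entries keys of @key_sz bytes each; batch-API coverage is optional via
 * @test_batch.
 */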
static void test_percpu_map_op_cpu_flag(struct bpf_map *map, void *keys, size_t key_sz, u32 entries,
					int nr_cpus, bool test_batch)
{
	size_t value_sz = sizeof(u32), value_sz_cpus, value_sz_total;
	u32 *values = NULL, *values_percpu = NULL;
	const u32 value = 0xDEADC0DE;
	int i, j, cpu, map_fd, err;
	u64 batch = 0, flags;
	void *values_row;
	u32 count, v;
	LIBBPF_OPTS(bpf_map_batch_opts, batch_opts);

	value_sz_cpus = value_sz * nr_cpus;
	values = calloc(entries, value_sz_cpus);
	if (!ASSERT_OK_PTR(values, "calloc values"))
		return;

	values_percpu = calloc(entries, roundup(value_sz, 8) * nr_cpus);
	if (!ASSERT_OK_PTR(values_percpu, "calloc values_percpu")) {
		free(values);
		return;
	}

	value_sz_total = value_sz_cpus * entries;
	memset(values, 0, value_sz_total);

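	/* Invalid flag combinations: BPF_F_CPU together with BPF_F_ALL_CPUS,
	 * BPF_F_ALL_CPUS on lookup, either flag mixed with BPF_F_LOCK, and a
	 * CPU number >= nr_cpus (which must fail with -ERANGE).
	 */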
	map_fd = bpf_map__fd(map);
	flags = BPF_F_CPU | BPF_F_ALL_CPUS;
	err = bpf_map_lookup_elem_flags(map_fd, keys, values, flags);
	if (!ASSERT_ERR(err, "bpf_map_lookup_elem_flags cpu|all_cpus"))
		goto out;

	err = bpf_map_update_elem(map_fd, keys, values, flags);
	if (!ASSERT_ERR(err, "bpf_map_update_elem cpu|all_cpus"))
		goto out;

	flags = BPF_F_ALL_CPUS;
	err = bpf_map_lookup_elem_flags(map_fd, keys, values, flags);
	if (!ASSERT_ERR(err, "bpf_map_lookup_elem_flags all_cpus"))
		goto out;

	flags = BPF_F_LOCK | BPF_F_CPU;
	err = bpf_map_lookup_elem_flags(map_fd, keys, values, flags);
	if (!ASSERT_ERR(err, "bpf_map_lookup_elem_flags BPF_F_LOCK"))
		goto out;

	flags = BPF_F_LOCK | BPF_F_ALL_CPUS;
	err = bpf_map_update_elem(map_fd, keys, values, flags);
	if (!ASSERT_ERR(err, "bpf_map_update_elem BPF_F_LOCK"))
		goto out;

	flags = (u64)nr_cpus << 32 | BPF_F_CPU;
	err = bpf_map_update_elem(map_fd, keys, values, flags);
	if (!ASSERT_EQ(err, -ERANGE, "bpf_map_update_elem -ERANGE"))
		goto out;

	err = bpf_map__update_elem(map, keys, key_sz, values, value_sz, flags);
	if (!ASSERT_EQ(err, -ERANGE, "bpf_map__update_elem -ERANGE"))
		goto out;

	err = bpf_map_lookup_elem_flags(map_fd, keys, values, flags);
	if (!ASSERT_EQ(err, -ERANGE, "bpf_map_lookup_elem_flags -ERANGE"))
		goto out;

	err = bpf_map__lookup_elem(map, keys, key_sz, values, value_sz, flags);
	if (!ASSERT_EQ(err, -ERANGE, "bpf_map__lookup_elem -ERANGE"))
		goto out;

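	/* For each CPU: zero the value everywhere, write a sentinel to that
	 * CPU only, then read each CPU back and check that only the targeted
	 * one changed.
	 */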
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		/* clear value on all cpus */
		values[0] = 0;
		flags = BPF_F_ALL_CPUS;
		for (i = 0; i < entries; i++) {
			err = bpf_map__update_elem(map, keys + i * key_sz, key_sz, values,
						   value_sz, flags);
			if (!ASSERT_OK(err, "bpf_map__update_elem all_cpus"))
				goto out;
		}

		/* update value on specified cpu */
		for (i = 0; i < entries; i++) {
			values[0] = value;
			flags = (u64)cpu << 32 | BPF_F_CPU;
			err = bpf_map__update_elem(map, keys + i * key_sz, key_sz, values,
						   value_sz, flags);
			if (!ASSERT_OK(err, "bpf_map__update_elem specified cpu"))
				goto out;

			/* lookup then check value on all CPUs */
			for (j = 0; j < nr_cpus; j++) {
				flags = (u64)j << 32 | BPF_F_CPU;
				err = bpf_map__lookup_elem(map, keys + i * key_sz, key_sz, values,
							   value_sz, flags);
				if (!ASSERT_OK(err, "bpf_map__lookup_elem specified cpu"))
					goto out;
				if (!ASSERT_EQ(values[0], j != cpu ? 0 : value,
					       "bpf_map__lookup_elem value on specified cpu"))
					goto out;
			}
		}
	}

	if (!test_batch)
		goto out;

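	/* The batch API must reject an out-of-range CPU number as well. */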
	count = entries;
	batch_opts.elem_flags = (u64)nr_cpus << 32 | BPF_F_CPU;
	err = bpf_map_update_batch(map_fd, keys, values, &count, &batch_opts);
	if (!ASSERT_EQ(err, -ERANGE, "bpf_map_update_batch -ERANGE"))
		goto out;

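	/* Repeat the per-CPU scenario with the batch API: clear on all CPUs,
	 * update a single CPU, then verify via a per-CPU lookup and via a
	 * full (flags == 0) per-CPU value array.
	 */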
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		memset(values, 0, value_sz_total);

		/* clear values across all CPUs */
		count = entries;
		batch_opts.elem_flags = BPF_F_ALL_CPUS;
		err = bpf_map_update_batch(map_fd, keys, values, &count, &batch_opts);
		if (!ASSERT_OK(err, "bpf_map_update_batch all_cpus"))
			goto out;
		if (!ASSERT_EQ(count, entries, "bpf_map_update_batch count"))
			goto out;

		/* update values on specified CPU */
		for (i = 0; i < entries; i++)
			values[i] = value;

		count = entries;
		batch_opts.elem_flags = (u64)cpu << 32 | BPF_F_CPU;
		err = bpf_map_update_batch(map_fd, keys, values, &count, &batch_opts);
		if (!ASSERT_OK(err, "bpf_map_update_batch specified cpu"))
			goto out;
		if (!ASSERT_EQ(count, entries, "bpf_map_update_batch count"))
			goto out;

		/* lookup values on specified CPU */
		batch = 0;
		count = entries;
		memset(values, 0, entries * value_sz);
		err = bpf_map_lookup_batch(map_fd, NULL, &batch, keys, values, &count, &batch_opts);
		if (!ASSERT_TRUE(!err || err == -ENOENT, "bpf_map_lookup_batch specified cpu"))
			goto out;
		if (!ASSERT_EQ(count, entries, "bpf_map_lookup_batch count"))
			goto out;

		for (i = 0; i < entries; i++)
			if (!ASSERT_EQ(values[i], value,
				       "bpf_map_lookup_batch value on specified cpu"))
				goto out;

		/* lookup values from all CPUs */
		batch = 0;
		count = entries;
		batch_opts.elem_flags = 0;
		memset(values_percpu, 0, roundup(value_sz, 8) * nr_cpus * entries);
		err = bpf_map_lookup_batch(map_fd, NULL, &batch, keys, values_percpu, &count,
					   &batch_opts);
		if (!ASSERT_TRUE(!err || err == -ENOENT, "bpf_map_lookup_batch all_cpus"))
			goto out;
		if (!ASSERT_EQ(count, entries, "bpf_map_lookup_batch count"))
			goto out;

		for (i = 0; i < entries; i++) {
			values_row = (void *)values_percpu +
				     roundup(value_sz, 8) * i * nr_cpus;
			for (j = 0; j < nr_cpus; j++) {
				v = *(u32 *)(values_row + roundup(value_sz, 8) * j);
				if (!ASSERT_EQ(v, j != cpu ? 0 : value,
					       "bpf_map_lookup_batch value all_cpus"))
					goto out;
			}
		}
	}

out:
	free(values_percpu);
	free(values);
}

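/* Create a per-CPU map of the given type with 2 * nr_cpus entries and run
 * the common BPF_F_CPU / BPF_F_ALL_CPUS checks over the first nr_cpus keys.
 */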
static void test_percpu_map_cpu_flag(enum bpf_map_type map_type)
{
	struct percpu_alloc_array *skel;
	size_t key_sz = sizeof(int);
	int *keys, nr_cpus, i, err;
	struct bpf_map *map;
	u32 max_entries;

	nr_cpus = libbpf_num_possible_cpus();
	if (!ASSERT_GT(nr_cpus, 0, "libbpf_num_possible_cpus"))
		return;

	max_entries = nr_cpus * 2;
	keys = calloc(max_entries, key_sz);
	if (!ASSERT_OK_PTR(keys, "calloc keys"))
		return;

	for (i = 0; i < max_entries; i++)
		keys[i] = i;

	skel = percpu_alloc_array__open();
	if (!ASSERT_OK_PTR(skel, "percpu_alloc_array__open")) {
		free(keys);
		return;
	}

	map = skel->maps.percpu;
	bpf_map__set_type(map, map_type);
	bpf_map__set_max_entries(map, max_entries);

	err = percpu_alloc_array__load(skel);
	if (!ASSERT_OK(err, "percpu_alloc_array__load"))
		goto out;

	test_percpu_map_op_cpu_flag(map, keys, key_sz, nr_cpus, nr_cpus, true);
out:
	percpu_alloc_array__destroy(skel);
	free(keys);
}

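/* Per-CPU map types covered by the common cpu-flag checks. */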
static void test_percpu_array_cpu_flag(void)
{
	test_percpu_map_cpu_flag(BPF_MAP_TYPE_PERCPU_ARRAY);
}

static void test_percpu_hash_cpu_flag(void)
{
	test_percpu_map_cpu_flag(BPF_MAP_TYPE_PERCPU_HASH);
}

static void test_lru_percpu_hash_cpu_flag(void)
{
	test_percpu_map_cpu_flag(BPF_MAP_TYPE_LRU_PERCPU_HASH);
}

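/* Per-CPU cgroup storage gets its entry when a program using it is
 * attached, so attach cgroup_egress to a fresh cgroup first, look up the
 * storage key, and run the common checks on that single entry (without
 * batch coverage).
 */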
static void test_percpu_cgroup_storage_cpu_flag(void)
{
	struct percpu_alloc_array *skel = NULL;
	struct bpf_cgroup_storage_key key;
	int cgroup, prog_fd, nr_cpus, err;
	struct bpf_map *map;

	nr_cpus = libbpf_num_possible_cpus();
	if (!ASSERT_GT(nr_cpus, 0, "libbpf_num_possible_cpus"))
		return;

	err = setup_cgroup_environment();
	if (!ASSERT_OK(err, "setup_cgroup_environment"))
		return;

	cgroup = create_and_get_cgroup("/cg_percpu");
	if (!ASSERT_GE(cgroup, 0, "create_and_get_cgroup")) {
		cleanup_cgroup_environment();
		return;
	}

	err = join_cgroup("/cg_percpu");
	if (!ASSERT_OK(err, "join_cgroup"))
		goto out;

	skel = percpu_alloc_array__open_and_load();
	if (!ASSERT_OK_PTR(skel, "percpu_alloc_array__open_and_load"))
		goto out;

	prog_fd = bpf_program__fd(skel->progs.cgroup_egress);
	err = bpf_prog_attach(prog_fd, cgroup, BPF_CGROUP_INET_EGRESS, 0);
	if (!ASSERT_OK(err, "bpf_prog_attach"))
		goto out;

	map = skel->maps.percpu_cgroup_storage;
	err = bpf_map_get_next_key(bpf_map__fd(map), NULL, &key);
	if (!ASSERT_OK(err, "bpf_map_get_next_key"))
		goto out;

	test_percpu_map_op_cpu_flag(map, &key, sizeof(key), 1, nr_cpus, false);
out:
	bpf_prog_detach2(-1, cgroup, BPF_CGROUP_INET_EGRESS);
	close(cgroup);
	cleanup_cgroup_environment();
	percpu_alloc_array__destroy(skel);
}

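/* BPF_F_CPU and BPF_F_ALL_CPUS are only meaningful for per-CPU maps;
 * every update/lookup on a plain map type must reject them.
 */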
static void test_map_op_cpu_flag(enum bpf_map_type map_type)
{
	u32 max_entries = 1, count = max_entries;
	u64 flags, batch = 0, val = 0;
	int err, map_fd, key = 0;
	LIBBPF_OPTS(bpf_map_batch_opts, batch_opts);

	map_fd = bpf_map_create(map_type, "test_cpu_flag", sizeof(int), sizeof(u64), max_entries,
				NULL);
	if (!ASSERT_GE(map_fd, 0, "bpf_map_create"))
		return;

	flags = BPF_F_ALL_CPUS;
	err = bpf_map_update_elem(map_fd, &key, &val, flags);
	ASSERT_ERR(err, "bpf_map_update_elem all_cpus");

	batch_opts.elem_flags = BPF_F_ALL_CPUS;
	err = bpf_map_update_batch(map_fd, &key, &val, &count, &batch_opts);
	ASSERT_ERR(err, "bpf_map_update_batch all_cpus");

	flags = BPF_F_CPU;
	err = bpf_map_lookup_elem_flags(map_fd, &key, &val, flags);
	ASSERT_ERR(err, "bpf_map_lookup_elem_flags cpu");

	batch_opts.elem_flags = BPF_F_CPU;
	err = bpf_map_lookup_batch(map_fd, NULL, &batch, &key, &val, &count, &batch_opts);
	ASSERT_ERR(err, "bpf_map_lookup_batch cpu");

	close(map_fd);
}

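/* Non-per-CPU map types that must reject the cpu flags. */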
static void test_array_cpu_flag(void)
{
	test_map_op_cpu_flag(BPF_MAP_TYPE_ARRAY);
}

static void test_hash_cpu_flag(void)
{
	test_map_op_cpu_flag(BPF_MAP_TYPE_HASH);
}

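/* Entry point: each scenario above runs as its own subtest. */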
void test_percpu_alloc(void)
{
	if (test__start_subtest("array"))
		test_array();
	if (test__start_subtest("array_sleepable"))
		test_array_sleepable();
	if (test__start_subtest("cgrp_local_storage"))
		test_cgrp_local_storage();
	if (test__start_subtest("failure_tests"))
		test_failure();
	if (test__start_subtest("cpu_flag_percpu_array"))
		test_percpu_array_cpu_flag();
	if (test__start_subtest("cpu_flag_percpu_hash"))
		test_percpu_hash_cpu_flag();
	if (test__start_subtest("cpu_flag_lru_percpu_hash"))
		test_lru_percpu_hash_cpu_flag();
	if (test__start_subtest("cpu_flag_percpu_cgroup_storage"))
		test_percpu_cgroup_storage_cpu_flag();
	if (test__start_subtest("cpu_flag_array"))
		test_array_cpu_flag();
	if (test__start_subtest("cpu_flag_hash"))
		test_hash_cpu_flag();
}