Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0-only
2/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
3#include <linux/capability.h>
4#include <linux/err.h>
5#include <stdlib.h>
6#include <test_progs.h>
7#include <bpf/btf.h>
8
9#include "autoconf_helper.h"
10#include "disasm_helpers.h"
11#include "unpriv_helpers.h"
12#include "cap_helpers.h"
13#include "jit_disasm_helpers.h"
14
/* If @str starts with @pfx, return a pointer to the remainder of @str
 * (just past the prefix); otherwise return NULL.
 */
static inline const char *str_has_pfx(const char *str, const char *pfx)
{
	size_t pfx_len = strlen(pfx);

	if (strncmp(str, pfx, pfx_len) != 0)
		return NULL;
	return str + pfx_len;
}
21
/* Size of the buffer used to capture verifier/program logs: 2 MiB. */
#define TEST_LOADER_LOG_BUF_SZ 2097152


/* Warning: duplicated in bpf_misc.h */
#define POINTER_VALUE 0xbadcafe
#define TEST_DATA_LEN 64

/* Collapse the kernel config option into a plain 0/1 constant usable
 * in ordinary C expressions below.
 */
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define EFFICIENT_UNALIGNED_ACCESS 1
#else
#define EFFICIENT_UNALIGNED_ACCESS 0
#endif

/* Cached result of get_unpriv_disabled(); -1 means "not queried yet"
 * (see can_execute_unpriv()).
 */
static int sysctl_unpriv_disabled = -1;
36
/* Privilege modes a test can run under; used as bit flags in
 * test_spec::mode_mask.
 */
enum mode {
	PRIV = 1,
	UNPRIV = 2
};

/* Runtime (JIT vs interpreter) selection bits; used in
 * test_spec::load_mask.
 */
enum load_mode {
	JITED = 1 << 0,
	NO_JITED = 1 << 1,
};
46
/* Per-privilege-mode half of a test specification (one for priv,
 * one for unpriv inside struct test_spec).
 */
struct test_subspec {
	char *name;		/* heap-allocated subtest name */
	char *description;	/* optional heap-allocated description */
	bool expect_failure;	/* program load is expected to fail */
	struct expected_msgs expect_msgs;	/* patterns for verifier log */
	struct expected_msgs expect_xlated;	/* patterns for xlated disasm */
	struct expected_msgs jited;		/* patterns for jited disasm */
	struct expected_msgs stderr;		/* patterns for BPF stderr stream */
	struct expected_msgs stdout;		/* patterns for BPF stdout stream */
	int retval;		/* expected test-run return value */
	bool execute;		/* whether to do a prog test run */
	__u64 caps;		/* caps to enable for unpriv run (from __caps_unpriv) */
};
60
/* Full specification of one test program, parsed from its BTF decl tags
 * by parse_test_spec().
 */
struct test_spec {
	const char *prog_name;		/* BPF program name (owned by bpf_object) */
	struct test_subspec priv;	/* privileged-mode expectations */
	struct test_subspec unpriv;	/* unprivileged-mode expectations */
	const char *btf_custom_path;	/* custom BTF path (from __btf_path) */
	int log_level;			/* verifier log level override */
	int prog_flags;			/* BPF_F_* flags to set on the program */
	int mode_mask;			/* which of PRIV/UNPRIV to run */
	int arch_mask;			/* enum arch bits; -1 = any arch */
	int load_mask;			/* JITED/NO_JITED runtime selection */
	int linear_sz;			/* linear data size for skb test runs */
	bool auxiliary;			/* helper prog, not a standalone test */
	bool valid;			/* spec fully parsed without error */
};
75
76static int tester_init(struct test_loader *tester)
77{
78 if (!tester->log_buf) {
79 tester->log_buf_sz = TEST_LOADER_LOG_BUF_SZ;
80 tester->log_buf = calloc(tester->log_buf_sz, 1);
81 if (!ASSERT_OK_PTR(tester->log_buf, "tester_log_buf"))
82 return -ENOMEM;
83 }
84
85 return 0;
86}
87
88void test_loader_fini(struct test_loader *tester)
89{
90 if (!tester)
91 return;
92
93 free(tester->log_buf);
94}
95
96static void free_msgs(struct expected_msgs *msgs)
97{
98 int i;
99
100 for (i = 0; i < msgs->cnt; i++)
101 if (msgs->patterns[i].is_regex)
102 regfree(&msgs->patterns[i].regex);
103 free(msgs->patterns);
104 msgs->patterns = NULL;
105 msgs->cnt = 0;
106}
107
108static void free_test_spec(struct test_spec *spec)
109{
110 /* Deallocate expect_msgs arrays. */
111 free_msgs(&spec->priv.expect_msgs);
112 free_msgs(&spec->unpriv.expect_msgs);
113 free_msgs(&spec->priv.expect_xlated);
114 free_msgs(&spec->unpriv.expect_xlated);
115 free_msgs(&spec->priv.jited);
116 free_msgs(&spec->unpriv.jited);
117 free_msgs(&spec->unpriv.stderr);
118 free_msgs(&spec->priv.stderr);
119 free_msgs(&spec->unpriv.stdout);
120 free_msgs(&spec->priv.stdout);
121
122 free(spec->priv.name);
123 free(spec->priv.description);
124 free(spec->unpriv.name);
125 free(spec->unpriv.description);
126 spec->priv.name = NULL;
127 spec->priv.description = NULL;
128 spec->unpriv.name = NULL;
129 spec->unpriv.description = NULL;
130}
131
/* Compiles regular expression matching pattern.
 * Pattern has a special syntax:
 *
 *   pattern := (<verbatim text> | regex)*
 *   regex := "{{" <posix extended regular expression> "}}"
 *
 * In other words, pattern is a verbatim text with inclusion
 * of regular expressions enclosed in "{{" "}}" pairs.
 * For example, pattern "foo{{[0-9]+}}" matches strings like
 * "foo0", "foo007", etc.
 *
 * On success the compiled regex is stored in *regex (caller must
 * regfree() it); returns 0, or -EINVAL on overlong pattern, unbalanced
 * "{{", or a regcomp() failure.
 */
static int compile_regex(const char *pattern, regex_t *regex)
{
	char err_buf[256], buf[256] = {}, *ptr, *buf_end;
	const char *original_pattern = pattern, *next;
	bool in_regex = false;
	int err;

	buf_end = buf + sizeof(buf);
	ptr = buf;
	/* -2 keeps room for a 2-byte escape sequence plus the implicit NUL
	 * (buf was zero-initialized, so no explicit terminator is written)
	 */
	while (*pattern && ptr < buf_end - 2) {
		if (!in_regex && (next = str_has_pfx(pattern, "{{"))) {
			in_regex = true;
			pattern = next;
			continue;
		}
		if (in_regex && (next = str_has_pfx(pattern, "}}"))) {
			in_regex = false;
			pattern = next;
			continue;
		}
		/* inside {{...}}: copy regex text through unmodified */
		if (in_regex) {
			*ptr++ = *pattern++;
			continue;
		}
		/* list of characters that need escaping for extended posix regex */
		if (strchr(".[]\\()*+?{}|^$", *pattern)) {
			*ptr++ = '\\';
			*ptr++ = *pattern++;
			continue;
		}
		*ptr++ = *pattern++;
	}
	/* loop exited with input remaining => output buffer was too small */
	if (*pattern) {
		PRINT_FAIL("Regexp too long: '%s'\n", original_pattern);
		return -EINVAL;
	}
	if (in_regex) {
		PRINT_FAIL("Regexp has open '{{' but no closing '}}': '%s'\n", original_pattern);
		return -EINVAL;
	}
	err = regcomp(regex, buf, REG_EXTENDED | REG_NEWLINE);
	if (err != 0) {
		regerror(err, regex, err_buf, sizeof(err_buf));
		PRINT_FAIL("Regexp compilation error in '%s': '%s'\n", buf, err_buf);
		return -EINVAL;
	}
	return 0;
}
191
192static int __push_msg(const char *pattern, bool on_next_line, bool negative,
193 struct expected_msgs *msgs)
194{
195 struct expect_msg *msg;
196 void *tmp;
197 int err;
198
199 tmp = realloc(msgs->patterns,
200 (1 + msgs->cnt) * sizeof(struct expect_msg));
201 if (!tmp) {
202 ASSERT_FAIL("failed to realloc memory for messages\n");
203 return -ENOMEM;
204 }
205 msgs->patterns = tmp;
206 msg = &msgs->patterns[msgs->cnt];
207 msg->on_next_line = on_next_line;
208 msg->substr = pattern;
209 msg->negative = negative;
210 msg->is_regex = false;
211 if (strstr(pattern, "{{")) {
212 err = compile_regex(pattern, &msg->regex);
213 if (err)
214 return err;
215 msg->is_regex = true;
216 }
217 msgs->cnt += 1;
218 return 0;
219}
220
221static int clone_msgs(struct expected_msgs *from, struct expected_msgs *to)
222{
223 struct expect_msg *msg;
224 int i, err;
225
226 for (i = 0; i < from->cnt; i++) {
227 msg = &from->patterns[i];
228 err = __push_msg(msg->substr, msg->on_next_line, msg->negative, to);
229 if (err)
230 return err;
231 }
232 return 0;
233}
234
/* Append a free-standing pattern (no next-line constraint) to @msgs. */
static int push_msg(const char *substr, bool negative, struct expected_msgs *msgs)
{
	return __push_msg(substr, /*on_next_line=*/false, negative, msgs);
}
239
/* Append a disassembly pattern to @msgs. The special pattern "..."
 * records nothing; it only clears *on_next_line so the following
 * pattern may match anywhere later. After a real pattern is pushed,
 * *on_next_line is set so the next pattern must match the next line.
 */
static int push_disasm_msg(const char *regex_str, bool *on_next_line, struct expected_msgs *msgs)
{
	int err;

	if (!strcmp(regex_str, "...")) {
		*on_next_line = false;
		return 0;
	}

	err = __push_msg(regex_str, *on_next_line, false, msgs);
	if (!err)
		*on_next_line = true;
	return err;
}
254
/* Parse a decimal or "0x"-prefixed hexadecimal integer from @str into
 * *val. @name is used only for the failure message. Returns 0 or -EINVAL.
 */
static int parse_int(const char *str, int *val, const char *name)
{
	long parsed;
	char *end;

	errno = 0;
	if (str_has_pfx(str, "0x"))
		parsed = strtol(str + 2, &end, 16);
	else
		parsed = strtol(str, &end, 10);
	/* reject overflow/underflow and trailing garbage */
	if (errno != 0 || *end != '\0') {
		PRINT_FAIL("failed to parse %s from '%s'\n", name, str);
		return -EINVAL;
	}
	*val = parsed;
	return 0;
}
272
273static int parse_caps(const char *str, __u64 *val, const char *name)
274{
275 int cap_flag = 0;
276 char *token = NULL, *saveptr = NULL;
277
278 char *str_cpy = strdup(str);
279 if (str_cpy == NULL) {
280 PRINT_FAIL("Memory allocation failed\n");
281 return -EINVAL;
282 }
283
284 token = strtok_r(str_cpy, "|", &saveptr);
285 while (token != NULL) {
286 errno = 0;
287 if (!strncmp("CAP_", token, sizeof("CAP_") - 1)) {
288 PRINT_FAIL("define %s constant in bpf_misc.h, failed to parse caps\n", token);
289 return -EINVAL;
290 }
291 cap_flag = strtol(token, NULL, 10);
292 if (!cap_flag || errno) {
293 PRINT_FAIL("failed to parse caps %s\n", name);
294 return -EINVAL;
295 }
296 *val |= (1ULL << cap_flag);
297 token = strtok_r(NULL, "|", &saveptr);
298 }
299
300 free(str_cpy);
301 return 0;
302}
303
/* Parse an expected return value, treating the sentinel "_INT_MIN"
 * specially and delegating everything else to parse_int().
 *
 * INT_MIN is defined as (-INT_MAX -1), i.e. it doesn't expand to a
 * single int and cannot be parsed with strtol, so it is handled
 * separately here. In addition, it expands to different expressions in
 * different compilers so a prefixed _INT_MIN spelling is used instead.
 */
static int parse_retval(const char *str, int *val, const char *name)
{
	if (!strcmp(str, "_INT_MIN")) {
		*val = INT_MIN;
		return 0;
	}
	return parse_int(str, val, name);
}
319
/* Set @flag bits in *flags, or clear them when @clear is true. */
static void update_flags(int *flags, int flag, bool clear)
{
	if (clear) {
		*flags &= ~flag;
		return;
	}
	*flags |= flag;
}
327
/* Skip the "comment:<number>:" prefix that every test decl tag carries.
 * Returns a pointer just past the prefix, or NULL if @s doesn't match.
 */
static const char *skip_decl_tag_pfx(const char *s)
{
	int consumed = 0;

	/* %n records how many chars the prefix consumed; 0 means no match */
	if (sscanf(s, "comment:%*d:%n", &consumed) < 0)
		return NULL;
	if (consumed == 0)
		return NULL;
	return s + consumed;
}
336
/* qsort() comparator for decl tag strings. Uses version-aware ordering
 * so "comment:2:..." sorts before "comment:10:..." (plain strcmp would
 * not) — see the rationale above collect_decl_tags().
 */
static int compare_decl_tags(const void *a, const void *b)
{
	return strverscmp(*(const char **)a, *(const char **)b);
}
341
/*
 * Compilers don't guarantee order in which BTF attributes would be generated,
 * while order is important for test tags like __msg.
 * Each test tag has the following prefix: "comment:" __COUNTER__,
 * when sorted using strverscmp this gives same order as in the original C code.
 *
 * Returns a heap-allocated array of the decl tag strings attached
 * directly to BTF type @id (component_idx == -1), sorted into source
 * order; *cnt receives the count. Returns ERR_PTR(-ENOMEM) on OOM.
 * Caller frees the array; the strings themselves are owned by @btf.
 */
static const char **collect_decl_tags(struct btf *btf, int id, int *cnt)
{
	const char **tmp, **tags = NULL;
	const struct btf_type *t;
	int i;

	*cnt = 0;
	/* BTF type IDs start at 1 */
	for (i = 1; i < btf__type_cnt(btf); i++) {
		t = btf__type_by_id(btf, i);
		/* skip anything but decl tags attached to the target type itself */
		if (!btf_is_decl_tag(t) || t->type != id || btf_decl_tag(t)->component_idx != -1)
			continue;
		tmp = realloc(tags, (*cnt + 1) * sizeof(*tags));
		if (!tmp) {
			free(tags);
			return ERR_PTR(-ENOMEM);
		}
		tags = tmp;
		tags[(*cnt)++] = btf__str_by_offset(btf, t->name_off);
	}

	if (*cnt)
		qsort(tags, *cnt, sizeof(*tags), compare_decl_tags);
	return tags;
}
372
/* Architectures encoded as bit flags so they can be OR-ed into
 * test_spec::arch_mask.
 */
enum arch {
	ARCH_UNKNOWN = 0x1,
	ARCH_X86_64 = 0x2,
	ARCH_ARM64 = 0x4,
	ARCH_RISCV64 = 0x8,
	ARCH_S390X = 0x10,
};
380
/* Map compile-time target macros to the corresponding enum arch bit;
 * falls back to ARCH_UNKNOWN for unrecognized targets.
 */
static int get_current_arch(void)
{
#if defined(__x86_64__)
	return ARCH_X86_64;
#elif defined(__aarch64__)
	return ARCH_ARM64;
#elif defined(__riscv) && __riscv_xlen == 64
	return ARCH_RISCV64;
#elif defined(__s390x__)
	return ARCH_S390X;
#endif
	return ARCH_UNKNOWN;
}
394
395/* Uses btf_decl_tag attributes to describe the expected test
396 * behavior, see bpf_misc.h for detailed description of each attribute
397 * and attribute combinations.
398 */
399static int parse_test_spec(struct test_loader *tester,
400 struct bpf_object *obj,
401 struct bpf_program *prog,
402 struct test_spec *spec)
403{
404 const char *description = NULL;
405 bool has_unpriv_result = false;
406 bool has_unpriv_retval = false;
407 bool unpriv_xlated_on_next_line = true;
408 bool xlated_on_next_line = true;
409 bool unpriv_jit_on_next_line;
410 bool jit_on_next_line;
411 bool stderr_on_next_line = true;
412 bool unpriv_stderr_on_next_line = true;
413 bool stdout_on_next_line = true;
414 bool unpriv_stdout_on_next_line = true;
415 bool collect_jit = false;
416 const char **tags = NULL;
417 int func_id, i, nr_tags;
418 int err = 0;
419 u32 arch_mask = 0;
420 u32 load_mask = 0;
421 struct btf *btf;
422 enum arch arch;
423
424 memset(spec, 0, sizeof(*spec));
425
426 spec->prog_name = bpf_program__name(prog);
427 spec->prog_flags = testing_prog_flags();
428
429 btf = bpf_object__btf(obj);
430 if (!btf) {
431 ASSERT_FAIL("BPF object has no BTF");
432 return -EINVAL;
433 }
434
435 func_id = btf__find_by_name_kind(btf, spec->prog_name, BTF_KIND_FUNC);
436 if (func_id < 0) {
437 ASSERT_FAIL("failed to find FUNC BTF type for '%s'", spec->prog_name);
438 return -EINVAL;
439 }
440
441 tags = collect_decl_tags(btf, func_id, &nr_tags);
442 if (IS_ERR(tags))
443 return PTR_ERR(tags);
444
445 for (i = 0; i < nr_tags; i++) {
446 const char *s, *val, *msg;
447 bool clear;
448 int flags;
449
450 s = skip_decl_tag_pfx(tags[i]);
451 if (!s)
452 continue;
453 if ((val = str_has_pfx(s, "test_description="))) {
454 description = val;
455 } else if (strcmp(s, "test_expect_failure") == 0) {
456 spec->priv.expect_failure = true;
457 spec->mode_mask |= PRIV;
458 } else if (strcmp(s, "test_expect_success") == 0) {
459 spec->priv.expect_failure = false;
460 spec->mode_mask |= PRIV;
461 } else if (strcmp(s, "test_expect_failure_unpriv") == 0) {
462 spec->unpriv.expect_failure = true;
463 spec->mode_mask |= UNPRIV;
464 has_unpriv_result = true;
465 } else if (strcmp(s, "test_expect_success_unpriv") == 0) {
466 spec->unpriv.expect_failure = false;
467 spec->mode_mask |= UNPRIV;
468 has_unpriv_result = true;
469 } else if (strcmp(s, "test_auxiliary") == 0) {
470 spec->auxiliary = true;
471 spec->mode_mask |= PRIV;
472 } else if (strcmp(s, "test_auxiliary_unpriv") == 0) {
473 spec->auxiliary = true;
474 spec->mode_mask |= UNPRIV;
475 } else if ((msg = str_has_pfx(s, "test_expect_msg="))) {
476 err = push_msg(msg, false, &spec->priv.expect_msgs);
477 if (err)
478 goto cleanup;
479 spec->mode_mask |= PRIV;
480 } else if ((msg = str_has_pfx(s, "test_expect_not_msg="))) {
481 err = push_msg(msg, true, &spec->priv.expect_msgs);
482 if (err)
483 goto cleanup;
484 spec->mode_mask |= PRIV;
485 } else if ((msg = str_has_pfx(s, "test_expect_msg_unpriv="))) {
486 err = push_msg(msg, false, &spec->unpriv.expect_msgs);
487 if (err)
488 goto cleanup;
489 spec->mode_mask |= UNPRIV;
490 } else if ((msg = str_has_pfx(s, "test_expect_not_msg_unpriv="))) {
491 err = push_msg(msg, true, &spec->unpriv.expect_msgs);
492 if (err)
493 goto cleanup;
494 spec->mode_mask |= UNPRIV;
495 } else if ((msg = str_has_pfx(s, "test_jited="))) {
496 if (arch_mask == 0) {
497 PRINT_FAIL("__jited used before __arch_*");
498 goto cleanup;
499 }
500 if (collect_jit) {
501 err = push_disasm_msg(msg, &jit_on_next_line,
502 &spec->priv.jited);
503 if (err)
504 goto cleanup;
505 spec->mode_mask |= PRIV;
506 }
507 } else if ((msg = str_has_pfx(s, "test_jited_unpriv="))) {
508 if (arch_mask == 0) {
509 PRINT_FAIL("__unpriv_jited used before __arch_*");
510 goto cleanup;
511 }
512 if (collect_jit) {
513 err = push_disasm_msg(msg, &unpriv_jit_on_next_line,
514 &spec->unpriv.jited);
515 if (err)
516 goto cleanup;
517 spec->mode_mask |= UNPRIV;
518 }
519 } else if ((msg = str_has_pfx(s, "test_expect_xlated="))) {
520 err = push_disasm_msg(msg, &xlated_on_next_line,
521 &spec->priv.expect_xlated);
522 if (err)
523 goto cleanup;
524 spec->mode_mask |= PRIV;
525 } else if ((msg = str_has_pfx(s, "test_expect_xlated_unpriv="))) {
526 err = push_disasm_msg(msg, &unpriv_xlated_on_next_line,
527 &spec->unpriv.expect_xlated);
528 if (err)
529 goto cleanup;
530 spec->mode_mask |= UNPRIV;
531 } else if ((val = str_has_pfx(s, "test_retval="))) {
532 err = parse_retval(val, &spec->priv.retval, "__retval");
533 if (err)
534 goto cleanup;
535 spec->priv.execute = true;
536 spec->mode_mask |= PRIV;
537 } else if ((val = str_has_pfx(s, "test_retval_unpriv="))) {
538 err = parse_retval(val, &spec->unpriv.retval, "__retval_unpriv");
539 if (err)
540 goto cleanup;
541 spec->mode_mask |= UNPRIV;
542 spec->unpriv.execute = true;
543 has_unpriv_retval = true;
544 } else if ((val = str_has_pfx(s, "test_log_level="))) {
545 err = parse_int(val, &spec->log_level, "test log level");
546 if (err)
547 goto cleanup;
548 } else if ((val = str_has_pfx(s, "test_prog_flags="))) {
549 clear = val[0] == '!';
550 if (clear)
551 val++;
552
553 if (strcmp(val, "BPF_F_STRICT_ALIGNMENT") == 0) {
554 update_flags(&spec->prog_flags, BPF_F_STRICT_ALIGNMENT, clear);
555 } else if (strcmp(val, "BPF_F_ANY_ALIGNMENT") == 0) {
556 update_flags(&spec->prog_flags, BPF_F_ANY_ALIGNMENT, clear);
557 } else if (strcmp(val, "BPF_F_TEST_RND_HI32") == 0) {
558 update_flags(&spec->prog_flags, BPF_F_TEST_RND_HI32, clear);
559 } else if (strcmp(val, "BPF_F_TEST_STATE_FREQ") == 0) {
560 update_flags(&spec->prog_flags, BPF_F_TEST_STATE_FREQ, clear);
561 } else if (strcmp(val, "BPF_F_SLEEPABLE") == 0) {
562 update_flags(&spec->prog_flags, BPF_F_SLEEPABLE, clear);
563 } else if (strcmp(val, "BPF_F_XDP_HAS_FRAGS") == 0) {
564 update_flags(&spec->prog_flags, BPF_F_XDP_HAS_FRAGS, clear);
565 } else if (strcmp(val, "BPF_F_TEST_REG_INVARIANTS") == 0) {
566 update_flags(&spec->prog_flags, BPF_F_TEST_REG_INVARIANTS, clear);
567 } else /* assume numeric value */ {
568 err = parse_int(val, &flags, "test prog flags");
569 if (err)
570 goto cleanup;
571 update_flags(&spec->prog_flags, flags, clear);
572 }
573 } else if ((val = str_has_pfx(s, "test_arch="))) {
574 if (strcmp(val, "X86_64") == 0) {
575 arch = ARCH_X86_64;
576 } else if (strcmp(val, "ARM64") == 0) {
577 arch = ARCH_ARM64;
578 } else if (strcmp(val, "RISCV64") == 0) {
579 arch = ARCH_RISCV64;
580 } else if (strcmp(val, "s390x") == 0) {
581 arch = ARCH_S390X;
582 } else {
583 PRINT_FAIL("bad arch spec: '%s'\n", val);
584 err = -EINVAL;
585 goto cleanup;
586 }
587 arch_mask |= arch;
588 collect_jit = get_current_arch() == arch;
589 unpriv_jit_on_next_line = true;
590 jit_on_next_line = true;
591 } else if ((val = str_has_pfx(s, "test_btf_path="))) {
592 spec->btf_custom_path = val;
593 } else if ((val = str_has_pfx(s, "test_caps_unpriv="))) {
594 err = parse_caps(val, &spec->unpriv.caps, "test caps");
595 if (err)
596 goto cleanup;
597 spec->mode_mask |= UNPRIV;
598 } else if ((val = str_has_pfx(s, "load_mode="))) {
599 if (strcmp(val, "jited") == 0) {
600 load_mask = JITED;
601 } else if (strcmp(val, "no_jited") == 0) {
602 load_mask = NO_JITED;
603 } else {
604 PRINT_FAIL("bad load spec: '%s'", val);
605 err = -EINVAL;
606 goto cleanup;
607 }
608 } else if ((msg = str_has_pfx(s, "test_expect_stderr="))) {
609 err = push_disasm_msg(msg, &stderr_on_next_line,
610 &spec->priv.stderr);
611 if (err)
612 goto cleanup;
613 } else if ((msg = str_has_pfx(s, "test_expect_stderr_unpriv="))) {
614 err = push_disasm_msg(msg, &unpriv_stderr_on_next_line,
615 &spec->unpriv.stderr);
616 if (err)
617 goto cleanup;
618 } else if ((msg = str_has_pfx(s, "test_expect_stdout="))) {
619 err = push_disasm_msg(msg, &stdout_on_next_line,
620 &spec->priv.stdout);
621 if (err)
622 goto cleanup;
623 } else if ((msg = str_has_pfx(s, "test_expect_stdout_unpriv="))) {
624 err = push_disasm_msg(msg, &unpriv_stdout_on_next_line,
625 &spec->unpriv.stdout);
626 if (err)
627 goto cleanup;
628 } else if ((val = str_has_pfx(s, "test_linear_size="))) {
629 switch (bpf_program__type(prog)) {
630 case BPF_PROG_TYPE_SCHED_ACT:
631 case BPF_PROG_TYPE_SCHED_CLS:
632 case BPF_PROG_TYPE_CGROUP_SKB:
633 err = parse_int(val, &spec->linear_sz, "test linear size");
634 if (err)
635 goto cleanup;
636 break;
637 default:
638 PRINT_FAIL("__linear_size for unsupported program type");
639 err = -EINVAL;
640 goto cleanup;
641 }
642 }
643 }
644
645 spec->arch_mask = arch_mask ?: -1;
646 spec->load_mask = load_mask ?: (JITED | NO_JITED);
647
648 if (spec->mode_mask == 0)
649 spec->mode_mask = PRIV;
650
651 if (spec->mode_mask & PRIV) {
652 spec->priv.name = strdup(spec->prog_name);
653 if (!spec->priv.name) {
654 PRINT_FAIL("failed to allocate memory for priv.name\n");
655 err = -ENOMEM;
656 goto cleanup;
657 }
658
659 if (description) {
660 spec->priv.description = strdup(description);
661 if (!spec->priv.description) {
662 PRINT_FAIL("failed to allocate memory for priv.description\n");
663 err = -ENOMEM;
664 goto cleanup;
665 }
666 }
667 }
668
669 if (spec->mode_mask & UNPRIV) {
670 int name_len = strlen(spec->prog_name);
671 const char *suffix = " @unpriv";
672 int suffix_len = strlen(suffix);
673 char *name;
674
675 name = malloc(name_len + suffix_len + 1);
676 if (!name) {
677 PRINT_FAIL("failed to allocate memory for unpriv.name\n");
678 err = -ENOMEM;
679 goto cleanup;
680 }
681
682 strcpy(name, spec->prog_name);
683 strcpy(&name[name_len], suffix);
684 spec->unpriv.name = name;
685
686 if (description) {
687 int descr_len = strlen(description);
688 char *descr;
689
690 descr = malloc(descr_len + suffix_len + 1);
691 if (!descr) {
692 PRINT_FAIL("failed to allocate memory for unpriv.description\n");
693 err = -ENOMEM;
694 goto cleanup;
695 }
696
697 strcpy(descr, description);
698 strcpy(&descr[descr_len], suffix);
699 spec->unpriv.description = descr;
700 }
701 }
702
703 if (spec->mode_mask & (PRIV | UNPRIV)) {
704 if (!has_unpriv_result)
705 spec->unpriv.expect_failure = spec->priv.expect_failure;
706
707 if (!has_unpriv_retval) {
708 spec->unpriv.retval = spec->priv.retval;
709 spec->unpriv.execute = spec->priv.execute;
710 }
711
712 if (spec->unpriv.expect_msgs.cnt == 0)
713 clone_msgs(&spec->priv.expect_msgs, &spec->unpriv.expect_msgs);
714 if (spec->unpriv.expect_xlated.cnt == 0)
715 clone_msgs(&spec->priv.expect_xlated, &spec->unpriv.expect_xlated);
716 if (spec->unpriv.jited.cnt == 0)
717 clone_msgs(&spec->priv.jited, &spec->unpriv.jited);
718 if (spec->unpriv.stderr.cnt == 0)
719 clone_msgs(&spec->priv.stderr, &spec->unpriv.stderr);
720 if (spec->unpriv.stdout.cnt == 0)
721 clone_msgs(&spec->priv.stdout, &spec->unpriv.stdout);
722 }
723
724 spec->valid = true;
725
726 free(tags);
727 return 0;
728
729cleanup:
730 free(tags);
731 free_test_spec(spec);
732 return err;
733}
734
/* Configure @prog according to @spec before loading: attach the shared
 * log buffer, pick the verifier log level, and OR in requested prog
 * flags. Also resets the log buffer contents.
 */
static void prepare_case(struct test_loader *tester,
			 struct test_spec *spec,
			 struct bpf_object *obj,
			 struct bpf_program *prog)
{
	int min_log_level = 0, prog_flags;

	/* higher test_progs verbosity implies a higher verifier log level */
	if (env.verbosity > VERBOSE_NONE)
		min_log_level = 1;
	if (env.verbosity > VERBOSE_VERY)
		min_log_level = 2;

	bpf_program__set_log_buf(prog, tester->log_buf, tester->log_buf_sz);

	/* Make sure we set at least minimal log level, unless test requires
	 * even higher level already. Make sure to preserve independent log
	 * level 4 (verifier stats), though.
	 */
	if ((spec->log_level & 3) < min_log_level)
		bpf_program__set_log_level(prog, (spec->log_level & 4) | min_log_level);
	else
		bpf_program__set_log_level(prog, spec->log_level);

	prog_flags = bpf_program__flags(prog);
	bpf_program__set_flags(prog, prog_flags | spec->prog_flags);

	/* start each case with an empty log */
	tester->log_buf[0] = '\0';
}
763
764static void emit_verifier_log(const char *log_buf, bool force)
765{
766 if (!force && env.verbosity == VERBOSE_NONE)
767 return;
768 fprintf(stdout, "VERIFIER LOG:\n=============\n%s=============\n", log_buf);
769}
770
771static void emit_xlated(const char *xlated, bool force)
772{
773 if (!force && env.verbosity == VERBOSE_NONE)
774 return;
775 fprintf(stdout, "XLATED:\n=============\n%s=============\n", xlated);
776}
777
778static void emit_jited(const char *jited, bool force)
779{
780 if (!force && env.verbosity == VERBOSE_NONE)
781 return;
782 fprintf(stdout, "JITED:\n=============\n%s=============\n", jited);
783}
784
785static void emit_stderr(const char *stderr, bool force)
786{
787 if (!force && env.verbosity == VERBOSE_NONE)
788 return;
789 fprintf(stdout, "STDERR:\n=============\n%s=============\n", stderr);
790}
791
792static void emit_stdout(const char *bpf_stdout, bool force)
793{
794 if (!force && env.verbosity == VERBOSE_NONE)
795 return;
796 fprintf(stdout, "STDOUT:\n=============\n%s=============\n", bpf_stdout);
797}
798
799static const char *match_msg(struct expect_msg *msg, const char **log)
800{
801 const char *match = NULL;
802 regmatch_t reg_match[1];
803 int err;
804
805 if (!msg->is_regex) {
806 match = strstr(*log, msg->substr);
807 if (match)
808 *log = match + strlen(msg->substr);
809 } else {
810 err = regexec(&msg->regex, *log, 1, reg_match, 0);
811 if (err == 0) {
812 match = *log + reg_match[0].rm_so;
813 *log += reg_match[0].rm_eo;
814 }
815 }
816 return match;
817}
818
/* Count '\n' characters in the half-open range [start, end). */
static int count_lines(const char *start, const char *end)
{
	const char *p;
	int newlines = 0;

	for (p = start; p < end; p++)
		newlines += (*p == '\n');
	return newlines;
}
829
/* Location of one pattern's match inside the scanned log. */
struct match {
	const char *start;	/* first byte of the match; NULL = no match */
	const char *end;	/* one past the last matched byte */
	int line;		/* 0-based line number of the match start */
};
835
/*
 * Positive messages are matched sequentially, each next message
 * is looked for starting from the end of a previous matched one.
 * Fills matches[i] (start/end/line) for every positive pattern that
 * matched; negative patterns are skipped here (see match_negative_msgs()).
 */
static void match_positive_msgs(const char *log, struct expected_msgs *msgs, struct match *matches)
{
	const char *prev_match;
	int i, line;

	prev_match = log;
	line = 0;
	for (i = 0; i < msgs->cnt; i++) {
		struct expect_msg *msg = &msgs->patterns[i];
		const char *match = NULL;

		if (msg->negative)
			continue;

		/* match_msg() advances log past the match on success */
		match = match_msg(msg, &log);
		if (match) {
			/* running line count relative to the log start,
			 * used for on_next_line validation
			 */
			line += count_lines(prev_match, match);
			matches[i].start = match;
			matches[i].end = log;
			matches[i].line = line;
			prev_match = match;
		}
	}
}
864
/*
 * Each negative messages N located between positive messages P1 and P2
 * is matched in the span P1.end .. P2.start. Consequently, negative messages
 * are unordered within the span.
 * Requires matches[] to be pre-populated by match_positive_msgs().
 * A negative pattern that DID match within its span gets matches[j].start
 * set, which validate_msgs() later reports as "UNEXPECTED".
 */
static void match_negative_msgs(const char *log, struct expected_msgs *msgs, struct match *matches)
{
	const char *start = log, *end, *next, *match;
	const char *log_end = log + strlen(log);
	int i, j, next_positive;

	for (i = 0; i < msgs->cnt; i++) {
		struct expect_msg *msg = &msgs->patterns[i];

		/* positive message bumps span start */
		if (!msg->negative) {
			start = matches[i].end ?: start;
			continue;
		}

		/* count stride of negative patterns and adjust span end */
		end = log_end;
		for (next_positive = i + 1; next_positive < msgs->cnt; next_positive++) {
			if (!msgs->patterns[next_positive].negative) {
				end = matches[next_positive].start;
				break;
			}
		}

		/* try matching negative messages within identified span */
		for (j = i; j < next_positive; j++) {
			next = start;
			match = match_msg(msg, &next);
			if (match && next <= end) {
				matches[j].start = match;
				matches[j].end = next;
			}
		}

		/* -1 to account for i++ */
		i = next_positive - 1;
	}
}
908
909void validate_msgs(const char *log_buf, struct expected_msgs *msgs,
910 void (*emit_fn)(const char *buf, bool force))
911{
912 struct match matches[msgs->cnt];
913 struct match *prev_match = NULL;
914 int i, j;
915
916 memset(matches, 0, sizeof(*matches) * msgs->cnt);
917 match_positive_msgs(log_buf, msgs, matches);
918 match_negative_msgs(log_buf, msgs, matches);
919
920 for (i = 0; i < msgs->cnt; i++) {
921 struct expect_msg *msg = &msgs->patterns[i];
922 struct match *match = &matches[i];
923 const char *pat_status;
924 bool unexpected;
925 bool wrong_line;
926 bool no_match;
927
928 no_match = !msg->negative && !match->start;
929 wrong_line = !msg->negative &&
930 msg->on_next_line &&
931 prev_match && prev_match->line + 1 != match->line;
932 unexpected = msg->negative && match->start;
933 if (no_match || wrong_line || unexpected) {
934 PRINT_FAIL("expect_msg\n");
935 if (env.verbosity == VERBOSE_NONE)
936 emit_fn(log_buf, true /*force*/);
937 for (j = 0; j <= i; j++) {
938 msg = &msgs->patterns[j];
939 if (j < i)
940 pat_status = "MATCHED ";
941 else if (wrong_line)
942 pat_status = "WRONG LINE";
943 else if (no_match)
944 pat_status = "EXPECTED ";
945 else
946 pat_status = "UNEXPECTED";
947 msg = &msgs->patterns[j];
948 fprintf(stderr, "%s %s: '%s'\n",
949 pat_status,
950 msg->is_regex ? " REGEX" : "SUBSTR",
951 msg->substr);
952 }
953 if (wrong_line) {
954 fprintf(stderr,
955 "expecting match at line %d, actual match is at line %d\n",
956 prev_match->line + 1, match->line);
957 }
958 break;
959 }
960
961 if (!msg->negative)
962 prev_match = match;
963 }
964}
965
/* Saved capability set so unpriv subtests can restore privileges. */
struct cap_state {
	__u64 old_caps;		/* effective caps captured by drop_capabilities() */
	bool initialized;	/* true while old_caps is valid / restore pending */
};
970
971static int drop_capabilities(struct cap_state *caps)
972{
973 const __u64 caps_to_drop = (1ULL << CAP_SYS_ADMIN | 1ULL << CAP_NET_ADMIN |
974 1ULL << CAP_PERFMON | 1ULL << CAP_BPF);
975 int err;
976
977 err = cap_disable_effective(caps_to_drop, &caps->old_caps);
978 if (err) {
979 PRINT_FAIL("failed to drop capabilities: %i, %s\n", err, strerror(-err));
980 return err;
981 }
982
983 caps->initialized = true;
984 return 0;
985}
986
987static int restore_capabilities(struct cap_state *caps)
988{
989 int err;
990
991 if (!caps->initialized)
992 return 0;
993
994 err = cap_enable_effective(caps->old_caps, NULL);
995 if (err)
996 PRINT_FAIL("failed to restore capabilities: %i, %s\n", err, strerror(-err));
997 caps->initialized = false;
998 return err;
999}
1000
1001static bool can_execute_unpriv(struct test_loader *tester, struct test_spec *spec)
1002{
1003 if (sysctl_unpriv_disabled < 0)
1004 sysctl_unpriv_disabled = get_unpriv_disabled() ? 1 : 0;
1005 if (sysctl_unpriv_disabled)
1006 return false;
1007 if ((spec->prog_flags & BPF_F_ANY_ALIGNMENT) && !EFFICIENT_UNALIGNED_ACCESS)
1008 return false;
1009 return true;
1010}
1011
1012static bool is_unpriv_capable_map(struct bpf_map *map)
1013{
1014 enum bpf_map_type type;
1015 __u32 flags;
1016
1017 type = bpf_map__type(map);
1018
1019 switch (type) {
1020 case BPF_MAP_TYPE_HASH:
1021 case BPF_MAP_TYPE_PERCPU_HASH:
1022 case BPF_MAP_TYPE_HASH_OF_MAPS:
1023 flags = bpf_map__map_flags(map);
1024 return !(flags & BPF_F_ZERO_SEED);
1025 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
1026 case BPF_MAP_TYPE_ARRAY:
1027 case BPF_MAP_TYPE_RINGBUF:
1028 case BPF_MAP_TYPE_PROG_ARRAY:
1029 case BPF_MAP_TYPE_CGROUP_ARRAY:
1030 case BPF_MAP_TYPE_PERCPU_ARRAY:
1031 case BPF_MAP_TYPE_USER_RINGBUF:
1032 case BPF_MAP_TYPE_ARRAY_OF_MAPS:
1033 case BPF_MAP_TYPE_CGROUP_STORAGE:
1034 case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
1035 return true;
1036 default:
1037 return false;
1038 }
1039}
1040
/* Run @fd_prog once via BPF_PROG_TEST_RUN with small dummy skb data and
 * store its return value in *retval. @empty_opts replaces all options
 * with a minimal zeroed struct; @linear_sz (when non-zero) passes an
 * __sk_buff context with data_end set.
 * Returns 0 on success, the bpf_prog_test_run_opts() error otherwise.
 */
static int do_prog_test_run(int fd_prog, int *retval, bool empty_opts, int linear_sz)
{
	__u8 tmp_out[TEST_DATA_LEN << 2] = {};
	__u8 tmp_in[TEST_DATA_LEN] = {};
	struct __sk_buff ctx = {};
	int err, saved_errno;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = tmp_in,
		    .data_size_in = sizeof(tmp_in),
		    .data_out = tmp_out,
		    .data_size_out = sizeof(tmp_out),
		    .repeat = 1,
	);

	if (linear_sz) {
		ctx.data_end = linear_sz;
		topts.ctx_in = &ctx;
		topts.ctx_size_in = sizeof(ctx);
	}

	/* NOTE(review): this memset discards the linear_sz ctx setup above —
	 * presumably empty_opts and linear_sz are never used together; confirm.
	 */
	if (empty_opts) {
		memset(&topts, 0, sizeof(struct bpf_test_run_opts));
		topts.sz = sizeof(struct bpf_test_run_opts);
	}
	err = bpf_prog_test_run_opts(fd_prog, &topts);
	/* capture errno before any call below can clobber it */
	saved_errno = errno;

	if (err) {
		PRINT_FAIL("FAIL: Unexpected bpf_prog_test_run error: %d (%s) ",
			   saved_errno, strerror(saved_errno));
		return err;
	}

	ASSERT_OK(0, "bpf_prog_test_run");
	*retval = topts.retval;

	return 0;
}
1079
1080static bool should_do_test_run(struct test_spec *spec, struct test_subspec *subspec)
1081{
1082 if (!subspec->execute)
1083 return false;
1084
1085 if (subspec->expect_failure)
1086 return false;
1087
1088 if ((spec->prog_flags & BPF_F_ANY_ALIGNMENT) && !EFFICIENT_UNALIGNED_ACCESS) {
1089 if (env.verbosity != VERBOSE_NONE)
1090 printf("alignment prevents execution\n");
1091 return false;
1092 }
1093
1094 return true;
1095}
1096
1097/* Get a disassembly of BPF program after verifier applies all rewrites */
1098static int get_xlated_program_text(int prog_fd, char *text, size_t text_sz)
1099{
1100 struct bpf_insn *insn_start = NULL, *insn, *insn_end;
1101 __u32 insns_cnt = 0, i;
1102 char buf[64];
1103 FILE *out = NULL;
1104 int err;
1105
1106 err = get_xlated_program(prog_fd, &insn_start, &insns_cnt);
1107 if (!ASSERT_OK(err, "get_xlated_program"))
1108 goto out;
1109 out = fmemopen(text, text_sz, "w");
1110 if (!ASSERT_OK_PTR(out, "open_memstream"))
1111 goto out;
1112 insn_end = insn_start + insns_cnt;
1113 insn = insn_start;
1114 while (insn < insn_end) {
1115 i = insn - insn_start;
1116 insn = disasm_insn(insn, buf, sizeof(buf));
1117 fprintf(out, "%d: %s\n", i, buf);
1118 }
1119 fflush(out);
1120
1121out:
1122 free(insn_start);
1123 if (out)
1124 fclose(out);
1125 return err;
1126}
1127
1128/* Read the bpf stream corresponding to the stream_id */
1129static int get_stream(int stream_id, int prog_fd, char *text, size_t text_sz)
1130{
1131 LIBBPF_OPTS(bpf_prog_stream_read_opts, ropts);
1132 int ret;
1133
1134 ret = bpf_prog_stream_read(prog_fd, stream_id, text, text_sz, &ropts);
1135 ASSERT_GT(ret, 0, "stream read");
1136 text[ret] = '\0';
1137
1138 return ret;
1139}
1140
/* this function is forced noinline and has short generic name to look better
 * in test_progs output (in case of a failure)
 */
static noinline
void run_subtest(struct test_loader *tester,
		 struct bpf_object_open_opts *open_opts,
		 const void *obj_bytes,
		 size_t obj_byte_cnt,
		 struct test_spec *specs,
		 struct test_spec *spec,
		 bool unpriv)
{
	struct test_subspec *subspec = unpriv ? &spec->unpriv : &spec->priv;
	int current_runtime = is_jit_enabled() ? JITED : NO_JITED;
	struct bpf_program *tprog = NULL, *tprog_iter;
	struct bpf_link *link, *links[32] = {};
	struct test_spec *spec_iter;
	struct cap_state caps = {};
	struct bpf_object *tobj;
	struct bpf_map *map;
	int retval, err, i;
	int links_cnt = 0;
	bool should_load;

	if (!test__start_subtest_with_desc(subspec->name, subspec->description))
		return;

	/* Skip tests restricted to architectures other than the host's */
	if ((get_current_arch() & spec->arch_mask) == 0) {
		test__skip();
		return;
	}

	/* Skip tests that require a JIT state (enabled/disabled) the host
	 * doesn't currently have
	 */
	if ((current_runtime & spec->load_mask) == 0) {
		test__skip();
		return;
	}

	if (unpriv) {
		if (!can_execute_unpriv(tester, spec)) {
			test__skip();
			test__end_subtest();
			return;
		}
		if (drop_capabilities(&caps)) {
			test__end_subtest();
			return;
		}
		/* the subtest may ask for a subset of capabilities back
		 * (e.g. CAP_BPF alone) on top of the dropped state
		 */
		if (subspec->caps) {
			err = cap_enable_effective(subspec->caps, NULL);
			if (err) {
				PRINT_FAIL("failed to set capabilities: %i, %s\n", err, strerror(-err));
				goto subtest_cleanup;
			}
		}
	}

	/* Implicitly reset to NULL if next test case doesn't specify */
	open_opts->btf_custom_path = spec->btf_custom_path;

	tobj = bpf_object__open_mem(obj_bytes, obj_byte_cnt, open_opts);
	if (!ASSERT_OK_PTR(tobj, "obj_open_mem")) /* shouldn't happen */
		goto subtest_cleanup;

	/* Autoload only the program under test plus any auxiliary programs
	 * flagged for this privilege mode; 'specs' is indexed in the same
	 * order as bpf_object__for_each_program() iterates.
	 */
	i = 0;
	bpf_object__for_each_program(tprog_iter, tobj) {
		spec_iter = &specs[i++];
		should_load = false;

		if (spec_iter->valid) {
			if (strcmp(bpf_program__name(tprog_iter), spec->prog_name) == 0) {
				tprog = tprog_iter;
				should_load = true;
			}

			if (spec_iter->auxiliary &&
			    spec_iter->mode_mask & (unpriv ? UNPRIV : PRIV))
				should_load = true;
		}

		bpf_program__set_autoload(tprog_iter, should_load);
	}

	prepare_case(tester, spec, tobj, tprog);

	/* By default bpf_object__load() automatically creates all
	 * maps declared in the skeleton. Some map types are only
	 * allowed in priv mode. Disable autoload for such maps in
	 * unpriv mode.
	 */
	bpf_object__for_each_map(map, tobj)
		bpf_map__set_autocreate(map, !unpriv || is_unpriv_capable_map(map));

	err = bpf_object__load(tobj);
	if (subspec->expect_failure) {
		if (!ASSERT_ERR(err, "unexpected_load_success")) {
			emit_verifier_log(tester->log_buf, false /*force*/);
			goto tobj_cleanup;
		}
	} else {
		if (!ASSERT_OK(err, "unexpected_load_failure")) {
			emit_verifier_log(tester->log_buf, true /*force*/);
			goto tobj_cleanup;
		}
	}
	emit_verifier_log(tester->log_buf, false /*force*/);
	validate_msgs(tester->log_buf, &subspec->expect_msgs, emit_verifier_log);

	/* Restore capabilities because the kernel will silently ignore requests
	 * for program info (such as xlated program text) if we are not
	 * bpf-capable. Also, for some reason test_verifier executes programs
	 * with all capabilities restored. Do the same here.
	 */
	if (restore_capabilities(&caps))
		goto tobj_cleanup;

	/* Optional check of the post-verifier (xlated) program text against
	 * the subtest's expected patterns; tester->log_buf is reused as the
	 * scratch buffer for the disassembly.
	 */
	if (subspec->expect_xlated.cnt) {
		err = get_xlated_program_text(bpf_program__fd(tprog),
					      tester->log_buf, tester->log_buf_sz);
		if (err)
			goto tobj_cleanup;
		emit_xlated(tester->log_buf, false /*force*/);
		validate_msgs(tester->log_buf, &subspec->expect_xlated, emit_xlated);
	}

	/* Same idea for the JITed program text; only possible when the test
	 * binary was built with LLVM disassembler support.
	 */
	if (subspec->jited.cnt) {
		err = get_jited_program_text(bpf_program__fd(tprog),
					     tester->log_buf, tester->log_buf_sz);
		if (err == -EOPNOTSUPP) {
			printf("%s:SKIP: jited programs disassembly is not supported,\n", __func__);
			printf("%s:SKIP: tests are built w/o LLVM development libs\n", __func__);
			test__skip();
			goto tobj_cleanup;
		}
		if (!ASSERT_EQ(err, 0, "get_jited_program_text"))
			goto tobj_cleanup;
		emit_jited(tester->log_buf, false /*force*/);
		validate_msgs(tester->log_buf, &subspec->jited, emit_jited);
	}

	if (should_do_test_run(spec, subspec)) {
		/* Do bpf_map__attach_struct_ops() for each struct_ops map.
		 * This should trigger bpf_struct_ops->reg callback on kernel side.
		 */
		bpf_object__for_each_map(map, tobj) {
			if (!bpf_map__autocreate(map) ||
			    bpf_map__type(map) != BPF_MAP_TYPE_STRUCT_OPS)
				continue;
			if (links_cnt >= ARRAY_SIZE(links)) {
				PRINT_FAIL("too many struct_ops maps");
				goto tobj_cleanup;
			}
			link = bpf_map__attach_struct_ops(map);
			if (!link) {
				PRINT_FAIL("bpf_map__attach_struct_ops failed for map %s: err=%d\n",
					   bpf_map__name(map), -errno);
				goto tobj_cleanup;
			}
			links[links_cnt++] = link;
		}

		if (tester->pre_execution_cb) {
			err = tester->pre_execution_cb(tobj);
			if (err) {
				PRINT_FAIL("pre_execution_cb failed: %d\n", err);
				goto tobj_cleanup;
			}
		}

		err = do_prog_test_run(bpf_program__fd(tprog), &retval,
				       bpf_program__type(tprog) == BPF_PROG_TYPE_SYSCALL ? true : false,
				       spec->linear_sz);
		/* POINTER_VALUE acts as a wildcard: subtests expecting it
		 * don't check the concrete return value
		 */
		if (!err && retval != subspec->retval && subspec->retval != POINTER_VALUE) {
			PRINT_FAIL("Unexpected retval: %d != %d\n", retval, subspec->retval);
			goto tobj_cleanup;
		}

		/* Optionally match the program's BPF stderr stream (id 2) */
		if (subspec->stderr.cnt) {
			err = get_stream(2, bpf_program__fd(tprog),
					 tester->log_buf, tester->log_buf_sz);
			if (err <= 0) {
				PRINT_FAIL("Unexpected retval from get_stream(): %d, errno = %d\n",
					   err, errno);
				goto tobj_cleanup;
			}
			emit_stderr(tester->log_buf, false /*force*/);
			validate_msgs(tester->log_buf, &subspec->stderr, emit_stderr);
		}

		/* Optionally match the program's BPF stdout stream (id 1) */
		if (subspec->stdout.cnt) {
			err = get_stream(1, bpf_program__fd(tprog),
					 tester->log_buf, tester->log_buf_sz);
			if (err <= 0) {
				PRINT_FAIL("Unexpected retval from get_stream(): %d, errno = %d\n",
					   err, errno);
				goto tobj_cleanup;
			}
			emit_stdout(tester->log_buf, false /*force*/);
			validate_msgs(tester->log_buf, &subspec->stdout, emit_stdout);
		}

		/* redo bpf_map__attach_struct_ops for each test */
		while (links_cnt > 0)
			bpf_link__destroy(links[--links_cnt]);
	}

tobj_cleanup:
	/* links_cnt is 0 here on the success path; this loop only fires when
	 * an error path jumped here with links still attached
	 */
	while (links_cnt > 0)
		bpf_link__destroy(links[--links_cnt]);
	bpf_object__close(tobj);
subtest_cleanup:
	test__end_subtest();
	restore_capabilities(&caps);
}
1354
1355static void process_subtest(struct test_loader *tester,
1356 const char *skel_name,
1357 skel_elf_bytes_fn elf_bytes_factory)
1358{
1359 LIBBPF_OPTS(bpf_object_open_opts, open_opts, .object_name = skel_name);
1360 struct test_spec *specs = NULL;
1361 struct bpf_object *obj = NULL;
1362 struct bpf_program *prog;
1363 const void *obj_bytes;
1364 int err, i, nr_progs;
1365 size_t obj_byte_cnt;
1366
1367 if (tester_init(tester) < 0)
1368 return; /* failed to initialize tester */
1369
1370 obj_bytes = elf_bytes_factory(&obj_byte_cnt);
1371 obj = bpf_object__open_mem(obj_bytes, obj_byte_cnt, &open_opts);
1372 if (!ASSERT_OK_PTR(obj, "obj_open_mem"))
1373 return;
1374
1375 nr_progs = 0;
1376 bpf_object__for_each_program(prog, obj)
1377 ++nr_progs;
1378
1379 specs = calloc(nr_progs, sizeof(struct test_spec));
1380 if (!ASSERT_OK_PTR(specs, "specs_alloc"))
1381 return;
1382
1383 i = 0;
1384 bpf_object__for_each_program(prog, obj) {
1385 /* ignore tests for which we can't derive test specification */
1386 err = parse_test_spec(tester, obj, prog, &specs[i++]);
1387 if (err)
1388 PRINT_FAIL("Can't parse test spec for program '%s'\n",
1389 bpf_program__name(prog));
1390 }
1391
1392 i = 0;
1393 bpf_object__for_each_program(prog, obj) {
1394 struct test_spec *spec = &specs[i++];
1395
1396 if (!spec->valid || spec->auxiliary)
1397 continue;
1398
1399 if (spec->mode_mask & PRIV)
1400 run_subtest(tester, &open_opts, obj_bytes, obj_byte_cnt,
1401 specs, spec, false);
1402 if (spec->mode_mask & UNPRIV)
1403 run_subtest(tester, &open_opts, obj_bytes, obj_byte_cnt,
1404 specs, spec, true);
1405
1406 }
1407
1408 for (i = 0; i < nr_progs; ++i)
1409 free_test_spec(&specs[i]);
1410 free(specs);
1411 bpf_object__close(obj);
1412}
1413
1414void test_loader__run_subtests(struct test_loader *tester,
1415 const char *skel_name,
1416 skel_elf_bytes_fn elf_bytes_factory)
1417{
1418 /* see comment in run_subtest() for why we do this function nesting */
1419 process_subtest(tester, skel_name, elf_bytes_factory);
1420}