Linux kernel mirror (for testing)
Repository: git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Topics: kernel, os, linux
1// SPDX-License-Identifier: GPL-2.0
2#include <errno.h>
3#include <linux/kconfig.h>
4#include <linux/kernel.h>
5#include <linux/rbtree.h>
6#include <linux/types.h>
7#include <linux/zalloc.h>
8#include <inttypes.h>
9#include <stdlib.h>
10#include <unistd.h>
11#include <stdio.h>
12#include <string.h>
13#include <sys/param.h>
14#include <sys/utsname.h>
15#include <perf/cpumap.h>
16#include <perf/evlist.h>
17#include <perf/mmap.h>
18
19#include "debug.h"
20#include "dso.h"
21#include "env.h"
22#include "parse-events.h"
23#include "evlist.h"
24#include "evsel.h"
25#include "thread_map.h"
26#include "machine.h"
27#include "map.h"
28#include "symbol.h"
29#include "event.h"
30#include "record.h"
31#include "util/mmap.h"
32#include "util/string2.h"
33#include "util/synthetic-events.h"
34#include "util/util.h"
35#include "thread.h"
36
37#include "tests.h"
38
39#include <linux/ctype.h>
40
41#define BUFSZ 1024
42#define READLEN 128
43
/*
 * One (dso path, address) pair that has already been compared against
 * objdump output, kept in an rb-tree so the same code is not tested twice.
 */
struct tested_section {
	struct rb_node rb_node;	/* node in the caller's tested_sections tree */
	u64 addr;		/* address tested (map start for kcore maps) */
	char *path;		/* strdup'ed dso long name; freed with the node */
};
49
50static bool tested_code_insert_or_exists(const char *path, u64 addr,
51 struct rb_root *tested_sections)
52{
53 struct rb_node **node = &tested_sections->rb_node;
54 struct rb_node *parent = NULL;
55 struct tested_section *data;
56
57 while (*node) {
58 int cmp;
59
60 parent = *node;
61 data = rb_entry(*node, struct tested_section, rb_node);
62 cmp = strcmp(path, data->path);
63 if (!cmp) {
64 if (addr < data->addr)
65 cmp = -1;
66 else if (addr > data->addr)
67 cmp = 1;
68 else
69 return true; /* already tested */
70 }
71
72 if (cmp < 0)
73 node = &(*node)->rb_left;
74 else
75 node = &(*node)->rb_right;
76 }
77
78 data = zalloc(sizeof(*data));
79 if (!data)
80 return true;
81
82 data->addr = addr;
83 data->path = strdup(path);
84 if (!data->path) {
85 free(data);
86 return true;
87 }
88 rb_link_node(&data->rb_node, parent, node);
89 rb_insert_color(&data->rb_node, tested_sections);
90 return false;
91}
92
93static void tested_sections__free(struct rb_root *root)
94{
95 while (!RB_EMPTY_ROOT(root)) {
96 struct rb_node *node = rb_first(root);
97 struct tested_section *ts = rb_entry(node,
98 struct tested_section,
99 rb_node);
100
101 rb_erase(node, root);
102 free(ts->path);
103 free(ts);
104 }
105}
106
/*
 * Parse one objdump "chunk" (a run of hex digit pairs, e.g. "4883ec08")
 * from *line into *buf, advancing both cursors and decrementing *buf_len.
 * objdump prints a multi-byte chunk in display endian when code endian is
 * LE (see disassemble_bytes() in binutils/objdump.c), so on little-endian
 * hosts the bytes of a multi-byte chunk are reversed after decoding.
 * Returns the number of bytes stored.
 */
static size_t read_objdump_chunk(const char **line, unsigned char **buf,
				 size_t *buf_len)
{
	unsigned char *start = *buf;
	size_t n = 0;

	while (*buf_len > 0) {
		char hi, lo;

		/* Each byte is exactly two hex digits */
		hi = *(*line)++;
		if (!isxdigit(hi))
			break;
		lo = *(*line)++;
		if (!isxdigit(lo))
			break;

		*(*buf)++ = (hex(hi) << 4) | hex(lo);
		(*buf_len)--;
		n++;

		/* Whitespace terminates the chunk */
		if (isspace(**line))
			break;
	}

	/* Undo objdump's little-endian display order for multi-byte chunks */
	if (n > 1 && !host_is_bigendian()) {
		unsigned char *lo_p = start;
		unsigned char *hi_p = start + n - 1;

		while (lo_p < hi_p) {
			unsigned char t = *lo_p;

			*lo_p++ = *hi_p;
			*hi_p-- = t;
		}
	}

	return n;
}
159
/*
 * Decode the raw instruction bytes from one line of objdump output
 * ("<addr>:\t<hex chunks>\t<mnemonic>") into buf.  Returns the number of
 * bytes successfully decoded; 0 if the line has no "addr:" prefix.
 */
static size_t read_objdump_line(const char *line, unsigned char *buf,
				size_t buf_len)
{
	size_t total = 0, chunk;
	const char *p = strchr(line, ':');

	if (!p)
		return 0;

	/* Step past the colon and any whitespace before the first chunk */
	for (p++; *p && isspace(*p); p++)
		;

	/* Consume whitespace-separated chunks until one fails to parse */
	do {
		chunk = read_objdump_chunk(&p, &buf, &buf_len);
		total += chunk;
		p++;
	} while (chunk > 0);

	return total;
}
188
189static int read_objdump_output(FILE *f, void *buf, size_t *len, u64 start_addr)
190{
191 char *line = NULL;
192 size_t line_len, off_last = 0;
193 ssize_t ret;
194 int err = 0;
195 u64 addr, last_addr = start_addr;
196
197 while (off_last < *len) {
198 size_t off, read_bytes, written_bytes;
199 unsigned char tmp[BUFSZ];
200
201 ret = getline(&line, &line_len, f);
202 if (feof(f))
203 break;
204 if (ret < 0) {
205 pr_debug("getline failed\n");
206 err = -1;
207 break;
208 }
209
210 /* read objdump data into temporary buffer */
211 read_bytes = read_objdump_line(line, tmp, sizeof(tmp));
212 if (!read_bytes)
213 continue;
214
215 if (sscanf(line, "%"PRIx64, &addr) != 1)
216 continue;
217 if (addr < last_addr) {
218 pr_debug("addr going backwards, read beyond section?\n");
219 break;
220 }
221 last_addr = addr;
222
223 /* copy it from temporary buffer to 'buf' according
224 * to address on current objdump line */
225 off = addr - start_addr;
226 if (off >= *len)
227 break;
228 written_bytes = MIN(read_bytes, *len - off);
229 memcpy(buf + off, tmp, written_bytes);
230 off_last = off + written_bytes;
231 }
232
233 /* len returns number of bytes that could not be read */
234 *len -= off_last;
235
236 free(line);
237
238 return err;
239}
240
241/*
242 * Only gets GNU objdump version. Returns 0 for llvm-objdump.
243 */
244static int objdump_version(void)
245{
246 size_t line_len;
247 char cmd[PATH_MAX * 2];
248 char *line = NULL;
249 const char *fmt;
250 FILE *f;
251 int ret;
252
253 int version_tmp, version_num = 0;
254 char *version = 0, *token;
255
256 fmt = "%s --version";
257 ret = snprintf(cmd, sizeof(cmd), fmt, test_objdump_path);
258 if (ret <= 0 || (size_t)ret >= sizeof(cmd))
259 return -1;
260 /* Ignore objdump errors */
261 strcat(cmd, " 2>/dev/null");
262 f = popen(cmd, "r");
263 if (!f) {
264 pr_debug("popen failed\n");
265 return -1;
266 }
267 /* Get first line of objdump --version output */
268 ret = getline(&line, &line_len, f);
269 pclose(f);
270 if (ret < 0) {
271 pr_debug("getline failed\n");
272 return -1;
273 }
274
275 token = strsep(&line, " ");
276 if (token != NULL && !strcmp(token, "GNU")) {
277 // version is last part of first line of objdump --version output.
278 while ((token = strsep(&line, " ")))
279 version = token;
280
281 // Convert version into a format we can compare with
282 token = strsep(&version, ".");
283 version_num = atoi(token);
284 if (version_num)
285 version_num *= 10000;
286
287 token = strsep(&version, ".");
288 version_tmp = atoi(token);
289 if (token)
290 version_num += version_tmp * 100;
291
292 token = strsep(&version, ".");
293 version_tmp = atoi(token);
294 if (token)
295 version_num += version_tmp;
296 }
297
298 return version_num;
299}
300
301static int read_via_objdump(const char *filename, u64 addr, void *buf,
302 size_t len)
303{
304 u64 stop_address = addr + len;
305 struct utsname uname_buf;
306 char cmd[PATH_MAX * 2];
307 const char *fmt;
308 FILE *f;
309 int ret;
310
311 ret = uname(&uname_buf);
312 if (ret) {
313 pr_debug("uname failed\n");
314 return -1;
315 }
316
317 if (!strncmp(uname_buf.machine, "riscv", 5)) {
318 int version = objdump_version();
319
320 /* Default to this workaround if version parsing fails */
321 if (version < 0 || version > 24100) {
322 /*
323 * Starting at riscv objdump version 2.41, dumping in
324 * the middle of an instruction is not supported. riscv
325 * instructions are aligned along 2-byte intervals and
326 * can be either 2-bytes or 4-bytes. This makes it
327 * possible that the stop-address lands in the middle of
328 * a 4-byte instruction. Increase the stop_address by
329 * two to ensure an instruction is not cut in half, but
330 * leave the len as-is so only the expected number of
331 * bytes are collected.
332 */
333 stop_address += 2;
334 }
335 }
336
337 fmt = "%s -z -d --start-address=0x%"PRIx64" --stop-address=0x%"PRIx64" %s";
338 ret = snprintf(cmd, sizeof(cmd), fmt, test_objdump_path, addr, stop_address,
339 filename);
340 if (ret <= 0 || (size_t)ret >= sizeof(cmd))
341 return -1;
342
343 pr_debug("Objdump command is: %s\n", cmd);
344
345 /* Ignore objdump errors */
346 strcat(cmd, " 2>/dev/null");
347
348 f = popen(cmd, "r");
349 if (!f) {
350 pr_debug("popen failed\n");
351 return -1;
352 }
353
354 ret = read_objdump_output(f, buf, &len, addr);
355 if (len) {
356 pr_debug("objdump read too few bytes: %zd\n", len);
357 if (!ret)
358 ret = len;
359 }
360
361 pclose(f);
362
363 return ret;
364}
365
/* Hex-dump len bytes of buf to the debug log, 16 bytes per row. */
static void dump_buf(unsigned char *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		pr_debug("0x%02x ", buf[i]);
		if ((i & 0xf) == 0xf)
			pr_debug("\n");
	}
	pr_debug("\n");
}
377
/*
 * Compare 'len' bytes of object code at memory address 'addr' as read
 * through perf's dso layer (buf1) against the same bytes decoded from
 * objdump output (buf2).  Addresses already recorded in 'tested_sections'
 * are skipped, as are hypervisor addresses and kallsyms-only kernel
 * addresses that have no backing file to read from.
 *
 * Returns 0 on match or deliberate skip, -1 on mismatch or failure.
 */
static int read_object_code(u64 addr, size_t len, u8 cpumode,
			    struct thread *thread,
			    struct rb_root *tested_sections)
{
	struct addr_location al;
	unsigned char buf1[BUFSZ] = {0};
	unsigned char buf2[BUFSZ] = {0};
	size_t ret_len;
	u64 objdump_addr;
	u64 skip_addr;
	const char *objdump_name;
	char decomp_name[KMOD_DECOMP_LEN];
	bool decomp = false;
	int ret, err = 0;
	struct dso *dso;

	pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr);

	addr_location__init(&al);
	if (!thread__find_map(thread, cpumode, addr, &al) || !map__dso(al.map)) {
		/* Hypervisor samples have no map to resolve - not a failure */
		if (cpumode == PERF_RECORD_MISC_HYPERVISOR) {
			pr_debug("Hypervisor address can not be resolved - skipping\n");
			goto out;
		}

		pr_debug("thread__find_map failed\n");
		err = -1;
		goto out;
	}
	dso = map__dso(al.map);
	pr_debug("File is: %s\n", dso__long_name(dso));

	/* kallsyms-only kernel symbols have no object file to read bytes from */
	if (dso__symtab_type(dso) == DSO_BINARY_TYPE__KALLSYMS && !dso__is_kcore(dso)) {
		pr_debug("Unexpected kernel address - skipping\n");
		goto out;
	}

	/*
	 * Don't retest the same addresses. objdump struggles with kcore - try
	 * each map only once even if the address is different.
	 */
	skip_addr = dso__is_kcore(dso) ? map__start(al.map) : al.addr;
	if (tested_code_insert_or_exists(dso__long_name(dso), skip_addr,
					 tested_sections)) {
		pr_debug("Already tested %s @ %#"PRIx64" - skipping\n",
			 dso__long_name(dso), skip_addr);
		goto out;
	}

	pr_debug("On file address is: %#"PRIx64"\n", al.addr);

	/* Clamp to the size of the comparison buffers */
	if (len > BUFSZ)
		len = BUFSZ;

	/* Do not go off the map */
	if (addr + len > map__end(al.map))
		len = map__end(al.map) - addr;

	/*
	 * Some architectures (ex: powerpc) have stubs (trampolines) in kernel
	 * modules to manage long jumps. Check if the ip offset falls in stubs
	 * sections for kernel modules. And skip module address after text end
	 */
	if (dso__is_kmod(dso) && al.addr > dso__text_end(dso)) {
		pr_debug("skipping the module address %#"PRIx64" after text end\n", al.addr);
		goto out;
	}

	/* Read the object code using perf */
	ret_len = dso__data_read_offset(dso, maps__machine(thread__maps(thread)),
					al.addr, buf1, len);
	if (ret_len != len) {
		pr_debug("dso__data_read_offset failed\n");
		err = -1;
		goto out;
	}

	/*
	 * Converting addresses for use by objdump requires more information.
	 * map__load() does that. See map__rip_2objdump() for details.
	 */
	if (map__load(al.map)) {
		err = -1;
		goto out;
	}

	/* Compressed kernel modules must be unpacked before objdump can read them */
	objdump_name = dso__long_name(dso);
	if (dso__needs_decompress(dso)) {
		if (dso__decompress_kmodule_path(dso, objdump_name,
						 decomp_name,
						 sizeof(decomp_name)) < 0) {
			pr_debug("decompression failed\n");
			err = -1;
			goto out;
		}

		decomp = true;
		objdump_name = decomp_name;
	}

	/* Read the object code using objdump */
	objdump_addr = map__rip_2objdump(al.map, al.addr);
	ret = read_via_objdump(objdump_name, objdump_addr, buf2, len);

	/* Remove the temporary decompressed copy, if one was made */
	if (decomp)
		unlink(objdump_name);

	if (ret > 0) {
		/*
		 * The kernel maps are inaccurate - assume objdump is right in
		 * that case.
		 */
		if (cpumode == PERF_RECORD_MISC_KERNEL ||
		    cpumode == PERF_RECORD_MISC_GUEST_KERNEL) {
			len -= ret;
			if (len) {
				pr_debug("Reducing len to %zu\n", len);
			} else if (dso__is_kcore(dso)) {
				/*
				 * objdump cannot handle very large segments
				 * that may be found in kcore.
				 */
				pr_debug("objdump failed for kcore");
				pr_debug(" - skipping\n");
			} else {
				err = -1;
			}
			goto out;
		}
	}
	if (ret < 0) {
		pr_debug("read_via_objdump failed\n");
		err = -1;
		goto out;
	}

	/* The results should be identical */
	if (memcmp(buf1, buf2, len)) {
		pr_debug("Bytes read differ from those read by objdump\n");
		pr_debug("buf1 (dso):\n");
		dump_buf(buf1, len);
		pr_debug("buf2 (objdump):\n");
		dump_buf(buf2, len);
		err = -1;
		goto out;
	}
	pr_debug("Bytes read match those read by objdump\n");
out:
	addr_location__exit(&al);
	return err;
}
529
530static int process_sample_event(struct machine *machine, struct evlist *evlist,
531 union perf_event *event,
532 struct rb_root *tested_sections)
533{
534 struct perf_sample sample;
535 struct thread *thread;
536 int ret;
537
538 perf_sample__init(&sample, /*all=*/false);
539 ret = evlist__parse_sample(evlist, event, &sample);
540 if (ret) {
541 pr_debug("evlist__parse_sample failed\n");
542 ret = -1;
543 goto out;
544 }
545
546 thread = machine__findnew_thread(machine, sample.pid, sample.tid);
547 if (!thread) {
548 pr_debug("machine__findnew_thread failed\n");
549 ret = -1;
550 goto out;
551 }
552
553 ret = read_object_code(sample.ip, READLEN, sample.cpumode, thread,
554 tested_sections);
555 thread__put(thread);
556out:
557 perf_sample__exit(&sample);
558 return ret;
559}
560
561static int process_event(struct machine *machine, struct evlist *evlist,
562 union perf_event *event, struct rb_root *tested_sections)
563{
564 if (event->header.type == PERF_RECORD_SAMPLE)
565 return process_sample_event(machine, evlist, event,
566 tested_sections);
567
568 if (event->header.type == PERF_RECORD_THROTTLE ||
569 event->header.type == PERF_RECORD_UNTHROTTLE)
570 return 0;
571
572 if (event->header.type < PERF_RECORD_MAX) {
573 int ret;
574
575 ret = machine__process_event(machine, event, NULL);
576 if (ret < 0)
577 pr_debug("machine__process_event failed, event type %u\n",
578 event->header.type);
579 return ret;
580 }
581
582 return 0;
583}
584
585static int process_events(struct machine *machine, struct evlist *evlist,
586 struct rb_root *tested_sections)
587{
588 union perf_event *event;
589 struct mmap *md;
590 int i, ret;
591
592 for (i = 0; i < evlist->core.nr_mmaps; i++) {
593 md = &evlist->mmap[i];
594 if (perf_mmap__read_init(&md->core) < 0)
595 continue;
596
597 while ((event = perf_mmap__read_event(&md->core)) != NULL) {
598 ret = process_event(machine, evlist, event, tested_sections);
599 perf_mmap__consume(&md->core);
600 if (ret < 0)
601 return ret;
602 }
603 perf_mmap__read_done(&md->core);
604 }
605 return 0;
606}
607
/*
 * qsort() comparator for ints.  Uses comparisons instead of subtraction
 * so the result cannot overflow (the old 'a - b' form is undefined
 * behavior for extreme values such as INT_MIN vs a positive int).
 */
static int comp(const void *a, const void *b)
{
	int x = *(const int *)a;
	int y = *(const int *)b;

	return (x > y) - (x < y);
}
612
613static void do_sort_something(void)
614{
615 int buf[40960], i;
616
617 for (i = 0; i < (int)ARRAY_SIZE(buf); i++)
618 buf[i] = ARRAY_SIZE(buf) - i - 1;
619
620 qsort(buf, ARRAY_SIZE(buf), sizeof(int), comp);
621
622 for (i = 0; i < (int)ARRAY_SIZE(buf); i++) {
623 if (buf[i] != i) {
624 pr_debug("qsort failed\n");
625 break;
626 }
627 }
628}
629
/* Repeat the qsort exercise to generate user-space CPU samples. */
static void sort_something(void)
{
	int iter;

	for (iter = 0; iter < 10; iter++)
		do_sort_something();
}
637
/* Issue many pipe()/close() syscalls to generate kernel-space samples. */
static void syscall_something(void)
{
	int fds[2], i;

	for (i = 0; i < 1000; i++) {
		if (pipe(fds) < 0) {
			pr_debug("pipe failed\n");
			break;
		}
		close(fds[1]);
		close(fds[0]);
	}
}
652
/* Repeatedly create and remove a temporary file to exercise the VFS. */
static void fs_something(void)
{
	static const char *name = "temp-perf-code-reading-test-file--";
	int i;

	for (i = 0; i < 1000; i++) {
		FILE *f = fopen(name, "w+");

		if (f) {
			fclose(f);
			unlink(name);
		}
	}
}
667
/*
 * Generate a mix of file-system, user-space CPU and syscall activity so
 * the sampling events cover different code regions.
 */
static void do_something(void)
{
	fs_something();
	sort_something();
	syscall_something();
}
676
/* Result codes returned by do_test_code_reading() */
enum {
	TEST_CODE_READING_OK,			/* comparison succeeded */
	TEST_CODE_READING_NO_VMLINUX,		/* kernel resolved without vmlinux */
	TEST_CODE_READING_NO_KCORE,		/* 2nd pass found no kcore */
	TEST_CODE_READING_NO_ACCESS,		/* only :u events could be opened */
	TEST_CODE_READING_NO_KERNEL_OBJ,	/* neither vmlinux nor kcore */
};
684
685static int do_test_code_reading(bool try_kcore)
686{
687 struct machine *machine;
688 struct thread *thread;
689 struct record_opts opts = {
690 .mmap_pages = UINT_MAX,
691 .user_freq = UINT_MAX,
692 .user_interval = ULLONG_MAX,
693 .freq = 500,
694 .target = {
695 .uses_mmap = true,
696 },
697 };
698 struct rb_root tested_sections = RB_ROOT;
699 struct perf_thread_map *threads = NULL;
700 struct perf_cpu_map *cpus = NULL;
701 struct evlist *evlist = NULL;
702 struct evsel *evsel = NULL;
703 int err = -1, ret;
704 pid_t pid;
705 struct map *map;
706 bool have_vmlinux, have_kcore;
707 struct dso *dso;
708 const char *events[] = { "cpu-cycles", "cpu-cycles:u", "cpu-clock", "cpu-clock:u", NULL };
709 int evidx = 0;
710 struct perf_env host_env;
711
712 pid = getpid();
713
714 perf_env__init(&host_env);
715 machine = machine__new_host(&host_env);
716
717 ret = machine__create_kernel_maps(machine);
718 if (ret < 0) {
719 pr_debug("machine__create_kernel_maps failed\n");
720 goto out_err;
721 }
722
723 /* Force the use of kallsyms instead of vmlinux to try kcore */
724 if (try_kcore)
725 symbol_conf.kallsyms_name = "/proc/kallsyms";
726
727 /* Load kernel map */
728 map = machine__kernel_map(machine);
729 ret = map__load(map);
730 if (ret < 0) {
731 pr_debug("map__load failed\n");
732 goto out_err;
733 }
734 dso = map__dso(map);
735 have_vmlinux = dso__is_vmlinux(dso);
736 have_kcore = dso__is_kcore(dso);
737
738 /* 2nd time through we just try kcore */
739 if (try_kcore && !have_kcore)
740 return TEST_CODE_READING_NO_KCORE;
741
742 /* No point getting kernel events if there is no kernel object */
743 if (!have_vmlinux && !have_kcore)
744 evidx++;
745
746 threads = thread_map__new_by_tid(pid);
747 if (!threads) {
748 pr_debug("thread_map__new_by_tid failed\n");
749 goto out_err;
750 }
751
752 ret = perf_event__synthesize_thread_map(NULL, threads,
753 perf_event__process, machine,
754 true, false);
755 if (ret < 0) {
756 pr_debug("perf_event__synthesize_thread_map failed\n");
757 goto out_err;
758 }
759
760 thread = machine__findnew_thread(machine, pid, pid);
761 if (!thread) {
762 pr_debug("machine__findnew_thread failed\n");
763 goto out_put;
764 }
765
766 cpus = perf_cpu_map__new_online_cpus();
767 if (!cpus) {
768 pr_debug("perf_cpu_map__new failed\n");
769 goto out_put;
770 }
771
772 while (events[evidx]) {
773 const char *str;
774
775 evlist = evlist__new();
776 if (!evlist) {
777 pr_debug("evlist__new failed\n");
778 goto out_put;
779 }
780
781 perf_evlist__set_maps(&evlist->core, cpus, threads);
782
783 str = events[evidx];
784 pr_debug("Parsing event '%s'\n", str);
785 ret = parse_event(evlist, str);
786 if (ret < 0) {
787 pr_debug("parse_events failed\n");
788 goto out_put;
789 }
790
791 evlist__config(evlist, &opts, NULL);
792
793 evlist__for_each_entry(evlist, evsel) {
794 evsel->core.attr.comm = 1;
795 evsel->core.attr.disabled = 1;
796 evsel->core.attr.enable_on_exec = 0;
797 }
798
799 ret = evlist__open(evlist);
800 if (ret < 0) {
801 evidx++;
802
803 if (events[evidx] == NULL && verbose > 0) {
804 char errbuf[512];
805 evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
806 pr_debug("perf_evlist__open() failed!\n%s\n", errbuf);
807 }
808
809 perf_evlist__set_maps(&evlist->core, NULL, NULL);
810 evlist__delete(evlist);
811 evlist = NULL;
812 continue;
813 }
814 break;
815 }
816
817 if (events[evidx] == NULL)
818 goto out_put;
819
820 ret = evlist__mmap(evlist, UINT_MAX);
821 if (ret < 0) {
822 pr_debug("evlist__mmap failed\n");
823 goto out_put;
824 }
825
826 evlist__enable(evlist);
827
828 do_something();
829
830 evlist__disable(evlist);
831
832 ret = process_events(machine, evlist, &tested_sections);
833 if (ret < 0)
834 goto out_put;
835
836 if (!have_vmlinux && !have_kcore && !try_kcore)
837 err = TEST_CODE_READING_NO_KERNEL_OBJ;
838 else if (!have_vmlinux && !try_kcore)
839 err = TEST_CODE_READING_NO_VMLINUX;
840 else if (strstr(events[evidx], ":u"))
841 err = TEST_CODE_READING_NO_ACCESS;
842 else
843 err = TEST_CODE_READING_OK;
844out_put:
845 thread__put(thread);
846out_err:
847 evlist__delete(evlist);
848 perf_cpu_map__put(cpus);
849 perf_thread_map__put(threads);
850 machine__delete(machine);
851 perf_env__exit(&host_env);
852 tested_sections__free(&tested_sections);
853
854 return err;
855}
856
857static int test__code_reading(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
858{
859 int ret;
860
861 ret = do_test_code_reading(false);
862 if (!ret)
863 ret = do_test_code_reading(true);
864
865 switch (ret) {
866 case TEST_CODE_READING_OK:
867 return 0;
868 case TEST_CODE_READING_NO_VMLINUX:
869 pr_debug("no vmlinux\n");
870 return 0;
871 case TEST_CODE_READING_NO_KCORE:
872 pr_debug("no kcore\n");
873 return 0;
874 case TEST_CODE_READING_NO_ACCESS:
875 pr_debug("no access\n");
876 return 0;
877 case TEST_CODE_READING_NO_KERNEL_OBJ:
878 pr_debug("no kernel obj\n");
879 return 0;
880 default:
881 return -1;
882 };
883}
884
/* Register the suite with perf's test framework; runs test__code_reading(). */
DEFINE_SUITE("Object code reading", code_reading);