Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
3#include <test_progs.h>
4#include <bpf/libbpf.h>
5#include <bpf/btf.h>
6#include <fcntl.h>
7#include <sys/mman.h>
8#include <unistd.h>
9#include "cgroup_helpers.h"
10#include "cgroup_iter_memcg.h"
11#include "cgroup_iter_memcg.skel.h"
12
/*
 * Trigger one pass of the iterator program attached via @link.
 *
 * Creates a fresh iterator instance fd and reads from it, which invokes
 * the BPF program. Returns 0 on success, 1 on failure.
 */
static int read_stats(struct bpf_link *link)
{
	ssize_t nread;
	int iter_fd;
	int err = 0;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_OK_FD(iter_fd, "bpf_iter_create"))
		return 1;

	/*
	 * Run the iter program by reading from its fd. The program writes
	 * nothing to the seq file, so zero bytes are expected; the stats
	 * land in the custom data section reachable through
	 * skel->data_query.memcg_query.
	 */
	nread = read(iter_fd, NULL, 0);
	if (!ASSERT_EQ(nread, 0, "read fd"))
		err = 1;

	close(iter_fd);
	return err;
}
35
36static void test_anon(struct bpf_link *link, struct memcg_query *memcg_query)
37{
38 void *map;
39 size_t len;
40
41 len = sysconf(_SC_PAGESIZE) * 1024;
42
43 /*
44 * Increase memcg anon usage by mapping and writing
45 * to a new anon region.
46 */
47 map = mmap(NULL, len, PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
48 if (!ASSERT_NEQ(map, MAP_FAILED, "mmap anon"))
49 return;
50
51 memset(map, 1, len);
52
53 if (!ASSERT_OK(read_stats(link), "read stats"))
54 goto cleanup;
55
56 ASSERT_GT(memcg_query->nr_anon_mapped, 0, "final anon mapped val");
57
58cleanup:
59 munmap(map, len);
60}
61
62static void test_file(struct bpf_link *link, struct memcg_query *memcg_query)
63{
64 void *map;
65 size_t len;
66 char *path;
67 int fd;
68
69 len = sysconf(_SC_PAGESIZE) * 1024;
70 path = "/tmp/test_cgroup_iter_memcg";
71
72 /*
73 * Increase memcg file usage by creating and writing
74 * to a mapped file.
75 */
76 fd = open(path, O_CREAT | O_RDWR, 0644);
77 if (!ASSERT_OK_FD(fd, "open fd"))
78 return;
79 if (!ASSERT_OK(ftruncate(fd, len), "ftruncate"))
80 goto cleanup_fd;
81
82 map = mmap(NULL, len, PROT_WRITE, MAP_SHARED, fd, 0);
83 if (!ASSERT_NEQ(map, MAP_FAILED, "mmap file"))
84 goto cleanup_fd;
85
86 memset(map, 1, len);
87
88 if (!ASSERT_OK(read_stats(link), "read stats"))
89 goto cleanup_map;
90
91 ASSERT_GT(memcg_query->nr_file_pages, 0, "final file value");
92 ASSERT_GT(memcg_query->nr_file_mapped, 0, "final file mapped value");
93
94cleanup_map:
95 munmap(map, len);
96cleanup_fd:
97 close(fd);
98 unlink(path);
99}
100
101static void test_shmem(struct bpf_link *link, struct memcg_query *memcg_query)
102{
103 size_t len;
104 int fd;
105
106 len = sysconf(_SC_PAGESIZE) * 1024;
107
108 /*
109 * Increase memcg shmem usage by creating and writing
110 * to a memfd backed by shmem/tmpfs.
111 */
112 fd = memfd_create("tmp_shmem", 0);
113 if (!ASSERT_OK_FD(fd, "memfd_create"))
114 return;
115
116 if (!ASSERT_OK(fallocate(fd, 0, 0, len), "fallocate"))
117 goto cleanup;
118
119 if (!ASSERT_OK(read_stats(link), "read stats"))
120 goto cleanup;
121
122 ASSERT_GT(memcg_query->nr_shmem, 0, "final shmem value");
123
124cleanup:
125 close(fd);
126}
127
128static void test_pgfault(struct bpf_link *link, struct memcg_query *memcg_query)
129{
130 void *map;
131 size_t len;
132
133 len = sysconf(_SC_PAGESIZE) * 1024;
134
135 /* Create region to use for triggering a page fault. */
136 map = mmap(NULL, len, PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
137 if (!ASSERT_NEQ(map, MAP_FAILED, "mmap anon"))
138 return;
139
140 /* Trigger page fault. */
141 memset(map, 1, len);
142
143 if (!ASSERT_OK(read_stats(link), "read stats"))
144 goto cleanup;
145
146 ASSERT_GT(memcg_query->pgfault, 0, "final pgfault val");
147
148cleanup:
149 munmap(map, len);
150}
151
152void test_cgroup_iter_memcg(void)
153{
154 char *cgroup_rel_path = "/cgroup_iter_memcg_test";
155 struct cgroup_iter_memcg *skel;
156 struct bpf_link *link;
157 int cgroup_fd;
158
159 cgroup_fd = cgroup_setup_and_join(cgroup_rel_path);
160 if (!ASSERT_OK_FD(cgroup_fd, "cgroup_setup_and_join"))
161 return;
162
163 skel = cgroup_iter_memcg__open_and_load();
164 if (!ASSERT_OK_PTR(skel, "cgroup_iter_memcg__open_and_load"))
165 goto cleanup_cgroup_fd;
166
167 DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
168 union bpf_iter_link_info linfo = {
169 .cgroup.cgroup_fd = cgroup_fd,
170 .cgroup.order = BPF_CGROUP_ITER_SELF_ONLY,
171 };
172 opts.link_info = &linfo;
173 opts.link_info_len = sizeof(linfo);
174
175 link = bpf_program__attach_iter(skel->progs.cgroup_memcg_query, &opts);
176 if (!ASSERT_OK_PTR(link, "bpf_program__attach_iter"))
177 goto cleanup_skel;
178
179 if (test__start_subtest("cgroup_iter_memcg__anon"))
180 test_anon(link, &skel->data_query->memcg_query);
181 if (test__start_subtest("cgroup_iter_memcg__shmem"))
182 test_shmem(link, &skel->data_query->memcg_query);
183 if (test__start_subtest("cgroup_iter_memcg__file"))
184 test_file(link, &skel->data_query->memcg_query);
185 if (test__start_subtest("cgroup_iter_memcg__pgfault"))
186 test_pgfault(link, &skel->data_query->memcg_query);
187
188 bpf_link__destroy(link);
189cleanup_skel:
190 cgroup_iter_memcg__destroy(skel);
191cleanup_cgroup_fd:
192 close(cgroup_fd);
193 cleanup_cgroup_environment();
194}