Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates.*/
3
4#define _GNU_SOURCE
5#include <unistd.h>
6#include <sys/syscall.h>
7#include <sys/types.h>
8#include <test_progs.h>
9#include "cgrp_ls_tp_btf.skel.h"
10#include "cgrp_ls_recursion.skel.h"
11#include "cgrp_ls_attach_cgroup.skel.h"
12#include "cgrp_ls_negative.skel.h"
13#include "cgrp_ls_sleepable.skel.h"
14#include "network_helpers.h"
15#include "cgroup_helpers.h"
16
/* Userspace view of the value stored in the skeleton's socket_cookies
 * map (read back via bpf_map_lookup_elem() in test_attach_cgroup()).
 * Presumably mirrors the BPF-side value layout -- verify against
 * progs/cgrp_ls_attach_cgroup.c.
 */
struct socket_cookie {
	__u64 cookie_key;
	__u64 cookie_value;
};
21
/* Cgroup mode shared with the BPF programs:
 *   is_cgroup1 - whether the tests currently target a cgroup v1 hierarchy
 *   target_hid - cgroup1 hierarchy id (only meaningful when is_cgroup1)
 * Both are set by cgroup_mode_value_init() before the subtests run.
 */
static bool is_cgroup1;
static int target_hid;

/* Copy the cgroup mode into a freshly opened skeleton's .bss so the BPF
 * side operates on the same hierarchy as the test.  Wrapped in
 * do { } while (0) so the macro expands to a single statement and stays
 * safe in unbraced if/else bodies (the bare { } form would break an
 * if/else around the existing `CGROUP_MODE_SET(skel);` call sites).
 */
#define CGROUP_MODE_SET(skel)				\
	do {						\
		skel->bss->is_cgroup1 = is_cgroup1;	\
		skel->bss->target_hid = target_hid;	\
	} while (0)
30
31static void cgroup_mode_value_init(bool cgroup, int hid)
32{
33 is_cgroup1 = cgroup;
34 target_hid = hid;
35}
36
37static void test_tp_btf(int cgroup_fd)
38{
39 struct cgrp_ls_tp_btf *skel;
40 long val1 = 1, val2 = 0;
41 int err;
42
43 skel = cgrp_ls_tp_btf__open_and_load();
44 if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
45 return;
46
47 CGROUP_MODE_SET(skel);
48
49 /* populate a value in map_b */
50 err = bpf_map_update_elem(bpf_map__fd(skel->maps.map_b), &cgroup_fd, &val1, BPF_ANY);
51 if (!ASSERT_OK(err, "map_update_elem"))
52 goto out;
53
54 /* check value */
55 err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.map_b), &cgroup_fd, &val2);
56 if (!ASSERT_OK(err, "map_lookup_elem"))
57 goto out;
58 if (!ASSERT_EQ(val2, 1, "map_lookup_elem, invalid val"))
59 goto out;
60
61 /* delete value */
62 err = bpf_map_delete_elem(bpf_map__fd(skel->maps.map_b), &cgroup_fd);
63 if (!ASSERT_OK(err, "map_delete_elem"))
64 goto out;
65
66 skel->bss->target_pid = sys_gettid();
67
68 err = cgrp_ls_tp_btf__attach(skel);
69 if (!ASSERT_OK(err, "skel_attach"))
70 goto out;
71
72 sys_gettid();
73 sys_gettid();
74
75 skel->bss->target_pid = 0;
76
77 /* 3x syscalls: 1x attach and 2x gettid */
78 ASSERT_EQ(skel->bss->enter_cnt, 3, "enter_cnt");
79 ASSERT_EQ(skel->bss->exit_cnt, 3, "exit_cnt");
80 ASSERT_EQ(skel->bss->mismatch_cnt, 0, "mismatch_cnt");
81out:
82 cgrp_ls_tp_btf__destroy(skel);
83}
84
/* Attach set_cookie and update_cookie_sockops to the given cgroup and
 * update_cookie_tracing via its default attach point, then drive an
 * IPv6 loopback TCP connection through them and check the cookie value
 * the programs left in cgroup local storage.
 */
static void test_attach_cgroup(int cgroup_fd)
{
	int server_fd = 0, client_fd = 0, err = 0;
	socklen_t addr_len = sizeof(struct sockaddr_in6);
	struct cgrp_ls_attach_cgroup *skel;
	__u32 cookie_expected_value;
	struct sockaddr_in6 addr;
	struct socket_cookie val;

	skel = cgrp_ls_attach_cgroup__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	/* store links in the skeleton so __destroy() detaches them */
	skel->links.set_cookie = bpf_program__attach_cgroup(
		skel->progs.set_cookie, cgroup_fd);
	if (!ASSERT_OK_PTR(skel->links.set_cookie, "prog_attach"))
		goto out;

	skel->links.update_cookie_sockops = bpf_program__attach_cgroup(
		skel->progs.update_cookie_sockops, cgroup_fd);
	if (!ASSERT_OK_PTR(skel->links.update_cookie_sockops, "prog_attach"))
		goto out;

	skel->links.update_cookie_tracing = bpf_program__attach(
		skel->progs.update_cookie_tracing);
	if (!ASSERT_OK_PTR(skel->links.update_cookie_tracing, "prog_attach"))
		goto out;

	/* loopback server on an ephemeral port */
	server_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
	if (!ASSERT_GE(server_fd, 0, "start_server"))
		goto out;

	/* connecting triggers the attached cgroup programs */
	client_fd = connect_to_fd(server_fd, 0);
	if (!ASSERT_GE(client_fd, 0, "connect_to_fd"))
		goto close_server_fd;

	/* value stored by the BPF side, keyed by the cgroup fd */
	err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.socket_cookies),
				  &cgroup_fd, &val);
	if (!ASSERT_OK(err, "map_lookup(socket_cookies)"))
		goto close_client_fd;

	err = getsockname(client_fd, (struct sockaddr *)&addr, &addr_len);
	if (!ASSERT_OK(err, "getsockname"))
		goto close_client_fd;

	/* expected encoding: client source port << 8, low byte 0xFF --
	 * assumes the BPF side builds the cookie the same way; verify
	 * against progs/cgrp_ls_attach_cgroup.c
	 */
	cookie_expected_value = (ntohs(addr.sin6_port) << 8) | 0xFF;
	ASSERT_EQ(val.cookie_value, cookie_expected_value, "cookie_value");

close_client_fd:
	close(client_fd);
close_server_fd:
	close(server_fd);
out:
	cgrp_ls_attach_cgroup__destroy(skel);
}
140
/* Attach the recursion skeleton and fire one syscall; the test passes
 * as long as the re-entrant local storage access on the BPF side does
 * not deadlock.  (cgroup_fd is unused here; kept for a uniform subtest
 * signature.)
 */
static void test_recursion(int cgroup_fd)
{
	struct cgrp_ls_recursion *skel;

	skel = cgrp_ls_recursion__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
		return;

	CGROUP_MODE_SET(skel);

	if (ASSERT_OK(cgrp_ls_recursion__attach(skel), "skel_attach")) {
		/* trigger sys_enter, make sure it does not cause deadlock */
		sys_gettid();
	}

	cgrp_ls_recursion__destroy(skel);
}
162
/* The negative skeleton is expected to fail to load; if it loads
 * anyway the assertion fires and the skeleton is torn down so nothing
 * leaks.
 */
static void test_negative(void)
{
	struct cgrp_ls_negative *skel = cgrp_ls_negative__open_and_load();

	if (!ASSERT_ERR_PTR(skel, "skel_open_and_load"))
		cgrp_ls_negative__destroy(skel);
}
173
/* Load only the cgroup_iter program, attach it as a cgroup iterator
 * restricted to this cgroup, read the iterator once to run the
 * program, and check that it reported the expected cgroup id.
 */
static void test_cgroup_iter_sleepable(int cgroup_fd, __u64 cgroup_id)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	struct cgrp_ls_sleepable *skel;
	struct bpf_link *link;
	int err, iter_fd;
	char buf[16];

	skel = cgrp_ls_sleepable__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	CGROUP_MODE_SET(skel);

	/* only this program is needed; the skeleton's other programs
	 * stay unloaded
	 */
	bpf_program__set_autoload(skel->progs.cgroup_iter, true);
	err = cgrp_ls_sleepable__load(skel);
	if (!ASSERT_OK(err, "skel_load"))
		goto out;

	/* iterate over this cgroup only, not its descendants */
	memset(&linfo, 0, sizeof(linfo));
	linfo.cgroup.cgroup_fd = cgroup_fd;
	linfo.cgroup.order = BPF_CGROUP_ITER_SELF_ONLY;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.cgroup_iter, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "iter_create"))
		goto out_link;

	/* trigger the program run */
	(void)read(iter_fd, buf, sizeof(buf));

	ASSERT_EQ(skel->bss->cgroup_id, cgroup_id, "cgroup_id");

	close(iter_fd);
out_link:
	bpf_link__destroy(link);
out:
	cgrp_ls_sleepable__destroy(skel);
}
218
219static void test_yes_rcu_lock(__u64 cgroup_id)
220{
221 struct cgrp_ls_sleepable *skel;
222 int err;
223
224 skel = cgrp_ls_sleepable__open();
225 if (!ASSERT_OK_PTR(skel, "skel_open"))
226 return;
227
228 CGROUP_MODE_SET(skel);
229 skel->bss->target_pid = sys_gettid();
230
231 bpf_program__set_autoload(skel->progs.yes_rcu_lock, true);
232 err = cgrp_ls_sleepable__load(skel);
233 if (!ASSERT_OK(err, "skel_load"))
234 goto out;
235
236 err = cgrp_ls_sleepable__attach(skel);
237 if (!ASSERT_OK(err, "skel_attach"))
238 goto out;
239
240 syscall(SYS_getpgid);
241
242 ASSERT_EQ(skel->bss->cgroup_id, cgroup_id, "cgroup_id");
243out:
244 cgrp_ls_sleepable__destroy(skel);
245}
246
247static void test_no_rcu_lock(void)
248{
249 struct cgrp_ls_sleepable *skel;
250 int err;
251
252 skel = cgrp_ls_sleepable__open();
253 if (!ASSERT_OK_PTR(skel, "skel_open"))
254 return;
255
256 CGROUP_MODE_SET(skel);
257
258 bpf_program__set_autoload(skel->progs.no_rcu_lock, true);
259 err = cgrp_ls_sleepable__load(skel);
260 ASSERT_ERR(err, "skel_load");
261
262 cgrp_ls_sleepable__destroy(skel);
263}
264
265static void test_cgrp1_no_rcu_lock(void)
266{
267 struct cgrp_ls_sleepable *skel;
268 int err;
269
270 skel = cgrp_ls_sleepable__open();
271 if (!ASSERT_OK_PTR(skel, "skel_open"))
272 return;
273
274 CGROUP_MODE_SET(skel);
275
276 bpf_program__set_autoload(skel->progs.cgrp1_no_rcu_lock, true);
277 err = cgrp_ls_sleepable__load(skel);
278 ASSERT_OK(err, "skel_load");
279
280 cgrp_ls_sleepable__destroy(skel);
281}
282
283static void cgrp2_local_storage(void)
284{
285 __u64 cgroup_id;
286 int cgroup_fd;
287
288 cgroup_fd = test__join_cgroup("/cgrp_local_storage");
289 if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /cgrp_local_storage"))
290 return;
291
292 cgroup_mode_value_init(0, -1);
293
294 cgroup_id = get_cgroup_id("/cgrp_local_storage");
295 if (test__start_subtest("tp_btf"))
296 test_tp_btf(cgroup_fd);
297 if (test__start_subtest("attach_cgroup"))
298 test_attach_cgroup(cgroup_fd);
299 if (test__start_subtest("recursion"))
300 test_recursion(cgroup_fd);
301 if (test__start_subtest("negative"))
302 test_negative();
303 if (test__start_subtest("cgroup_iter_sleepable"))
304 test_cgroup_iter_sleepable(cgroup_fd, cgroup_id);
305 if (test__start_subtest("yes_rcu_lock"))
306 test_yes_rcu_lock(cgroup_id);
307 if (test__start_subtest("no_rcu_lock"))
308 test_no_rcu_lock();
309
310 close(cgroup_fd);
311}
312
/* Set up a cgroup v1 (net_cls) hierarchy, join it, and run the cgrp1
 * flavors of the subtests against its fd and hierarchy id.
 */
static void cgrp1_local_storage(void)
{
	int cgrp1_fd, cgrp1_hid, cgrp1_id, err;

	/* Setup cgroup1 hierarchy */
	err = setup_classid_environment();
	if (!ASSERT_OK(err, "setup_classid_environment"))
		return;

	err = join_classid();
	if (!ASSERT_OK(err, "join_cgroup1"))
		goto cleanup;

	cgrp1_fd = open_classid();
	if (!ASSERT_GE(cgrp1_fd, 0, "cgroup1 fd"))
		goto cleanup;

	cgrp1_id = get_classid_cgroup_id();
	if (!ASSERT_GE(cgrp1_id, 0, "cgroup1 id"))
		goto close_fd;

	/* hierarchy id of the net_cls controller mount */
	cgrp1_hid = get_cgroup1_hierarchy_id("net_cls");
	if (!ASSERT_GE(cgrp1_hid, 0, "cgroup1 hid"))
		goto close_fd;

	/* switch the shared test mode to cgroup1 */
	cgroup_mode_value_init(1, cgrp1_hid);

	if (test__start_subtest("cgrp1_tp_btf"))
		test_tp_btf(cgrp1_fd);
	if (test__start_subtest("cgrp1_recursion"))
		test_recursion(cgrp1_fd);
	if (test__start_subtest("cgrp1_negative"))
		test_negative();
	if (test__start_subtest("cgrp1_iter_sleepable"))
		test_cgroup_iter_sleepable(cgrp1_fd, cgrp1_id);
	if (test__start_subtest("cgrp1_yes_rcu_lock"))
		test_yes_rcu_lock(cgrp1_id);
	if (test__start_subtest("cgrp1_no_rcu_lock"))
		test_cgrp1_no_rcu_lock();

close_fd:
	close(cgrp1_fd);
cleanup:
	cleanup_classid_environment();
}
358
/* Test entry point: run the cgroup local storage tests against a
 * cgroup v2 directory first, then against a cgroup v1 (net_cls)
 * hierarchy.
 */
void test_cgrp_local_storage(void)
{
	cgrp2_local_storage();
	cgrp1_local_storage();
}