// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <test_progs.h>
#include <time.h>

#include <sys/epoll.h>

#include "struct_ops_module.skel.h"
#include "struct_ops_nulled_out_cb.skel.h"
#include "struct_ops_forgotten_cb.skel.h"
#include "struct_ops_detach.skel.h"
#include "unsupported_ops.skel.h"

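/* Verify that the value type of the struct_ops map comes from the
 * bpf_testmod module: look up the BTF object recorded in the map info
 * and check that its name is "bpf_testmod".
 */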
static void check_map_info(struct bpf_map_info *info)
{
	struct bpf_btf_info btf_info;
	char btf_name[256];
	u32 btf_info_len = sizeof(btf_info);
	int err, fd;

	fd = bpf_btf_get_fd_by_id(info->btf_vmlinux_id);
	if (!ASSERT_GE(fd, 0, "get_value_type_btf_obj_fd"))
		return;

	memset(&btf_info, 0, sizeof(btf_info));
	btf_info.name = ptr_to_u64(btf_name);
	btf_info.name_len = sizeof(btf_name);
	err = bpf_btf_get_info_by_fd(fd, &btf_info, &btf_info_len);
	if (!ASSERT_OK(err, "get_value_type_btf_obj_info"))
		goto cleanup;

	if (!ASSERT_EQ(strcmp(btf_name, "bpf_testmod"), 0, "get_value_type_btf_obj_name"))
		goto cleanup;

cleanup:
	close(fd);
}

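/* Attach the given struct_ops map and check the results that the test_1
 * and test_2 callbacks report back through the skeleton's bss.
 */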
static int attach_ops_and_check(struct struct_ops_module *skel,
				struct bpf_map *map,
				int expected_test_2_result)
{
	struct bpf_link *link;

	link = bpf_map__attach_struct_ops(map);
	ASSERT_OK_PTR(link, "attach_test_mod_1");
	if (!link)
		return -1;

	/* test_{1,2}() would be called from bpf_dummy_reg() in bpf_testmod.c */
	ASSERT_EQ(skel->bss->test_1_result, 0xdeadbeef, "test_1_result");
	ASSERT_EQ(skel->bss->test_2_result, expected_test_2_result, "test_2_result");

	bpf_link__destroy(link);
	return 0;
}

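/* Load the skeleton with testmod_1's test_2 slot pointing at test_3, then
 * check the map info and the callback results for testmod_1 and testmod_2.
 */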
static void test_struct_ops_load(void)
{
	struct struct_ops_module *skel;
	struct bpf_map_info info = {};
	int err;
	u32 len;

	skel = struct_ops_module__open();
	if (!ASSERT_OK_PTR(skel, "struct_ops_module_open"))
		return;

	skel->struct_ops.testmod_1->data = 13;
	skel->struct_ops.testmod_1->test_2 = skel->progs.test_3;
	/* Since test_2() is not used, disable its auto-loading, or the
	 * skeleton will fail to load.
	 */
	bpf_program__set_autoload(skel->progs.test_2, false);
	bpf_map__set_autocreate(skel->maps.testmod_zeroed, false);

	err = struct_ops_module__load(skel);
	if (!ASSERT_OK(err, "struct_ops_module_load"))
		goto cleanup;

	len = sizeof(info);
	err = bpf_map_get_info_by_fd(bpf_map__fd(skel->maps.testmod_1), &info,
				     &len);
	if (!ASSERT_OK(err, "bpf_map_get_info_by_fd"))
		goto cleanup;

	check_map_info(&info);
	/* test_3() will be called from bpf_dummy_reg() in bpf_testmod.c
	 *
	 * In bpf_testmod.c it will pass 4 and 13 (the value of data) to
	 * .test_2.  So, the value of test_2_result should be 20 (4 + 13 +
	 * 3).
	 */
	if (!attach_ops_and_check(skel, skel->maps.testmod_1, 20))
		goto cleanup;
	if (!attach_ops_and_check(skel, skel->maps.testmod_2, 12))
		goto cleanup;

cleanup:
	struct_ops_module__destroy(skel);
}

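/* "zeroed" and "zeroed_op" exist only in the BPF program's definition of
 * struct bpf_testmod_ops; libbpf should reject the map unless they are
 * left as 0/NULL.
 */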
static void test_struct_ops_not_zeroed(void)
{
	struct struct_ops_module *skel;
	int err;

	/* zeroed is 0, and zeroed_op is null */
	skel = struct_ops_module__open();
	if (!ASSERT_OK_PTR(skel, "struct_ops_module_open"))
		return;

	skel->struct_ops.testmod_zeroed->zeroed = 0;
	/* the zeroed_op prog should not be loaded automatically now */
	skel->struct_ops.testmod_zeroed->zeroed_op = NULL;

	err = struct_ops_module__load(skel);
	ASSERT_OK(err, "struct_ops_module_load");

	struct_ops_module__destroy(skel);

	/* zeroed is not 0 */
	skel = struct_ops_module__open();
	if (!ASSERT_OK_PTR(skel, "struct_ops_module_open_not_zeroed"))
		return;

	/* libbpf should reject the testmod_zeroed since struct
	 * bpf_testmod_ops in the kernel has no "zeroed" field and the
	 * value of "zeroed" is non-zero.
	 */
	skel->struct_ops.testmod_zeroed->zeroed = 0xdeadbeef;
	skel->struct_ops.testmod_zeroed->zeroed_op = NULL;
	err = struct_ops_module__load(skel);
	ASSERT_ERR(err, "struct_ops_module_load_not_zeroed");

	struct_ops_module__destroy(skel);

	/* zeroed_op is not null */
	skel = struct_ops_module__open();
	if (!ASSERT_OK_PTR(skel, "struct_ops_module_open_not_zeroed_op"))
		return;

	/* libbpf should reject the testmod_zeroed since the value of its
	 * "zeroed_op" is not null.
	 */
	skel->struct_ops.testmod_zeroed->zeroed_op = skel->progs.test_3;
	err = struct_ops_module__load(skel);
	ASSERT_ERR(err, "struct_ops_module_load_not_zeroed_op");

	struct_ops_module__destroy(skel);
}

/* The signature of an implementation might not match the signature of the
 * function pointer prototype defined in the BPF program. This mismatch
 * should be allowed as long as the behavior of the struct_ops program
 * adheres to the signature in the kernel. Libbpf should not enforce the
 * signature; rather, let the kernel verifier handle the enforcement.
 */
static void test_struct_ops_incompatible(void)
{
	struct struct_ops_module *skel;
	struct bpf_link *link;
	int err;

	skel = struct_ops_module__open();
	if (!ASSERT_OK_PTR(skel, "struct_ops_module_open"))
		return;

	bpf_map__set_autocreate(skel->maps.testmod_zeroed, false);

	err = struct_ops_module__load(skel);
	if (!ASSERT_OK(err, "skel_load"))
		goto cleanup;

	link = bpf_map__attach_struct_ops(skel->maps.testmod_incompatible);
	if (ASSERT_OK_PTR(link, "attach_struct_ops"))
		bpf_link__destroy(link);

cleanup:
	struct_ops_module__destroy(skel);
}

/* validate that it's ok to "turn off" a callback that the kernel supports */
static void test_struct_ops_nulled_out_cb(void)
{
	struct struct_ops_nulled_out_cb *skel;
	int err;

	skel = struct_ops_nulled_out_cb__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	/* kernel knows about test_1, but we still null it out */
	skel->struct_ops.ops->test_1 = NULL;

	err = struct_ops_nulled_out_cb__load(skel);
	if (!ASSERT_OK(err, "skel_load"))
		goto cleanup;

	ASSERT_FALSE(bpf_program__autoload(skel->progs.test_1_turn_off), "prog_autoload");
	ASSERT_LT(bpf_program__fd(skel->progs.test_1_turn_off), 0, "prog_fd");

cleanup:
	struct_ops_nulled_out_cb__destroy(skel);
}

/* validate that libbpf generates a reasonable error message if a
 * struct_ops program is not referenced in any struct_ops map
 */
static void test_struct_ops_forgotten_cb(void)
{
	struct struct_ops_forgotten_cb *skel;
	char *log;
	int err;

	skel = struct_ops_forgotten_cb__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	start_libbpf_log_capture();

	err = struct_ops_forgotten_cb__load(skel);
	if (!ASSERT_ERR(err, "skel_load"))
		goto cleanup;

	log = stop_libbpf_log_capture();
	ASSERT_HAS_SUBSTR(log,
			  "prog 'test_1_forgotten': SEC(\"struct_ops\") program isn't referenced anywhere, did you forget to use it?",
			  "libbpf_log");
	free(log);

	struct_ops_forgotten_cb__destroy(skel);

	/* now let's programmatically use it; loading should succeed */
	skel = struct_ops_forgotten_cb__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	skel->struct_ops.ops->test_1 = skel->progs.test_1_forgotten; /* not anymore */

	err = struct_ops_forgotten_cb__load(skel);
	if (!ASSERT_OK(err, "skel_load"))
		goto cleanup;

cleanup:
	struct_ops_forgotten_cb__destroy(skel);
}

/* Detach a struct_ops link from user space and check that its fd reports
 * EPOLLHUP.
 */
static void test_detach_link(void)
{
	struct epoll_event ev, events[2];
	struct struct_ops_detach *skel;
	struct bpf_link *link = NULL;
	int fd, epollfd = -1, nfds;
	int err;

	skel = struct_ops_detach__open_and_load();
	if (!ASSERT_OK_PTR(skel, "struct_ops_detach__open_and_load"))
		return;

	link = bpf_map__attach_struct_ops(skel->maps.testmod_do_detach);
	if (!ASSERT_OK_PTR(link, "attach_struct_ops"))
		goto cleanup;

	fd = bpf_link__fd(link);
	if (!ASSERT_GE(fd, 0, "link_fd"))
		goto cleanup;

	epollfd = epoll_create1(0);
	if (!ASSERT_GE(epollfd, 0, "epoll_create1"))
		goto cleanup;

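	/* Arm epoll on the link fd; detaching the link below should wake
	 * the waiter with EPOLLHUP on that fd.
	 */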
	ev.events = EPOLLHUP;
	ev.data.fd = fd;
	err = epoll_ctl(epollfd, EPOLL_CTL_ADD, fd, &ev);
	if (!ASSERT_OK(err, "epoll_ctl"))
		goto cleanup;

	err = bpf_link__detach(link);
	if (!ASSERT_OK(err, "detach_link"))
		goto cleanup;

	/* Wait for EPOLLHUP */
	nfds = epoll_wait(epollfd, events, 2, 500);
	if (!ASSERT_EQ(nfds, 1, "epoll_wait"))
		goto cleanup;

	if (!ASSERT_EQ(events[0].data.fd, fd, "epoll_wait_fd"))
		goto cleanup;
	if (!ASSERT_TRUE(events[0].events & EPOLLHUP, "events[0].events"))
		goto cleanup;

cleanup:
	if (epollfd >= 0)
		close(epollfd);
	bpf_link__destroy(link);
	struct_ops_detach__destroy(skel);
}

void serial_test_struct_ops_module(void)
{
	if (test__start_subtest("struct_ops_load"))
		test_struct_ops_load();
	if (test__start_subtest("struct_ops_not_zeroed"))
		test_struct_ops_not_zeroed();
	if (test__start_subtest("struct_ops_incompatible"))
		test_struct_ops_incompatible();
	if (test__start_subtest("struct_ops_null_out_cb"))
		test_struct_ops_nulled_out_cb();
	if (test__start_subtest("struct_ops_forgotten_cb"))
		test_struct_ops_forgotten_cb();
	if (test__start_subtest("test_detach_link"))
		test_detach_link();
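	/* RUN_TESTS() runs the loader-driven subtests declared in the
	 * unsupported_ops skeleton.
	 */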
	RUN_TESTS(unsupported_ops);
}