tools/testing/selftests/bpf/progs/task_kfunc_success.c (linux.git, Linux 6.14-rc3)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */

#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>

#include "../bpf_experimental.h"
#include "task_kfunc_common.h"

char _license[] SEC("license") = "GPL";

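/*
 * err is read back and checked by the userspace side of the test after each
 * program runs; pid is set by userspace to its own PID so that the programs
 * only react to events generated by the test itself.
 */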
int err, pid;

/* Prototype for all of the program trace events below:
 *
 * TRACE_EVENT(task_newtask,
 *         TP_PROTO(struct task_struct *p, u64 clone_flags)
 */

struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym __weak;

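/*
 * CO-RE "flavors": libbpf strips the ___<flavor> suffix before matching the
 * symbol against kernel BTF, so each declaration below is resolved against
 * the real bpf_task_acquire() and is kept or rejected based on whether its
 * signature is compatible.
 */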
struct task_struct *bpf_task_acquire___one(struct task_struct *task) __ksym __weak;
/* The two-param bpf_task_acquire doesn't exist */
struct task_struct *bpf_task_acquire___two(struct task_struct *p, void *ctx) __ksym __weak;
/* Incorrect type for first param */
struct task_struct *bpf_task_acquire___three(void *ctx) __ksym __weak;

void invalid_kfunc(void) __ksym __weak;
void bpf_testmod_test_mod_kfunc(int i) __ksym __weak;

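/* Only act on events generated by the userspace test's own process. */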
static bool is_test_kfunc_task(void)
{
        int cur_pid = bpf_get_current_pid_tgid() >> 32;

        return pid == cur_pid;
}

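/*
 * Shared helper: check that the weak kfuncs we rely on are present, then
 * verify that a trusted task pointer can be acquired and released.
 */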
static int test_acquire_release(struct task_struct *task)
{
        struct task_struct *acquired = NULL;

        if (!bpf_ksym_exists(bpf_task_acquire)) {
                err = 3;
                return 0;
        }
        if (!bpf_ksym_exists(bpf_testmod_test_mod_kfunc)) {
                err = 4;
                return 0;
        }
        if (bpf_ksym_exists(invalid_kfunc)) {
                /* the verifier's dead code elimination should remove this */
                err = 5;
                asm volatile ("goto -1"); /* for (;;); */
        }

        acquired = bpf_task_acquire(task);
        if (acquired)
                bpf_task_release(acquired);
        else
                err = 6;

        return 0;
}

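/*
 * Each ___flavor above shares the base name bpf_task_acquire. Only ___one has
 * a compatible signature, so it is the only flavor that should resolve and be
 * callable here.
 */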
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_kfunc_flavor_relo, struct task_struct *task, u64 clone_flags)
{
        struct task_struct *acquired = NULL;
        int fake_ctx = 42;

        if (bpf_ksym_exists(bpf_task_acquire___one)) {
                acquired = bpf_task_acquire___one(task);
        } else if (bpf_ksym_exists(bpf_task_acquire___two)) {
                /* Here, bpf_object__resolve_ksym_func_btf_id's find_ksym_btf_id
                 * call will find vmlinux's bpf_task_acquire, but the subsequent
                 * bpf_core_types_are_compat check will fail.
                 */
                acquired = bpf_task_acquire___two(task, &fake_ctx);
                err = 3;
                return 0;
        } else if (bpf_ksym_exists(bpf_task_acquire___three)) {
                /* bpf_core_types_are_compat will fail similarly to the above case */
                acquired = bpf_task_acquire___three(&fake_ctx);
                err = 4;
                return 0;
        }

        if (acquired)
                bpf_task_release(acquired);
        else
                err = 5;
        return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_kfunc_flavor_relo_not_found, struct task_struct *task, u64 clone_flags)
{
        /* Neither symbol should successfully resolve.
         * Success or failure of one ___flavor should not affect the others.
         */
        if (bpf_ksym_exists(bpf_task_acquire___two))
                err = 1;
        else if (bpf_ksym_exists(bpf_task_acquire___three))
                err = 2;

        return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_acquire_release_argument, struct task_struct *task, u64 clone_flags)
{
        if (!is_test_kfunc_task())
                return 0;

        return test_acquire_release(task);
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_acquire_release_current, struct task_struct *task, u64 clone_flags)
{
        if (!is_test_kfunc_task())
                return 0;

        return test_acquire_release(bpf_get_current_task_btf());
}

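/*
 * Store an acquired task kptr in the map and intentionally leave it there;
 * the kernel releases the reference when the map entry is deleted or the map
 * is destroyed.
 */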
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_acquire_leave_in_map, struct task_struct *task, u64 clone_flags)
{
        long status;

        if (!is_test_kfunc_task())
                return 0;

        status = tasks_kfunc_map_insert(task);
        if (status)
                err = 1;

        return 0;
}

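/*
 * Exercise bpf_kptr_xchg(): move an acquired task reference from the map into
 * a bpf_obj_new()-allocated local object, then verify via the task's
 * rcu_users count that bpf_obj_drop() releases the stashed reference
 * recursively.
 */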
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_xchg_release, struct task_struct *task, u64 clone_flags)
{
        struct task_struct *kptr, *acquired;
        struct __tasks_kfunc_map_value *v, *local;
        int refcnt, refcnt_after_drop;
        long status;

        if (!is_test_kfunc_task())
                return 0;

        status = tasks_kfunc_map_insert(task);
        if (status) {
                err = 1;
                return 0;
        }

        v = tasks_kfunc_map_value_lookup(task);
        if (!v) {
                err = 2;
                return 0;
        }

        kptr = bpf_kptr_xchg(&v->task, NULL);
        if (!kptr) {
                err = 3;
                return 0;
        }

        local = bpf_obj_new(typeof(*local));
        if (!local) {
                err = 4;
                bpf_task_release(kptr);
                return 0;
        }

        kptr = bpf_kptr_xchg(&local->task, kptr);
        if (kptr) {
                err = 5;
                bpf_obj_drop(local);
                bpf_task_release(kptr);
                return 0;
        }

        kptr = bpf_kptr_xchg(&local->task, NULL);
        if (!kptr) {
                err = 6;
                bpf_obj_drop(local);
                return 0;
        }

        /* Stash a copy into local kptr and check if it is released recursively */
        acquired = bpf_task_acquire(kptr);
        if (!acquired) {
                err = 7;
                bpf_obj_drop(local);
                bpf_task_release(kptr);
                return 0;
        }
        bpf_probe_read_kernel(&refcnt, sizeof(refcnt), &acquired->rcu_users);

        acquired = bpf_kptr_xchg(&local->task, acquired);
        if (acquired) {
                err = 8;
                bpf_obj_drop(local);
                bpf_task_release(kptr);
                bpf_task_release(acquired);
                return 0;
        }

        bpf_obj_drop(local);

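        /*
         * bpf_obj_drop() should have recursively released the reference
         * stashed in local->task, dropping rcu_users by exactly one relative
         * to the snapshot taken above.
         */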
        bpf_probe_read_kernel(&refcnt_after_drop, sizeof(refcnt_after_drop), &kptr->rcu_users);
        if (refcnt != refcnt_after_drop + 1) {
                err = 9;
                bpf_task_release(kptr);
                return 0;
        }

        bpf_task_release(kptr);

        return 0;
}

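/*
 * Read the task kptr directly out of the map value (without bpf_kptr_xchg())
 * and try to acquire and release it.
 */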
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_map_acquire_release, struct task_struct *task, u64 clone_flags)
{
        struct task_struct *kptr;
        struct __tasks_kfunc_map_value *v;
        long status;

        if (!is_test_kfunc_task())
                return 0;

        status = tasks_kfunc_map_insert(task);
        if (status) {
                err = 1;
                return 0;
        }

        v = tasks_kfunc_map_value_lookup(task);
        if (!v) {
                err = 2;
                return 0;
        }

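        /* A direct load of a referenced kptr yields an RCU-protected pointer,
         * so it can only be passed to bpf_task_acquire() inside an RCU
         * read-side critical section.
         */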
        bpf_rcu_read_lock();
        kptr = v->task;
        if (!kptr) {
                err = 3;
        } else {
                kptr = bpf_task_acquire(kptr);
                if (!kptr)
                        err = 4;
                else
                        bpf_task_release(kptr);
        }
        bpf_rcu_read_unlock();

        return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_current_acquire_release, struct task_struct *task, u64 clone_flags)
{
        struct task_struct *current, *acquired;

        if (!is_test_kfunc_task())
                return 0;

        current = bpf_get_current_task_btf();
        acquired = bpf_task_acquire(current);
        if (acquired)
                bpf_task_release(acquired);
        else
                err = 1;

        return 0;
}

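/* Look the task up again by PID and verify that the same task is returned. */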
static void lookup_compare_pid(const struct task_struct *p)
{
        struct task_struct *acquired;

        acquired = bpf_task_from_pid(p->pid);
        if (!acquired) {
                err = 1;
                return;
        }

        if (acquired->pid != p->pid)
                err = 2;
        bpf_task_release(acquired);
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_from_pid_arg, struct task_struct *task, u64 clone_flags)
{
        if (!is_test_kfunc_task())
                return 0;

        lookup_compare_pid(task);
        return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_from_pid_current, struct task_struct *task, u64 clone_flags)
{
        if (!is_test_kfunc_task())
                return 0;

        lookup_compare_pid(bpf_get_current_task_btf());
        return 0;
}

static int is_pid_lookup_valid(s32 pid)
{
        struct task_struct *acquired;

        acquired = bpf_task_from_pid(pid);
        if (acquired) {
                bpf_task_release(acquired);
                return 1;
        }

        return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_from_pid_invalid, struct task_struct *task, u64 clone_flags)
{
        if (!is_test_kfunc_task())
                return 0;

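        /* Exercise bpf_strncmp() against the trusted task->comm array with
         * in-bounds lengths and offsets; comm is TASK_COMM_LEN (16) bytes.
         */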
        bpf_strncmp(task->comm, 12, "foo");
        bpf_strncmp(task->comm, 16, "foo");
        bpf_strncmp(&task->comm[8], 4, "foo");

        if (is_pid_lookup_valid(-1)) {
                err = 1;
                return 0;
        }

        if (is_pid_lookup_valid(0xcafef00d)) {
                err = 2;
                return 0;
        }

        return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(task_kfunc_acquire_trusted_walked, struct task_struct *task, u64 clone_flags)
{
        struct task_struct *acquired;

        /* task->group_leader is listed as a trusted, non-NULL field of task struct. */
        acquired = bpf_task_acquire(task->group_leader);
        if (acquired)
                bpf_task_release(acquired);
        else
                err = 1;

        return 0;
}

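/*
 * The two "syscall" programs below are run with bpf_prog_test_run() by the
 * userspace side, which is expected to have entered a new PID namespace so
 * that it is that namespace's pid-1 init task.
 */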
SEC("syscall")
int test_task_from_vpid_current(const void *ctx)
{
        struct task_struct *current, *v_task;

        v_task = bpf_task_from_vpid(1);
        if (!v_task) {
                err = 1;
                return 0;
        }

        current = bpf_get_current_task_btf();

        /* The current process should be the init process (pid 1) in the new pid namespace. */
        if (current != v_task)
                err = 2;

        bpf_task_release(v_task);
        return 0;
}

SEC("syscall")
int test_task_from_vpid_invalid(const void *ctx)
{
        struct task_struct *v_task;

        v_task = bpf_task_from_vpid(-1);
        if (v_task) {
                err = 1;
                goto err;
        }

        /* There should be only one process (current process) in the new pid namespace. */
        v_task = bpf_task_from_vpid(2);
        if (v_task) {
                err = 2;
                goto err;
        }

        v_task = bpf_task_from_vpid(9999);
        if (v_task) {
                err = 3;
                goto err;
        }

        return 0;
err:
        bpf_task_release(v_task);
        return 0;
}