1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
5 #include <bpf/bpf_tracing.h>
6 #include <bpf/bpf_helpers.h>
8 #include "../bpf_experimental.h"
9 #include "task_kfunc_common.h"
11 char _license[] SEC("license") = "GPL";
15 /* Prototype for all of the program trace events below:
17 * TRACE_EVENT(task_newtask,
18 * TP_PROTO(struct task_struct *p, u64 clone_flags)
21 struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym __weak;
23 struct task_struct *bpf_task_acquire___one(struct task_struct *task) __ksym __weak;
24 /* The two-param bpf_task_acquire doesn't exist */
25 struct task_struct *bpf_task_acquire___two(struct task_struct *p, void *ctx) __ksym __weak;
26 /* Incorrect type for first param */
27 struct task_struct *bpf_task_acquire___three(void *ctx) __ksym __weak;
29 void invalid_kfunc(void) __ksym __weak;
30 void bpf_testmod_test_mod_kfunc(int i) __ksym __weak;
/* Return true iff the currently-running task is the test process.
 * NOTE(review): `pid` is not declared in this chunk — presumably a global
 * volatile set by the userspace test harness before attach; confirm against
 * the full file. Interior lines (braces) are elided in this dump.
 */
32 static bool is_test_kfunc_task(void)
/* upper 32 bits of bpf_get_current_pid_tgid() are the tgid (userspace "pid") */
34 	int cur_pid = bpf_get_current_pid_tgid() >> 32;
36 	return pid == cur_pid;
/* Common helper: acquire a reference on @task via the weak bpf_task_acquire
 * kfunc, then immediately release it. First verifies that the weak __ksym
 * declarations above actually resolved (bpf_ksym_exists), so the program
 * loads even on kernels/modules lacking them.
 * NOTE(review): the bodies of the three `if` guards are elided in this dump —
 * presumably they set an error flag and return early; confirm in full file.
 */
39 static int test_acquire_release(struct task_struct *task)
41 	struct task_struct *acquired = NULL;
43 	if (!bpf_ksym_exists(bpf_task_acquire)) {
47 	if (!bpf_ksym_exists(bpf_testmod_test_mod_kfunc)) {
51 	if (bpf_ksym_exists(invalid_kfunc)) {
52 		/* the verifier's dead code elimination should remove this */
/* `goto -1` is an infinite loop the verifier would normally reject; it must be
 * pruned as dead code because invalid_kfunc can never resolve. */
54 		asm volatile ("goto -1"); /* for (;;); */
57 	acquired = bpf_task_acquire(task);
59 	bpf_task_release(acquired);
/* Exercise CO-RE "flavor" (___suffix) ksym resolution: only the ___one
 * flavor matches vmlinux's bpf_task_acquire signature, so only its branch
 * should survive; ___two and ___three have incompatible prototypes (see the
 * declarations above) and bpf_ksym_exists() must evaluate false for them.
 * NOTE(review): `fake_ctx` is not declared in this chunk — presumably a
 * local or global dummy; confirm against the full file.
 */
66 SEC("tp_btf/task_newtask")
67 int BPF_PROG(test_task_kfunc_flavor_relo, struct task_struct *task, u64 clone_flags)
69 	struct task_struct *acquired = NULL;
72 	if (bpf_ksym_exists(bpf_task_acquire___one)) {
73 		acquired = bpf_task_acquire___one(task);
74 	} else if (bpf_ksym_exists(bpf_task_acquire___two)) {
75 		/* Here, bpf_object__resolve_ksym_func_btf_id's find_ksym_btf_id
76 		 * call will find vmlinux's bpf_task_acquire, but subsequent
77 		 * bpf_core_types_are_compat will fail
79 		acquired = bpf_task_acquire___two(task, &fake_ctx);
82 	} else if (bpf_ksym_exists(bpf_task_acquire___three)) {
83 		/* bpf_core_types_are_compat will fail similarly to above case */
84 		acquired = bpf_task_acquire___three(&fake_ctx);
/* release the reference taken by whichever flavor branch ran */
90 	bpf_task_release(acquired);
/* Negative counterpart of the flavor-relocation test: with no compatible
 * flavor present, both bpf_ksym_exists() checks must be false so both
 * branches are dead-code-eliminated.
 * NOTE(review): branch bodies and the return are elided in this dump.
 */
96 SEC("tp_btf/task_newtask")
97 int BPF_PROG(test_task_kfunc_flavor_relo_not_found, struct task_struct *task, u64 clone_flags)
99 	/* Neither symbol should successfully resolve.
100 	 * Success or failure of one ___flavor should not affect others
102 	if (bpf_ksym_exists(bpf_task_acquire___two))
104 	else if (bpf_ksym_exists(bpf_task_acquire___three))
/* Acquire/release on the tracepoint's own task argument (a trusted pointer).
 * Gated on is_test_kfunc_task() so only the test process triggers the logic.
 */
110 SEC("tp_btf/task_newtask")
111 int BPF_PROG(test_task_acquire_release_argument, struct task_struct *task, u64 clone_flags)
113 	if (!is_test_kfunc_task())
116 	return test_acquire_release(task);
/* Same as the _argument variant, but acquires a reference on the current
 * task (bpf_get_current_task_btf) rather than the tracepoint argument.
 */
119 SEC("tp_btf/task_newtask")
120 int BPF_PROG(test_task_acquire_release_current, struct task_struct *task, u64 clone_flags)
122 	if (!is_test_kfunc_task())
125 	return test_acquire_release(bpf_get_current_task_btf());
/* Stash an acquired task kptr in the test map and leave it there — the
 * kernel must release it on map teardown.
 * NOTE(review): `status` declaration and the error/return path are elided
 * in this dump; tasks_kfunc_map_insert comes from task_kfunc_common.h.
 */
128 SEC("tp_btf/task_newtask")
129 int BPF_PROG(test_task_acquire_leave_in_map, struct task_struct *task, u64 clone_flags)
133 	if (!is_test_kfunc_task())
136 	status = tasks_kfunc_map_insert(task);
/* End-to-end kptr lifecycle test:
 *  1. insert @task into the map, xchg the kptr back out;
 *  2. move it through a bpf_obj_new()-allocated local kptr holder;
 *  3. stash an extra acquired reference in the local holder and verify,
 *     via rcu_users snapshots, that dropping the holder releases it
 *     (refcnt before == refcnt after drop + 1).
 * NOTE(review): many interior lines (NULL checks, error flags, returns,
 * bpf_obj_drop of `local`) are elided in this dump — the release/err paths
 * between the visible statements cannot be confirmed from this chunk.
 */
143 SEC("tp_btf/task_newtask")
144 int BPF_PROG(test_task_xchg_release, struct task_struct *task, u64 clone_flags)
146 	struct task_struct *kptr, *acquired;
147 	struct __tasks_kfunc_map_value *v, *local;
148 	int refcnt, refcnt_after_drop;
151 	if (!is_test_kfunc_task())
154 	status = tasks_kfunc_map_insert(task);
160 	v = tasks_kfunc_map_value_lookup(task);
/* take ownership of the stashed kptr out of the map value */
166 	kptr = bpf_kptr_xchg(&v->task, NULL);
172 	local = bpf_obj_new(typeof(*local));
/* error path: allocation of `local` presumably failed here */
175 	bpf_task_release(kptr);
/* move kptr into the locally-allocated holder; result should be NULL */
179 	kptr = bpf_kptr_xchg(&local->task, kptr);
183 	bpf_task_release(kptr);
/* take it back out of the local holder */
187 	kptr = bpf_kptr_xchg(&local->task, NULL);
194 	/* Stash a copy into local kptr and check if it is released recursively */
195 	acquired = bpf_task_acquire(kptr);
199 	bpf_task_release(kptr);
/* snapshot task->rcu_users before the stashed ref is dropped */
202 	bpf_probe_read_kernel(&refcnt, sizeof(refcnt), &acquired->rcu_users);
204 	acquired = bpf_kptr_xchg(&local->task, acquired);
208 	bpf_task_release(kptr);
209 	bpf_task_release(acquired);
/* dropping `local` must have recursively released the stashed ref */
215 	bpf_probe_read_kernel(&refcnt_after_drop, sizeof(refcnt_after_drop), &kptr->rcu_users);
216 	if (refcnt != refcnt_after_drop + 1) {
218 		bpf_task_release(kptr);
222 	bpf_task_release(kptr);
/* Acquire a new reference on a task kptr while it is still stored in the
 * map (read under RCU), then release that reference. The matching
 * bpf_rcu_read_lock() and the map-value NULL checks are elided in this dump.
 */
227 SEC("tp_btf/task_newtask")
228 int BPF_PROG(test_task_map_acquire_release, struct task_struct *task, u64 clone_flags)
230 	struct task_struct *kptr;
231 	struct __tasks_kfunc_map_value *v;
234 	if (!is_test_kfunc_task())
237 	status = tasks_kfunc_map_insert(task);
243 	v = tasks_kfunc_map_value_lookup(task);
/* NOTE(review): kptr presumably loaded from v->task under RCU above (elided) */
254 	kptr = bpf_task_acquire(kptr);
258 		bpf_task_release(kptr);
260 	bpf_rcu_read_unlock();
/* Acquire and release a reference directly on the current task — verifies
 * bpf_get_current_task_btf()'s return is a valid trusted argument for
 * bpf_task_acquire.
 */
265 SEC("tp_btf/task_newtask")
266 int BPF_PROG(test_task_current_acquire_release, struct task_struct *task, u64 clone_flags)
268 	struct task_struct *current, *acquired;
270 	if (!is_test_kfunc_task())
273 	current = bpf_get_current_task_btf();
274 	acquired = bpf_task_acquire(current);
276 	bpf_task_release(acquired);
/* Look up @p by pid via bpf_task_from_pid and check the result's pid
 * matches. bpf_task_from_pid returns an acquired reference, released at
 * the end. NOTE(review): the NULL check on `acquired` and the error-flag
 * sets are elided in this dump.
 */
283 static void lookup_compare_pid(const struct task_struct *p)
285 	struct task_struct *acquired;
287 	acquired = bpf_task_from_pid(p->pid);
293 	if (acquired->pid != p->pid)
295 	bpf_task_release(acquired);
/* bpf_task_from_pid round-trip on the newly created task argument. */
298 SEC("tp_btf/task_newtask")
299 int BPF_PROG(test_task_from_pid_arg, struct task_struct *task, u64 clone_flags)
301 	if (!is_test_kfunc_task())
304 	lookup_compare_pid(task);
/* bpf_task_from_pid round-trip on the current task. */
308 SEC("tp_btf/task_newtask")
309 int BPF_PROG(test_task_from_pid_current, struct task_struct *task, u64 clone_flags)
311 	if (!is_test_kfunc_task())
314 	lookup_compare_pid(bpf_get_current_task_btf());
/* Return whether bpf_task_from_pid(@pid) found a task; releases the
 * reference when the lookup succeeded. NOTE(review): the NULL branch and
 * both return statements are elided in this dump.
 */
318 static int is_pid_lookup_valid(s32 pid)
320 	struct task_struct *acquired;
322 	acquired = bpf_task_from_pid(pid);
324 		bpf_task_release(acquired);
/* Negative pid-lookup test: -1 and an out-of-range pid must both fail.
 * The bpf_strncmp calls exercise variable-offset/size access into
 * task->comm (a 16-byte array) rather than checking the comparison result.
 */
331 SEC("tp_btf/task_newtask")
332 int BPF_PROG(test_task_from_pid_invalid, struct task_struct *task, u64 clone_flags)
334 	if (!is_test_kfunc_task())
337 	bpf_strncmp(task->comm, 12, "foo");
338 	bpf_strncmp(task->comm, 16, "foo");
339 	bpf_strncmp(&task->comm[8], 4, "foo");
341 	if (is_pid_lookup_valid(-1)) {
346 	if (is_pid_lookup_valid(0xcafef00d)) {
/* Acquire through a pointer walked off a trusted argument: task->group_leader
 * is annotated trusted/non-NULL in the kernel, so bpf_task_acquire must
 * accept it without an explicit NULL check.
 */
354 SEC("tp_btf/task_newtask")
355 int BPF_PROG(task_kfunc_acquire_trusted_walked, struct task_struct *task, u64 clone_flags)
357 	struct task_struct *acquired;
359 	/* task->group_leader is listed as a trusted, non-NULL field of task struct. */
360 	acquired = bpf_task_acquire(task->group_leader);
362 	bpf_task_release(acquired);
/* bpf_task_from_vpid(1) inside a fresh pid namespace must resolve to the
 * namespace's init — which the test arranges to be the current process.
 * NOTE(review): the SEC() annotation, NULL check on v_task, and the error
 * branch bodies are elided in this dump.
 */
371 int test_task_from_vpid_current(const void *ctx)
373 	struct task_struct *current, *v_task;
375 	v_task = bpf_task_from_vpid(1);
381 	current = bpf_get_current_task_btf();
383 	/* The current process should be the init process (pid 1) in the new pid namespace. */
384 	if (current != v_task)
387 	bpf_task_release(v_task);
392 int test_task_from_vpid_invalid(const void *ctx)
394 struct task_struct *v_task;
396 v_task = bpf_task_from_vpid(-1);
402 /* There should be only one process (current process) in the new pid namespace. */
403 v_task = bpf_task_from_vpid(2);
409 v_task = bpf_task_from_vpid(9999);
417 bpf_task_release(v_task);