// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */

#define _GNU_SOURCE         /* See feature_test_macros(7) */
#include <unistd.h>
#include <sched.h>
#include <pthread.h>
#include <sys/syscall.h>   /* For SYS_xxx definitions */
#include <sys/types.h>
#include <test_progs.h>
#include "task_local_storage_helpers.h"
#include "task_local_storage.skel.h"
#include "task_local_storage_exit_creds.skel.h"
#include "task_ls_recursion.skel.h"
#include "task_storage_nodeadlock.skel.h"

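/* Tests for BPF task local storage, driven by the skeletons included above:
 * sys_enter/sys_exit counting, storage state after exit_creds(), recursion
 * protection, and deadlock avoidance under contention.
 */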
static void test_sys_enter_exit(void)
{
	struct task_local_storage *skel;
	int err;

	skel = task_local_storage__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
		return;

	skel->bss->target_pid = syscall(SYS_gettid);

	err = task_local_storage__attach(skel);
	if (!ASSERT_OK(err, "skel_attach"))
		goto out;

	syscall(SYS_gettid);
	syscall(SYS_gettid);

	/* 3x syscalls: 1x attach and 2x gettid */
	ASSERT_EQ(skel->bss->enter_cnt, 3, "enter_cnt");
	ASSERT_EQ(skel->bss->exit_cnt, 3, "exit_cnt");
	ASSERT_EQ(skel->bss->mismatch_cnt, 0, "mismatch_cnt");
out:
	task_local_storage__destroy(skel);
}

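/* The BPF side (task_local_storage_exit_creds.skel.h) is assumed to hook the
 * credential-release path and count whether the task storage it looks up is
 * still valid or already NULL; this test only checks those counters from
 * user space.
 */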
static void test_exit_creds(void)
{
	struct task_local_storage_exit_creds *skel;
	int err, run_count, sync_rcu_calls = 0;
	const int MAX_SYNC_RCU_CALLS = 1000;

	skel = task_local_storage_exit_creds__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
		return;

	err = task_local_storage_exit_creds__attach(skel);
	if (!ASSERT_OK(err, "skel_attach"))
		goto out;

	/* trigger at least one exit_creds() */
	if (CHECK_FAIL(system("ls > /dev/null")))
		goto out;

	/* kern_sync_rcu is not enough on its own as the read section we want
	 * to wait for may start after we enter synchronize_rcu, so our call
	 * won't wait for the section to finish. Loop on the run counter
	 * as well to ensure the program has run.
	 */
	do {
		kern_sync_rcu();
		run_count = __atomic_load_n(&skel->bss->run_count, __ATOMIC_SEQ_CST);
	} while (run_count == 0 && ++sync_rcu_calls < MAX_SYNC_RCU_CALLS);

	ASSERT_NEQ(sync_rcu_calls, MAX_SYNC_RCU_CALLS,
		   "sync_rcu count too high");
	ASSERT_NEQ(run_count, 0, "run_count");
	ASSERT_EQ(skel->bss->valid_ptr_count, 0, "valid_ptr_count");
	ASSERT_NEQ(skel->bss->null_ptr_count, 0, "null_ptr_count");
out:
	task_local_storage_exit_creds__destroy(skel);
}

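/* Recursion test: the skeleton's on_enter/on_update programs (not shown here)
 * are assumed to update map_a and map_b from within each other's context; the
 * checks below verify the expected final values and that the kernel's
 * recursion protection reported no misses.
 */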
static void test_recursion(void)
{
	int err, map_fd, prog_fd, task_fd;
	struct task_ls_recursion *skel;
	struct bpf_prog_info info;
	__u32 info_len = sizeof(info);
	long value;

	task_fd = sys_pidfd_open(getpid(), 0);
	if (!ASSERT_NEQ(task_fd, -1, "sys_pidfd_open"))
		return;

	skel = task_ls_recursion__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
		goto out;

	err = task_ls_recursion__attach(skel);
	if (!ASSERT_OK(err, "skel_attach"))
		goto out;

	/* trigger sys_enter, make sure it does not cause deadlock */
	skel->bss->test_pid = getpid();
	syscall(SYS_gettid);
	skel->bss->test_pid = 0;
	task_ls_recursion__detach(skel);

	/* Refer to the comment in BPF_PROG(on_update) for
	 * the explanation on the value 201 and 100.
	 */
	map_fd = bpf_map__fd(skel->maps.map_a);
	err = bpf_map_lookup_elem(map_fd, &task_fd, &value);
	ASSERT_OK(err, "lookup map_a");
	ASSERT_EQ(value, 201, "map_a value");
	ASSERT_EQ(skel->bss->nr_del_errs, 1, "bpf_task_storage_delete busy");

	map_fd = bpf_map__fd(skel->maps.map_b);
	err = bpf_map_lookup_elem(map_fd, &task_fd, &value);
	ASSERT_OK(err, "lookup map_b");
	ASSERT_EQ(value, 100, "map_b value");

	prog_fd = bpf_program__fd(skel->progs.on_update);
	memset(&info, 0, sizeof(info));
	err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
	ASSERT_OK(err, "get prog info");
	ASSERT_EQ(info.recursion_misses, 0, "on_update prog recursion");

	prog_fd = bpf_program__fd(skel->progs.on_enter);
	memset(&info, 0, sizeof(info));
	err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
	ASSERT_OK(err, "get prog info");
	ASSERT_EQ(info.recursion_misses, 0, "on_enter prog recursion");

out:
	close(task_fd);
	task_ls_recursion__destroy(skel);
}

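/* Helpers for the nodeadlock test below: worker threads loop on socket()
 * until the shared stop flag is set, either by waitall() or by a thread
 * noticing that the BPF program reported an error.
 */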
static bool stop;

static void waitall(const pthread_t *tids, int nr)
{
	int i;

	stop = true;
	for (i = 0; i < nr; i++)
		pthread_join(tids[i], NULL);
}

static void *sock_create_loop(void *arg)
{
	struct task_storage_nodeadlock *skel = arg;
	int fd;

	while (!stop) {
		fd = socket(AF_INET, SOCK_STREAM, 0);
		close(fd);
		if (skel->bss->nr_get_errs || skel->bss->nr_del_errs)
			stop = true;
	}

	return NULL;
}

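/* Many threads pinned to one CPU hammer socket(2) so that the skeleton's
 * sleepable socket_post_create program (assumed to call bpf_task_storage_get()
 * and bpf_task_storage_delete()) can be preempted mid-update; the asserts
 * below then check that no recursion misses or "busy" errors were recorded.
 */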
static void test_nodeadlock(void)
{
	struct task_storage_nodeadlock *skel;
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	const int nr_threads = 32;
	pthread_t tids[nr_threads];
	int i, prog_fd, err;
	cpu_set_t old, new;

	/* Pin all threads to one cpu to increase the chance of preemption
	 * in a sleepable bpf prog.
	 */
	CPU_ZERO(&new);
	CPU_SET(0, &new);
	err = sched_getaffinity(getpid(), sizeof(old), &old);
	if (!ASSERT_OK(err, "getaffinity"))
		return;
	err = sched_setaffinity(getpid(), sizeof(new), &new);
	if (!ASSERT_OK(err, "setaffinity"))
		return;

	skel = task_storage_nodeadlock__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		goto done;

	/* Unnecessary recursion and deadlock detection are reproducible
	 * in the preemptible kernel.
	 */
	if (!skel->kconfig->CONFIG_PREEMPT) {
		test__skip();
		goto done;
	}

	err = task_storage_nodeadlock__attach(skel);
	ASSERT_OK(err, "attach prog");

	for (i = 0; i < nr_threads; i++) {
		err = pthread_create(&tids[i], NULL, sock_create_loop, skel);
		if (err) {
			/* Only assert once here to avoid excessive
			 * PASS printing during test failure.
			 */
			ASSERT_OK(err, "pthread_create");
			waitall(tids, i);
			goto done;
		}
	}

	/* With 32 threads, 1s is enough to reproduce the issue */
	sleep(1);
	waitall(tids, nr_threads);

	info_len = sizeof(info);
	prog_fd = bpf_program__fd(skel->progs.socket_post_create);
	err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
	ASSERT_OK(err, "get prog info");
	ASSERT_EQ(info.recursion_misses, 0, "prog recursion");

	ASSERT_EQ(skel->bss->nr_get_errs, 0, "bpf_task_storage_get busy");
	ASSERT_EQ(skel->bss->nr_del_errs, 0, "bpf_task_storage_delete busy");

done:
	task_storage_nodeadlock__destroy(skel);
	sched_setaffinity(getpid(), sizeof(old), &old);
}

void test_task_local_storage(void)
{
	if (test__start_subtest("sys_enter_exit"))
		test_sys_enter_exit();
	if (test__start_subtest("exit_creds"))
		test_exit_creds();
	if (test__start_subtest("recursion"))
		test_recursion();
	if (test__start_subtest("nodeadlock"))
		test_nodeadlock();
}