// SPDX-License-Identifier: GPL-2.0
#include <regex.h>
#include <test_progs.h>
#include <network_helpers.h>

#include "test_spin_lock.skel.h"
#include "test_spin_lock_fail.skel.h"

static char log_buf[1024 * 1024];

/* Negative tests: each program below must be rejected by the verifier, and the
 * verifier log must match the accompanying error-message regex.
 */
static struct {
	const char *prog_name;
	const char *err_msg;
} spin_lock_fail_tests[] = {
15 { "lock_id_kptr_preserve",
16 "5: (bf) r1 = r0 ; R0_w=ptr_foo(id=2,ref_obj_id=2) "
17 "R1_w=ptr_foo(id=2,ref_obj_id=2) refs=2\n6: (85) call bpf_this_cpu_ptr#154\n"
18 "R1 type=ptr_ expected=percpu_ptr_" },
19 { "lock_id_global_zero",
20 "; R1_w=map_value(map=.data.A,ks=4,vs=4)\n2: (85) call bpf_this_cpu_ptr#154\n"
21 "R1 type=map_value expected=percpu_ptr_" },
22 { "lock_id_mapval_preserve",
23 "[0-9]\\+: (bf) r1 = r0 ;"
24 " R0_w=map_value(id=1,map=array_map,ks=4,vs=8)"
25 " R1_w=map_value(id=1,map=array_map,ks=4,vs=8)\n"
26 "[0-9]\\+: (85) call bpf_this_cpu_ptr#154\n"
27 "R1 type=map_value expected=percpu_ptr_" },
28 { "lock_id_innermapval_preserve",
29 "[0-9]\\+: (bf) r1 = r0 ;"
30 " R0=map_value(id=2,ks=4,vs=8)"
31 " R1_w=map_value(id=2,ks=4,vs=8)\n"
32 "[0-9]\\+: (85) call bpf_this_cpu_ptr#154\n"
33 "R1 type=map_value expected=percpu_ptr_" },
34 { "lock_id_mismatch_kptr_kptr", "bpf_spin_unlock of different lock" },
35 { "lock_id_mismatch_kptr_global", "bpf_spin_unlock of different lock" },
36 { "lock_id_mismatch_kptr_mapval", "bpf_spin_unlock of different lock" },
37 { "lock_id_mismatch_kptr_innermapval", "bpf_spin_unlock of different lock" },
38 { "lock_id_mismatch_global_global", "bpf_spin_unlock of different lock" },
39 { "lock_id_mismatch_global_kptr", "bpf_spin_unlock of different lock" },
40 { "lock_id_mismatch_global_mapval", "bpf_spin_unlock of different lock" },
41 { "lock_id_mismatch_global_innermapval", "bpf_spin_unlock of different lock" },
42 { "lock_id_mismatch_mapval_mapval", "bpf_spin_unlock of different lock" },
43 { "lock_id_mismatch_mapval_kptr", "bpf_spin_unlock of different lock" },
44 { "lock_id_mismatch_mapval_global", "bpf_spin_unlock of different lock" },
45 { "lock_id_mismatch_mapval_innermapval", "bpf_spin_unlock of different lock" },
46 { "lock_id_mismatch_innermapval_innermapval1", "bpf_spin_unlock of different lock" },
47 { "lock_id_mismatch_innermapval_innermapval2", "bpf_spin_unlock of different lock" },
48 { "lock_id_mismatch_innermapval_kptr", "bpf_spin_unlock of different lock" },
49 { "lock_id_mismatch_innermapval_global", "bpf_spin_unlock of different lock" },
50 { "lock_id_mismatch_innermapval_mapval", "bpf_spin_unlock of different lock" },
51 { "lock_global_subprog_call1", "global function calls are not allowed while holding a lock" },
52 { "lock_global_subprog_call2", "global function calls are not allowed while holding a lock" },
static int match_regex(const char *pattern, const char *string)
{
	regex_t re;
	int err, rc;

	err = regcomp(&re, pattern, REG_NOSUB);
	if (err) {
		char errbuf[512];

		regerror(err, &re, errbuf, sizeof(errbuf));
		PRINT_FAIL("Can't compile regex: %s\n", errbuf);
		return -1;
	}
	rc = regexec(&re, string, 0, NULL, 0);
	regfree(&re);
	return rc == 0 ? 1 : 0;
}
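
/* Open the failure skeleton, enable only 'prog_name' for loading, and check
 * that loading fails with a verifier log matching 'err_msg'.
 */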
static void test_spin_lock_fail_prog(const char *prog_name, const char *err_msg)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_buf = log_buf,
						.kernel_log_size = sizeof(log_buf),
						.kernel_log_level = 1);
	struct test_spin_lock_fail *skel;
	struct bpf_program *prog;
	int ret;

	skel = test_spin_lock_fail__open_opts(&opts);
	if (!ASSERT_OK_PTR(skel, "test_spin_lock_fail__open_opts"))
		return;

	prog = bpf_object__find_program_by_name(skel->obj, prog_name);
	if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
		goto end;

	bpf_program__set_autoload(prog, true);

	ret = test_spin_lock_fail__load(skel);
	if (!ASSERT_ERR(ret, "test_spin_lock_fail__load must fail"))
		goto end;

	/* Skip check if JIT does not support kfuncs */
	if (strstr(log_buf, "JIT does not support calling kernel function")) {
		test__skip();
		goto end;
	}

	ret = match_regex(err_msg, log_buf);
	if (!ASSERT_GE(ret, 0, "match_regex"))
		goto end;

	if (!ASSERT_TRUE(ret, "no match for expected error message")) {
		fprintf(stderr, "Expected: %s\n", err_msg);
		fprintf(stderr, "Verifier: %s\n", log_buf);
	}

end:
	test_spin_lock_fail__destroy(skel);
}
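
/* Worker thread: run the loaded program repeatedly via BPF_PROG_TEST_RUN so
 * that multiple threads exercise the same bpf_spin_lock concurrently.
 */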
static void *spin_lock_thread(void *arg)
{
	int err, prog_fd = *(u32 *) arg;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 10000,
	);

	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "test_run");
	ASSERT_OK(topts.retval, "test_run retval");
	pthread_exit(arg);
}
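
/* Positive test: load the spin lock program and run it from four threads in
 * parallel; every run must succeed and each thread must return its argument.
 */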
void test_spin_lock_success(void)
{
	struct test_spin_lock *skel;
	pthread_t thread_id[4];
	int prog_fd, i, err;
	void *ret;

	skel = test_spin_lock__open_and_load();
	if (!ASSERT_OK_PTR(skel, "test_spin_lock__open_and_load"))
		return;
	prog_fd = bpf_program__fd(skel->progs.bpf_spin_lock_test);
	for (i = 0; i < 4; i++) {
		err = pthread_create(&thread_id[i], NULL, &spin_lock_thread, &prog_fd);
		if (!ASSERT_OK(err, "pthread_create"))
			goto end;
	}

	for (i = 0; i < 4; i++) {
		if (!ASSERT_OK(pthread_join(thread_id[i], &ret), "pthread_join"))
			goto end;
		if (!ASSERT_EQ(ret, &prog_fd, "ret == prog_fd"))
			goto end;
	}
end:
	test_spin_lock__destroy(skel);
}
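
/* Test entry point: run the success case, then each verifier-failure case as
 * its own subtest.
 */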
void test_spin_lock(void)
{
	int i;

	test_spin_lock_success();

	for (i = 0; i < ARRAY_SIZE(spin_lock_fail_tests); i++) {
		if (!test__start_subtest(spin_lock_fail_tests[i].prog_name))
			continue;
		test_spin_lock_fail_prog(spin_lock_fail_tests[i].prog_name,
					 spin_lock_fail_tests[i].err_msg);
	}
}