// SPDX-License-Identifier: GPL-2.0
#include <bpf/btf.h>
#include <linux/btf.h>
#include <test_progs.h>
#include <network_helpers.h>

#include "linked_list.skel.h"
#include "linked_list_fail.skel.h"

static char log_buf[1024 * 1024];
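
/*
 * Table of programs in linked_list_fail.c that must be rejected by the
 * verifier, each paired with a substring expected in the verifier log.
 */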
static struct {
	const char *prog_name;
	const char *err_msg;
} linked_list_fail_tests[] = {
#define TEST(test, off) \
	{ #test "_missing_lock_push_front", \
	  "bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, \
	{ #test "_missing_lock_push_back", \
	  "bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, \
	{ #test "_missing_lock_pop_front", \
	  "bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, \
	{ #test "_missing_lock_pop_back", \
	  "bpf_spin_lock at off=" #off " must be held for bpf_list_head" },
	TEST(kptr, 40)
	TEST(global, 16)
	TEST(map, 0)
	TEST(inner_map, 0)
#undef TEST
#define TEST(test, op) \
	{ #test "_kptr_incorrect_lock_" #op, \
	  "held lock and object are not in the same allocation\n" \
	  "bpf_spin_lock at off=40 must be held for bpf_list_head" }, \
	{ #test "_global_incorrect_lock_" #op, \
	  "held lock and object are not in the same allocation\n" \
	  "bpf_spin_lock at off=16 must be held for bpf_list_head" }, \
	{ #test "_map_incorrect_lock_" #op, \
	  "held lock and object are not in the same allocation\n" \
	  "bpf_spin_lock at off=0 must be held for bpf_list_head" }, \
	{ #test "_inner_map_incorrect_lock_" #op, \
	  "held lock and object are not in the same allocation\n" \
	  "bpf_spin_lock at off=0 must be held for bpf_list_head" },
	TEST(kptr, push_front)
	TEST(kptr, push_back)
	TEST(kptr, pop_front)
	TEST(kptr, pop_back)
	TEST(global, push_front)
	TEST(global, push_back)
	TEST(global, pop_front)
	TEST(global, pop_back)
	TEST(map, push_front)
	TEST(map, push_back)
	TEST(map, pop_front)
	TEST(map, pop_back)
	TEST(inner_map, push_front)
	TEST(inner_map, push_back)
	TEST(inner_map, pop_front)
	TEST(inner_map, pop_back)
#undef TEST
61 { "map_compat_kprobe", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
62 { "map_compat_kretprobe", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
63 { "map_compat_tp", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
64 { "map_compat_perf", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
65 { "map_compat_raw_tp", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
66 { "map_compat_raw_tp_w", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
67 { "obj_type_id_oor", "local type ID argument must be in range [0, U32_MAX]" },
68 { "obj_new_no_composite", "bpf_obj_new/bpf_percpu_obj_new type ID argument must be of a struct" },
69 { "obj_new_no_struct", "bpf_obj_new/bpf_percpu_obj_new type ID argument must be of a struct" },
70 { "obj_drop_non_zero_off", "R1 must have zero offset when passed to release func" },
71 { "new_null_ret", "R0 invalid mem access 'ptr_or_null_'" },
72 { "obj_new_acq", "Unreleased reference id=" },
73 { "use_after_drop", "invalid mem access 'scalar'" },
74 { "ptr_walk_scalar", "type=scalar expected=percpu_ptr_" },
75 { "direct_read_lock", "direct access to bpf_spin_lock is disallowed" },
76 { "direct_write_lock", "direct access to bpf_spin_lock is disallowed" },
77 { "direct_read_head", "direct access to bpf_list_head is disallowed" },
78 { "direct_write_head", "direct access to bpf_list_head is disallowed" },
79 { "direct_read_node", "direct access to bpf_list_node is disallowed" },
80 { "direct_write_node", "direct access to bpf_list_node is disallowed" },
81 { "use_after_unlock_push_front", "invalid mem access 'scalar'" },
82 { "use_after_unlock_push_back", "invalid mem access 'scalar'" },
83 { "double_push_front", "arg#1 expected pointer to allocated object" },
84 { "double_push_back", "arg#1 expected pointer to allocated object" },
85 { "no_node_value_type", "bpf_list_node not found at offset=0" },
86 { "incorrect_value_type",
87 "operation on bpf_list_head expects arg#1 bpf_list_node at offset=48 in struct foo, "
88 "but arg is at offset=0 in struct bar" },
89 { "incorrect_node_var_off", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" },
90 { "incorrect_node_off1", "bpf_list_node not found at offset=49" },
91 { "incorrect_node_off2", "arg#1 offset=0, but expected bpf_list_node at offset=48 in struct foo" },
92 { "no_head_type", "bpf_list_head not found at offset=0" },
93 { "incorrect_head_var_off1", "R1 doesn't have constant offset" },
94 { "incorrect_head_var_off2", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" },
95 { "incorrect_head_off1", "bpf_list_head not found at offset=25" },
96 { "incorrect_head_off2", "bpf_list_head not found at offset=1" },
97 { "pop_front_off", "off 48 doesn't point to 'struct bpf_spin_lock' that is at 40" },
98 { "pop_back_off", "off 48 doesn't point to 'struct bpf_spin_lock' that is at 40" },
static void test_linked_list_fail_prog(const char *prog_name, const char *err_msg)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_buf = log_buf,
		    .kernel_log_size = sizeof(log_buf),
		    .kernel_log_level = 1);
	struct linked_list_fail *skel;
	struct bpf_program *prog;
	int ret;

	skel = linked_list_fail__open_opts(&opts);
	if (!ASSERT_OK_PTR(skel, "linked_list_fail__open_opts"))
		return;

	prog = bpf_object__find_program_by_name(skel->obj, prog_name);
	if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
		goto end;

	bpf_program__set_autoload(prog, true);

	ret = linked_list_fail__load(skel);
	if (!ASSERT_ERR(ret, "linked_list_fail__load must fail"))
		goto end;

	if (!ASSERT_OK_PTR(strstr(log_buf, err_msg), "expected error message")) {
		fprintf(stderr, "Expected: %s\n", err_msg);
		fprintf(stderr, "Verifier: %s\n", log_buf);
	}

end:
	linked_list_fail__destroy(skel);
}
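
/*
 * Overwrite the map value with 0xff bytes. For a value containing special
 * fields, the update path resets bpf_spin_lock/bpf_list_head instead of
 * copying the bytes verbatim and frees anything still on the list (the
 * kernel's check_and_free_fields() handling, hence the assert name).
 */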
static void clear_fields(struct bpf_map *map)
{
	char buf[24];
	int key = 0;

	memset(buf, 0xff, sizeof(buf));
	ASSERT_OK(bpf_map__update_elem(map, &key, sizeof(key), buf, sizeof(buf), 0), "check_and_free_fields");
}
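
/*
 * Modes for test_linked_list_success(): run only the basic push/pop
 * programs, only the multiple-push/pop variants, only the list-in-list
 * programs, or everything (TEST_ALL). With leave_in_map set, clear_fields()
 * is skipped so elements stay in the map and the map-free path is exercised.
 */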
enum {
	PUSH_POP,
	PUSH_POP_MULT,
	LIST_IN_LIST,
	TEST_ALL,
};

static void test_linked_list_success(int mode, bool leave_in_map)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .repeat = 1,
	);
	struct linked_list *skel;
	int ret;

	skel = linked_list__open_and_load();
	if (!ASSERT_OK_PTR(skel, "linked_list__open_and_load"))
		return;

	if (mode == LIST_IN_LIST)
		goto lil;
	if (mode == PUSH_POP_MULT)
		goto ppm;

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.map_list_push_pop), &opts);
	ASSERT_OK(ret, "map_list_push_pop");
	ASSERT_OK(opts.retval, "map_list_push_pop retval");
	if (!leave_in_map)
		clear_fields(skel->maps.array_map);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.inner_map_list_push_pop), &opts);
	ASSERT_OK(ret, "inner_map_list_push_pop");
	ASSERT_OK(opts.retval, "inner_map_list_push_pop retval");
	if (!leave_in_map)
		clear_fields(skel->maps.inner_map);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_push_pop), &opts);
	ASSERT_OK(ret, "global_list_push_pop");
	ASSERT_OK(opts.retval, "global_list_push_pop retval");
	if (!leave_in_map)
		clear_fields(skel->maps.bss_A);

	if (mode == PUSH_POP)
		goto end;

ppm:
	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.map_list_push_pop_multiple), &opts);
	ASSERT_OK(ret, "map_list_push_pop_multiple");
	ASSERT_OK(opts.retval, "map_list_push_pop_multiple retval");
	if (!leave_in_map)
		clear_fields(skel->maps.array_map);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.inner_map_list_push_pop_multiple), &opts);
	ASSERT_OK(ret, "inner_map_list_push_pop_multiple");
	ASSERT_OK(opts.retval, "inner_map_list_push_pop_multiple retval");
	if (!leave_in_map)
		clear_fields(skel->maps.inner_map);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_push_pop_multiple), &opts);
	ASSERT_OK(ret, "global_list_push_pop_multiple");
	ASSERT_OK(opts.retval, "global_list_push_pop_multiple retval");
	if (!leave_in_map)
		clear_fields(skel->maps.bss_A);

	if (mode == PUSH_POP_MULT)
		goto end;

lil:
	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.map_list_in_list), &opts);
	ASSERT_OK(ret, "map_list_in_list");
	ASSERT_OK(opts.retval, "map_list_in_list retval");
	if (!leave_in_map)
		clear_fields(skel->maps.array_map);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.inner_map_list_in_list), &opts);
	ASSERT_OK(ret, "inner_map_list_in_list");
	ASSERT_OK(opts.retval, "inner_map_list_in_list retval");
	if (!leave_in_map)
		clear_fields(skel->maps.inner_map);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_in_list), &opts);
	ASSERT_OK(ret, "global_list_in_list");
	ASSERT_OK(opts.retval, "global_list_in_list retval");
	if (!leave_in_map)
		clear_fields(skel->maps.bss_A);
end:
	linked_list__destroy(skel);
}

#define SPIN_LOCK 2
#define LIST_HEAD 3
#define LIST_NODE 4
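
/*
 * Build a base BTF for the hand-crafted subtests: "int" lands at type ID 1
 * and the three special structs at the IDs pinned above (bpf_spin_lock=2,
 * bpf_list_head=3, bpf_list_node=4).
 */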
static struct btf *init_btf(void)
{
	int id, lid, hid, nid;
	struct btf *btf;

	btf = btf__new_empty();
	if (!ASSERT_OK_PTR(btf, "btf__new_empty"))
		return NULL;
	id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
	if (!ASSERT_EQ(id, 1, "btf__add_int"))
		goto end;
	lid = btf__add_struct(btf, "bpf_spin_lock", 4);
	if (!ASSERT_EQ(lid, SPIN_LOCK, "btf__add_struct bpf_spin_lock"))
		goto end;
	hid = btf__add_struct(btf, "bpf_list_head", 16);
	if (!ASSERT_EQ(hid, LIST_HEAD, "btf__add_struct bpf_list_head"))
		goto end;
	nid = btf__add_struct(btf, "bpf_list_node", 24);
	if (!ASSERT_EQ(nid, LIST_NODE, "btf__add_struct bpf_list_node"))
		goto end;
	return btf;
end:
	btf__free(btf);
	return NULL;
}
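
/*
 * A struct that carries both a bpf_list_node and a bpf_rb_node is only
 * accepted by the kernel when it also carries a bpf_refcount; without one
 * the BTF load must fail with -EINVAL.
 */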
static void list_and_rb_node_same_struct(bool refcount_field)
{
	int bpf_rb_node_btf_id, bpf_refcount_btf_id = 0, foo_btf_id;
	struct btf *btf;
	int id, err;

	btf = init_btf();
	if (!ASSERT_OK_PTR(btf, "init_btf"))
		return;

	bpf_rb_node_btf_id = btf__add_struct(btf, "bpf_rb_node", 32);
	if (!ASSERT_GT(bpf_rb_node_btf_id, 0, "btf__add_struct bpf_rb_node"))
		return;

	if (refcount_field) {
		bpf_refcount_btf_id = btf__add_struct(btf, "bpf_refcount", 4);
		if (!ASSERT_GT(bpf_refcount_btf_id, 0, "btf__add_struct bpf_refcount"))
			return;
	}

	id = btf__add_struct(btf, "bar", refcount_field ? 60 : 56);
	if (!ASSERT_GT(id, 0, "btf__add_struct bar"))
		return;
	err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
	if (!ASSERT_OK(err, "btf__add_field bar::a"))
		return;
	err = btf__add_field(btf, "c", bpf_rb_node_btf_id, 192, 0);
	if (!ASSERT_OK(err, "btf__add_field bar::c"))
		return;
	if (refcount_field) {
		err = btf__add_field(btf, "ref", bpf_refcount_btf_id, 448, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::ref"))
			return;
	}

	foo_btf_id = btf__add_struct(btf, "foo", 20);
	if (!ASSERT_GT(foo_btf_id, 0, "btf__add_struct foo"))
		return;
	err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
	if (!ASSERT_OK(err, "btf__add_field foo::a"))
		return;
	err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
	if (!ASSERT_OK(err, "btf__add_field foo::b"))
		return;
	id = btf__add_decl_tag(btf, "contains:bar:a", foo_btf_id, 0);
	if (!ASSERT_GT(id, 0, "btf__add_decl_tag contains:bar:a"))
		return;

	err = btf__load_into_kernel(btf);
	ASSERT_EQ(err, refcount_field ? 0 : -EINVAL, "check btf");
	btf__free(btf);
}
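
/*
 * Each subtest below hand-crafts a small BTF and checks how the kernel
 * validates the bpf_list_head/bpf_list_node layout and the ownership graph
 * described by contains:... decl tags.
 */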
static void test_btf(void)
{
	struct btf *btf = NULL;
	int id, err;
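
	/* Two bpf_spin_lock fields in one value: rejected with -E2BIG. */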
	while (test__start_subtest("btf: too many locks")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 24);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", SPIN_LOCK, 32, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		err = btf__add_field(btf, "c", LIST_HEAD, 64, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::c"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -E2BIG, "check btf");
		btf__free(btf);
		break;
	}
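
	/*
	 * A bpf_list_head with a valid contains: tag but no bpf_spin_lock in
	 * the same value: rejected with -EINVAL.
	 */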
	while (test__start_subtest("btf: missing lock")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 16);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		id = btf__add_decl_tag(btf, "contains:baz:a", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:baz:a"))
			break;
		id = btf__add_struct(btf, "baz", 16);
		if (!ASSERT_EQ(id, 7, "btf__add_struct baz"))
			break;
		err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field baz::a"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -EINVAL, "check btf");
		btf__free(btf);
		break;
	}
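
	/*
	 * All three special fields are placed at offset 0, so they overlap:
	 * rejected with -EEXIST.
	 */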
	while (test__start_subtest("btf: bad offset")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 36);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:foo:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:b"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -EEXIST, "check btf");
		btf__free(btf);
		break;
	}
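
	/* A bpf_list_head without any contains: tag: rejected with -EINVAL. */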
	while (test__start_subtest("btf: missing contains:")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 24);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_HEAD, 64, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -EINVAL, "check btf");
		btf__free(btf);
		break;
	}
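
	/*
	 * contains:bar:bar names a value struct "bar" that does not exist in
	 * the BTF: rejected with -ENOENT.
	 */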
	while (test__start_subtest("btf: missing struct")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 24);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_HEAD, 64, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:bar", 5, 1);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:bar"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -ENOENT, "check btf");
		btf__free(btf);
		break;
	}
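
	/*
	 * contains:foo:c names a field "c" that struct foo does not have:
	 * rejected with -ENOENT.
	 */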
	while (test__start_subtest("btf: missing node")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 24);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_HEAD, 64, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		id = btf__add_decl_tag(btf, "contains:foo:c", 5, 1);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:c"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -ENOENT, "check btf");
		btf__free(btf);
		break;
	}
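
	/*
	 * The tagged field exists but is a bpf_spin_lock, not a bpf_list_node:
	 * rejected with -EINVAL.
	 */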
	while (test__start_subtest("btf: node incorrect type")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 20);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:a", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:a"))
			break;
		id = btf__add_struct(btf, "bar", 4);
		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
			break;
		err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::a"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -EINVAL, "check btf");
		btf__free(btf);
		break;
	}
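
	/*
	 * Two bpf_list_node fields deliberately share the name "b", making
	 * contains:foo:b ambiguous: rejected with -EINVAL.
	 */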
	while (test__start_subtest("btf: multiple bpf_list_node with name b")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 52);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 256, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::c"))
			break;
		err = btf__add_field(btf, "d", SPIN_LOCK, 384, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::d"))
			break;
		id = btf__add_decl_tag(btf, "contains:foo:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:b"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -EINVAL, "check btf");
		btf__free(btf);
		break;
	}
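
	/* struct foo owns a list of foo itself (A -> A): rejected with -ELOOP. */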
	while (test__start_subtest("btf: owning | owned AA cycle")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 44);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:foo:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:b"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -ELOOP, "check btf");
		btf__free(btf);
		break;
	}
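
	/* foo owns bar and bar owns foo (A -> B -> A): rejected with -ELOOP. */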
	while (test__start_subtest("btf: owning | owned ABA cycle")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 44);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
			break;
		id = btf__add_struct(btf, "bar", 44);
		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:foo:b", 7, 0);
		if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:foo:b"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -ELOOP, "check btf");
		btf__free(btf);
		break;
	}
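
	/* A plain owner -> owned chain (foo owns bar, bar is only owned): OK. */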
	while (test__start_subtest("btf: owning -> owned")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 28);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", SPIN_LOCK, 192, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:a", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:a"))
			break;
		id = btf__add_struct(btf, "bar", 24);
		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
			break;
		err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::a"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, 0, "check btf");
		btf__free(btf);
		break;
	}
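
	/*
	 * foo owns bar, bar both owns baz and is owned by foo; a single
	 * owner+owned struct in the middle of the chain is accepted.
	 */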
	while (test__start_subtest("btf: owning -> owning | owned -> owned")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 28);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", SPIN_LOCK, 192, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
			break;
		id = btf__add_struct(btf, "bar", 44);
		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:baz:a", 7, 0);
		if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:baz:a"))
			break;
		id = btf__add_struct(btf, "baz", 24);
		if (!ASSERT_EQ(id, 9, "btf__add_struct baz"))
			break;
		err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field baz::a"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, 0, "check btf");
		btf__free(btf);
		break;
	}
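
	/*
	 * Same chain, but the root foo is now owner+owned as well: rejected
	 * with -ELOOP.
	 */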
	while (test__start_subtest("btf: owning | owned -> owning | owned -> owned")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 44);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
			break;
		id = btf__add_struct(btf, "bar", 44);
		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:baz:a", 7, 0);
		if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:baz:a"))
			break;
		id = btf__add_struct(btf, "baz", 24);
		if (!ASSERT_EQ(id, 9, "btf__add_struct baz"))
			break;
		err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field baz::a"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -ELOOP, "check btf");
		btf__free(btf);
		break;
	}
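
	/*
	 * owner -> owner+owned -> owner+owned -> owned: two owner+owned
	 * structs in the chain are likewise rejected with -ELOOP.
	 */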
	while (test__start_subtest("btf: owning -> owning | owned -> owning | owned -> owned")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 20);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
			break;
		id = btf__add_struct(btf, "bar", 44);
		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:baz:b", 7, 0);
		if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:baz:b"))
			break;
		id = btf__add_struct(btf, "baz", 44);
		if (!ASSERT_EQ(id, 9, "btf__add_struct baz"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field baz::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field baz::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
		if (!ASSERT_OK(err, "btf__add_field baz::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:bam:a", 9, 0);
		if (!ASSERT_EQ(id, 10, "btf__add_decl_tag contains:bam:a"))
			break;
		id = btf__add_struct(btf, "bam", 24);
		if (!ASSERT_EQ(id, 11, "btf__add_struct bam"))
			break;
		err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bam::a"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -ELOOP, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: list_node and rb_node in same struct")) {
		list_and_rb_node_same_struct(true);
		break;
	}

	while (test__start_subtest("btf: list_node and rb_node in same struct, no bpf_refcount")) {
		list_and_rb_node_same_struct(false);
		break;
	}
}
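
/*
 * Entry point: run every expected-failure program, then the hand-crafted
 * BTF subtests, then the runtime success tests in each mode, both clearing
 * the maps afterwards and leaving elements in place.
 */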
void test_linked_list(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(linked_list_fail_tests); i++) {
		if (!test__start_subtest(linked_list_fail_tests[i].prog_name))
			continue;
		test_linked_list_fail_prog(linked_list_fail_tests[i].prog_name,
					   linked_list_fail_tests[i].err_msg);
	}
	test_btf();
	test_linked_list_success(PUSH_POP, false);
	test_linked_list_success(PUSH_POP, true);
	test_linked_list_success(PUSH_POP_MULT, false);
	test_linked_list_success(PUSH_POP_MULT, true);
	test_linked_list_success(LIST_IN_LIST, false);
	test_linked_list_success(LIST_IN_LIST, true);
	test_linked_list_success(TEST_ALL, false);
}