// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>

/* test_tailcall_1 checks basic functionality by patching multiple locations
 * in a single program for a single tail call slot with nop->jmp, jmp->nop
 * and jmp->jmp rewrites. Also checks for nop->nop.
 */
static void test_tailcall_1(void)
{
	int err, map_fd, prog_fd, main_fd, i, j;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall1.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;
	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;
	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;
		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;
		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");
		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;
		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;
		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		j = bpf_map__max_entries(prog_array) - 1 - i;
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", j);
		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;
		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;
		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		j = bpf_map__max_entries(prog_array) - 1 - i;
		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, j, "tailcall retval");
		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err >= 0 || errno != ENOENT))
			goto out;
		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}
out:
	bpf_object__close(obj);
}

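/* For reference, a minimal sketch of the BPF-side layout this loader assumes
 * (map and program names mirror what the test looks up; the actual
 * tailcall1.bpf.c source may differ, e.g. it repeats each tail call site
 * several times to exercise patching of multiple locations):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
 *		__uint(max_entries, 3);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(__u32));
 *	} jmp_table SEC(".maps");
 *
 *	SEC("tc") int classifier_0(struct __sk_buff *skb) { return 0; }
 *	SEC("tc") int classifier_1(struct __sk_buff *skb) { return 1; }
 *	SEC("tc") int classifier_2(struct __sk_buff *skb) { return 2; }
 *
 *	SEC("tc") int entry(struct __sk_buff *skb)
 *	{
 *		bpf_tail_call_static(skb, &jmp_table, 0);
 *		bpf_tail_call_static(skb, &jmp_table, 1);
 *		bpf_tail_call_static(skb, &jmp_table, 2);
 *		return 3;
 *	}
 *
 * With all slots populated, the first populated slot wins, so as slots are
 * deleted in order the run's retval advances 0, 1, 2; with nothing installed
 * the fall-through value 3 is observed, which is what the asserts above check.
 */
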
/* test_tailcall_2 checks that patching multiple programs for a single
 * tail call slot works. It also jumps through several programs and tests
 * the tail call limit counter.
 */
static void test_tailcall_2(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall2.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;
	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;
	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;
		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;
		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 2, "tailcall retval");

	i = 2;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");
out:
	bpf_object__close(obj);
}

static void test_tailcall_count(const char *which)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;
	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;
	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	prog = bpf_object__find_program_by_name(obj, "classifier_0");
	if (CHECK_FAIL(!prog))
		goto out;
	prog_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(prog_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;
	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 33, "tailcall count");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");
out:
	bpf_object__close(obj);
}

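/* A rough sketch of the counting pattern the objects passed to
 * test_tailcall_count() are expected to implement; the names are assumed from
 * what this loader looks up, and the real tailcall3/tailcall6 sources may
 * differ in how the tail call target index is formed:
 *
 *	int count = 0;
 *
 *	SEC("tc") int classifier_0(struct __sk_buff *skb)
 *	{
 *		count++;
 *		bpf_tail_call_static(skb, &jmp_table, 0);
 *		return 1;
 *	}
 *
 *	SEC("tc") int entry(struct __sk_buff *skb)
 *	{
 *		bpf_tail_call_static(skb, &jmp_table, 0);
 *		return 0;
 *	}
 *
 * classifier_0 keeps tail calling itself until the kernel's tail call limit
 * stops the chain, so the counter read from .bss above should equal the
 * number of tail calls the kernel permits (33, matching MAX_TAIL_CALL_CNT),
 * and the final fall-through return value 1 is what the retval assert checks.
 */
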
/* test_tailcall_3 checks that the count value of the tail call limit
 * enforcement matches with expectations. JIT uses direct jump.
 */
static void test_tailcall_3(void)
{
	test_tailcall_count("tailcall3.bpf.o");
}

/* test_tailcall_6 checks that the count value of the tail call limit
 * enforcement matches with expectations. JIT uses indirect jump.
 */
static void test_tailcall_6(void)
{
	test_tailcall_count("tailcall6.bpf.o");
}

/* test_tailcall_4 checks that the kernel properly selects indirect jump
 * for the case where the key is not known. The key is passed via global
 * data to select different targets whose return values we can compare.
 */
static void test_tailcall_4(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	static const int zero = 0;
	char buff[128] = {};
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall4.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;
	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;
	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;
	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;
		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;
		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;
		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}
out:
	bpf_object__close(obj);
}

/* test_tailcall_5 probes similarly to test_tailcall_4 that the kernel
 * generates an indirect jump when the key is constant but differs between
 * branches.
 */
static void test_tailcall_5(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, key[] = { 1111, 1234, 5678 };
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	static const int zero = 0;
	char buff[128] = {};
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall5.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;
	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;
	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;
	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;
		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;
		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;
		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}
out:
	bpf_object__close(obj);
}

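/* A rough sketch of the selection logic the tailcall5 object is expected to
 * implement, assumed from the key values used above rather than taken from
 * the literal tailcall5.bpf.c source: the key written into .bss by the loop
 * above is mapped to a slot index through constant comparisons in different
 * branches, which keeps the index constant per branch yet unknown overall and
 * forces the JIT to emit an indirect jump:
 *
 *	SEC("tc") int entry(struct __sk_buff *skb)
 *	{
 *		int idx = 0;
 *
 *		if (key == 1234)
 *			idx = 1;
 *		if (key == 5678)
 *			idx = 2;
 *
 *		bpf_tail_call(skb, &jmp_table, idx);
 *		return 3;
 *	}
 */
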
/* test_tailcall_bpf2bpf_1 makes sure that tailcalls work correctly in
 * combination with BPF subprograms.
 */
static void test_tailcall_bpf2bpf_1(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf1.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;
	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;
	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	/* nop -> jmp */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;
		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;
		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	/* jmp -> nop, call subprog that will do tailcall */
	i = 1;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");

	/* make sure that subprog can access ctx and entry prog that
	 * called this subprog can properly return
	 */
	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_2 checks that the count value of the tail call limit
 * enforcement matches with expectations when the tailcall is preceded by a
 * bpf2bpf call.
 */
static void test_tailcall_bpf2bpf_2(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf2.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;
	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;
	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	prog = bpf_object__find_program_by_name(obj, "classifier_0");
	if (CHECK_FAIL(!prog))
		goto out;
	prog_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(prog_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;
	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 33, "tailcall count");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_3 checks that a non-trivial amount of stack (up to
 * 256 bytes) can be used within bpf subprograms that have tailcalls in them.
 */
static void test_tailcall_bpf2bpf_3(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf3.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;
	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;
	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;
		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;
		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");

	i = 1;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4), "tailcall retval");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
out:
	bpf_object__close(obj);
}

#include "tailcall_bpf2bpf4.skel.h"

/* test_tailcall_bpf2bpf_4 checks that the tailcall counter is correctly
 * preserved across tailcalls combined with bpf2bpf calls. To make sure that
 * the tailcall counter behaves correctly, the bpf program goes through the
 * following flow:
 *
 * entry -> entry_subprog -> tailcall0 -> bpf_func0 -> subprog0 ->
 * -> tailcall1 -> bpf_func1 -> subprog1 -> tailcall2 -> bpf_func2 ->
 * subprog2 [here bump global counter] --------^
 *
 * We go through the first two tailcalls and start counting from subprog2,
 * where the loop begins. At the end of the test, make sure that the global
 * counter is equal to 31, because the tailcall counter includes the first
 * two tailcalls whereas the global counter is incremented only in the loop
 * shown in the flow above.
 *
 * The noise parameter is used to insert bpf_map_update calls into the logic
 * to force the verifier to patch instructions. This allows us to ensure that
 * the jump logic remains correct as instructions move.
 */
static void test_tailcall_bpf2bpf_4(bool noise)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i;
	struct tailcall_bpf2bpf4__bss val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf4.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;
	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;
	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;
		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;
		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;
	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	val.noise = noise;
	val.count = 0;
	err = bpf_map_update_elem(data_fd, &i, &val, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val.count, 31, "tailcall count");
out:
	bpf_object__close(obj);
}

#include "tailcall_bpf2bpf6.skel.h"

/* Tail call counting works even when there is data on the stack which is
 * not aligned to 8 bytes.
 */
static void test_tailcall_bpf2bpf_6(void)
{
	struct tailcall_bpf2bpf6 *obj;
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	obj = tailcall_bpf2bpf6__open_and_load();
	if (!ASSERT_OK_PTR(obj, "open and load"))
		return;

	main_fd = bpf_program__fd(obj->progs.entry);
	if (!ASSERT_GE(main_fd, 0, "entry prog fd"))
		goto out;

	map_fd = bpf_map__fd(obj->maps.jmp_table);
	if (!ASSERT_GE(map_fd, 0, "jmp_table map fd"))
		goto out;

	prog_fd = bpf_program__fd(obj->progs.classifier_0);
	if (!ASSERT_GE(prog_fd, 0, "classifier_0 prog fd"))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (!ASSERT_OK(err, "jmp_table map update"))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "entry prog test run");
	ASSERT_EQ(topts.retval, 0, "tailcall retval");

	data_fd = bpf_map__fd(obj->maps.bss);
	if (!ASSERT_GE(data_fd, 0, "bss map fd"))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "bss map lookup");
	ASSERT_EQ(val, 1, "done flag is set");
out:
	tailcall_bpf2bpf6__destroy(obj);
}

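/* The pattern exercised here, roughly (a hypothetical sketch, not the literal
 * tailcall_bpf2bpf6.c source): a subprogram keeps an odd-sized buffer on its
 * stack, so the stack depth is not a multiple of 8 when the tail call is
 * made, and classifier_0 sets the done flag in .bss that the lookup above
 * verifies:
 *
 *	static __noinline int subprog_tail(struct __sk_buff *skb)
 *	{
 *		char unaligned[3] = {};	// stack data not 8-byte aligned
 *
 *		bpf_tail_call_static(skb, &jmp_table, 0);
 *		return unaligned[0];
 *	}
 */
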
void test_tailcalls(void)
{
	if (test__start_subtest("tailcall_1"))
		test_tailcall_1();
	if (test__start_subtest("tailcall_2"))
		test_tailcall_2();
	if (test__start_subtest("tailcall_3"))
		test_tailcall_3();
	if (test__start_subtest("tailcall_4"))
		test_tailcall_4();
	if (test__start_subtest("tailcall_5"))
		test_tailcall_5();
	if (test__start_subtest("tailcall_6"))
		test_tailcall_6();
	if (test__start_subtest("tailcall_bpf2bpf_1"))
		test_tailcall_bpf2bpf_1();
	if (test__start_subtest("tailcall_bpf2bpf_2"))
		test_tailcall_bpf2bpf_2();
	if (test__start_subtest("tailcall_bpf2bpf_3"))
		test_tailcall_bpf2bpf_3();
	if (test__start_subtest("tailcall_bpf2bpf_4"))
		test_tailcall_bpf2bpf_4(false);
	if (test__start_subtest("tailcall_bpf2bpf_5"))
		test_tailcall_bpf2bpf_4(true);
	if (test__start_subtest("tailcall_bpf2bpf_6"))
		test_tailcall_bpf2bpf_6();
}