// SPDX-License-Identifier: GPL-2.0
#include <unistd.h>
#include <test_progs.h>
#include <network_helpers.h>
#include "tailcall_poke.skel.h"
#include "tailcall_bpf2bpf_hierarchy2.skel.h"
#include "tailcall_bpf2bpf_hierarchy3.skel.h"
#include "tailcall_freplace.skel.h"
#include "tc_bpf2bpf.skel.h"
#include "tailcall_fail.skel.h"
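
/* The tests in this file exercise BPF tail calls: patching of tail call
 * targets in JITed code, enforcement of the tail call limit (the expected
 * counter value of 33 used below corresponds to the kernel's
 * MAX_TAIL_CALL_CNT), and interactions with bpf2bpf calls, tracing
 * programs and freplace programs.
 */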

/* test_tailcall_1 checks basic functionality by patching multiple locations
 * in a single program for a single tail call slot with nop->jmp, jmp->nop
 * and jmp->jmp rewrites. Also checks for nop->nop.
 */
static void test_tailcall_1(void)
{
	int err, map_fd, prog_fd, main_fd, i, j;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall1.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");

	/* Re-populate the now-empty jump table in order. */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");

	/* Re-populate in reverse order to trigger jmp->jmp rewrites. */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		j = bpf_map__max_entries(prog_array) - 1 - i;
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", j);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		j = bpf_map__max_entries(prog_array) - 1 - i;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, j, "tailcall retval");

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");

	/* Deleting from an already-empty table must fail with ENOENT and
	 * leave the fall-through return value intact (nop->nop).
	 */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err >= 0 || errno != ENOENT))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}
out:
	bpf_object__close(obj);
}

/* test_tailcall_2 checks that patching multiple programs for a single
 * tail call slot works. It also jumps through several programs and tests
 * the tail call limit counter.
 */
static void test_tailcall_2(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall2.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 2, "tailcall retval");

	i = 2;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");
out:
	bpf_object__close(obj);
}
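
/* Shared helper for the tail call counting tests below: loads @which, wires
 * classifier_0 into slot 0 of jmp_table so it keeps tail calling itself via
 * subprog_tail, and verifies that the run stops at the tail call limit.
 * Optionally attaches fentry and/or fexit programs to subprog_tail to check
 * that tracing does not corrupt the tail call counter.
 */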
static void test_tailcall_count(const char *which, bool test_fentry,
				bool test_fexit)
{
	struct bpf_object *obj = NULL, *fentry_obj = NULL, *fexit_obj = NULL;
	struct bpf_link *fentry_link = NULL, *fexit_link = NULL;
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	prog = bpf_object__find_program_by_name(obj, "classifier_0");
	if (CHECK_FAIL(!prog))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(prog_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	if (test_fentry) {
		fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o",
						   NULL);
		if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
			goto out;

		prog = bpf_object__find_program_by_name(fentry_obj, "fentry");
		if (!ASSERT_OK_PTR(prog, "find fentry prog"))
			goto out;

		err = bpf_program__set_attach_target(prog, prog_fd,
						     "subprog_tail");
		if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
			goto out;

		err = bpf_object__load(fentry_obj);
		if (!ASSERT_OK(err, "load fentry_obj"))
			goto out;

		fentry_link = bpf_program__attach_trace(prog);
		if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
			goto out;
	}

	if (test_fexit) {
		fexit_obj = bpf_object__open_file("tailcall_bpf2bpf_fexit.bpf.o",
						  NULL);
		if (!ASSERT_OK_PTR(fexit_obj, "open fexit_obj file"))
			goto out;

		prog = bpf_object__find_program_by_name(fexit_obj, "fexit");
		if (!ASSERT_OK_PTR(prog, "find fexit prog"))
			goto out;

		err = bpf_program__set_attach_target(prog, prog_fd,
						     "subprog_tail");
		if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
			goto out;

		err = bpf_object__load(fexit_obj);
		if (!ASSERT_OK(err, "load fexit_obj"))
			goto out;

		fexit_link = bpf_program__attach_trace(prog);
		if (!ASSERT_OK_PTR(fexit_link, "attach_trace"))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 33, "tailcall count");

	if (test_fentry) {
		data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find tailcall_bpf2bpf_fentry.bss map"))
			goto out;

		data_fd = bpf_map__fd(data_map);
		if (!ASSERT_FALSE(data_fd < 0,
				  "find tailcall_bpf2bpf_fentry.bss map fd"))
			goto out;

		i = 0;
		err = bpf_map_lookup_elem(data_fd, &i, &val);
		ASSERT_OK(err, "fentry count");
		/* subprog_tail runs once per tail call, so the tracing
		 * program fires as often as the tail call counter.
		 */
		ASSERT_EQ(val, 33, "fentry count");
	}

	if (test_fexit) {
		data_map = bpf_object__find_map_by_name(fexit_obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find tailcall_bpf2bpf_fexit.bss map"))
			goto out;

		data_fd = bpf_map__fd(data_map);
		if (!ASSERT_FALSE(data_fd < 0,
				  "find tailcall_bpf2bpf_fexit.bss map fd"))
			goto out;

		i = 0;
		err = bpf_map_lookup_elem(data_fd, &i, &val);
		ASSERT_OK(err, "fexit count");
		ASSERT_EQ(val, 33, "fexit count");
	}

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");
out:
	bpf_link__destroy(fentry_link);
	bpf_link__destroy(fexit_link);
	bpf_object__close(fentry_obj);
	bpf_object__close(fexit_obj);
	bpf_object__close(obj);
}

/* test_tailcall_3 checks that the count value of the tail call limit
 * enforcement matches with expectations. JIT uses direct jump.
 */
static void test_tailcall_3(void)
{
	test_tailcall_count("tailcall3.bpf.o", false, false);
}

/* test_tailcall_6 checks that the count value of the tail call limit
 * enforcement matches with expectations. JIT uses indirect jump.
 */
static void test_tailcall_6(void)
{
	test_tailcall_count("tailcall6.bpf.o", false, false);
}

/* test_tailcall_4 checks that the kernel properly selects indirect jump
 * for the case where the key is not known. The latter is passed via global
 * data to select different targets whose return values we can compare.
 */
static void test_tailcall_4(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	static const int zero = 0;
	char buff[128] = {};
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall4.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}
out:
	bpf_object__close(obj);
}

/* test_tailcall_5 probes similarly to test_tailcall_4 that the kernel
 * generates an indirect jump when the keys are const but different on
 * different branches.
 */
static void test_tailcall_5(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, key[] = { 1111, 1234, 5678 };
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	static const int zero = 0;
	char buff[128] = {};
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall5.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_1 makes sure that tailcalls work correctly in
 * combination with BPF subprograms.
 */
static void test_tailcall_bpf2bpf_1(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf1.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	/* nop -> jmp */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	/* jmp -> nop, call subprog that will do tailcall */
	i = 1;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");

	/* make sure that subprog can access ctx and entry prog that
	 * called this subprog can properly return
	 */
	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_2 checks that the count value of the tail call limit
 * enforcement matches with expectations when tailcall is preceded with a
 * bpf2bpf call.
 */
static void test_tailcall_bpf2bpf_2(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf2.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	prog = bpf_object__find_program_by_name(obj, "classifier_0");
	if (CHECK_FAIL(!prog))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(prog_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 33, "tailcall count");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_3 checks that a non-trivial amount of stack (up to
 * 256 bytes) can be used within bpf subprograms that have the tailcalls
 * in them.
 */
static void test_tailcall_bpf2bpf_3(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf3.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");

	i = 1;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4), "tailcall retval");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
out:
	bpf_object__close(obj);
}

#include "tailcall_bpf2bpf4.skel.h"

/* test_tailcall_bpf2bpf_4 checks that tailcall counter is correctly preserved
 * across tailcalls combined with bpf2bpf calls. for making sure that tailcall
 * counter behaves correctly, bpf program will go through following flow:
 *
 * entry -> entry_subprog -> tailcall0 -> bpf_func0 -> subprog0 ->
 * -> tailcall1 -> bpf_func1 -> subprog1 -> tailcall2 -> bpf_func2 ->
 * subprog2 [here bump global counter] --------^
 *
 * We go through first two tailcalls and start counting from the subprog2 where
 * the loop begins. At the end of the test make sure that the global counter is
 * equal to 31, because tailcall counter includes the first two tailcalls
 * whereas global counter is incremented only on loop presented on flow above.
 *
 * The noise parameter is used to insert bpf_map_update calls into the logic
 * to force verifier to patch instructions. This allows us to ensure jump
 * logic remains correct with instruction movement.
 */
static void test_tailcall_bpf2bpf_4(bool noise)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i;
	struct tailcall_bpf2bpf4__bss val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf4.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	val.noise = noise;
	val.count = 0;
	err = bpf_map_update_elem(data_fd, &i, &val, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val.count, 31, "tailcall count");
out:
	bpf_object__close(obj);
}

#include "tailcall_bpf2bpf6.skel.h"

/* Tail call counting works even when there is data on stack which is
 * not aligned to 8 bytes.
 */
static void test_tailcall_bpf2bpf_6(void)
{
	struct tailcall_bpf2bpf6 *obj;
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	obj = tailcall_bpf2bpf6__open_and_load();
	if (!ASSERT_OK_PTR(obj, "open and load"))
		return;

	main_fd = bpf_program__fd(obj->progs.entry);
	if (!ASSERT_GE(main_fd, 0, "entry prog fd"))
		goto out;

	map_fd = bpf_map__fd(obj->maps.jmp_table);
	if (!ASSERT_GE(map_fd, 0, "jmp_table map fd"))
		goto out;

	prog_fd = bpf_program__fd(obj->progs.classifier_0);
	if (!ASSERT_GE(prog_fd, 0, "classifier_0 prog fd"))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (!ASSERT_OK(err, "jmp_table map update"))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "entry prog test run");
	ASSERT_EQ(topts.retval, 0, "tailcall retval");

	data_fd = bpf_map__fd(obj->maps.bss);
	if (!ASSERT_GE(data_fd, 0, "bss map fd"))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "bss map lookup");
	ASSERT_EQ(val, 1, "done flag is set");
out:
	tailcall_bpf2bpf6__destroy(obj);
}

/* test_tailcall_bpf2bpf_fentry checks that the count value of the tail call
 * limit enforcement matches with expectations when tailcall is preceded with
 * bpf2bpf call, and the bpf2bpf call is traced by fentry.
 */
static void test_tailcall_bpf2bpf_fentry(void)
{
	test_tailcall_count("tailcall_bpf2bpf2.bpf.o", true, false);
}

/* test_tailcall_bpf2bpf_fexit checks that the count value of the tail call
 * limit enforcement matches with expectations when tailcall is preceded with
 * bpf2bpf call, and the bpf2bpf call is traced by fexit.
 */
static void test_tailcall_bpf2bpf_fexit(void)
{
	test_tailcall_count("tailcall_bpf2bpf2.bpf.o", false, true);
}

/* test_tailcall_bpf2bpf_fentry_fexit checks that the count value of the tail
 * call limit enforcement matches with expectations when tailcall is preceded
 * with bpf2bpf call, and the bpf2bpf call is traced by both fentry and fexit.
 */
static void test_tailcall_bpf2bpf_fentry_fexit(void)
{
	test_tailcall_count("tailcall_bpf2bpf2.bpf.o", true, true);
}

/* test_tailcall_bpf2bpf_fentry_entry checks that the count value of the tail
 * call limit enforcement matches with expectations when tailcall is preceded
 * with bpf2bpf call, and the bpf2bpf caller is traced by fentry.
 */
static void test_tailcall_bpf2bpf_fentry_entry(void)
{
	struct bpf_object *tgt_obj = NULL, *fentry_obj = NULL;
	int err, map_fd, prog_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_link *fentry_link = NULL;
	struct bpf_program *prog;
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf2.bpf.o",
				 BPF_PROG_TYPE_SCHED_CLS,
				 &tgt_obj, &prog_fd);
	if (!ASSERT_OK(err, "load tgt_obj"))
		return;

	prog_array = bpf_object__find_map_by_name(tgt_obj, "jmp_table");
	if (!ASSERT_OK_PTR(prog_array, "find jmp_table map"))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (!ASSERT_FALSE(map_fd < 0, "find jmp_table map fd"))
		goto out;

	prog = bpf_object__find_program_by_name(tgt_obj, "classifier_0");
	if (!ASSERT_OK_PTR(prog, "find classifier_0 prog"))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (!ASSERT_FALSE(prog_fd < 0, "find classifier_0 prog fd"))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (!ASSERT_OK(err, "update jmp_table"))
		goto out;

	fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o",
					   NULL);
	if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
		goto out;

	prog = bpf_object__find_program_by_name(fentry_obj, "fentry");
	if (!ASSERT_OK_PTR(prog, "find fentry prog"))
		goto out;

	err = bpf_program__set_attach_target(prog, prog_fd, "classifier_0");
	if (!ASSERT_OK(err, "set_attach_target classifier_0"))
		goto out;

	err = bpf_object__load(fentry_obj);
	if (!ASSERT_OK(err, "load fentry_obj"))
		goto out;

	fentry_link = bpf_program__attach_trace(prog);
	if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
		goto out;

	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	data_map = bpf_object__find_map_by_name(tgt_obj, "tailcall.bss");
	if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
			  "find tailcall.bss map"))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (!ASSERT_FALSE(data_fd < 0, "find tailcall.bss map fd"))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	/* classifier_0 runs once directly via test_run plus 33 times
	 * through tail calls, hence 34.
	 */
	ASSERT_EQ(val, 34, "tailcall count");

	data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
	if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
			  "find tailcall_bpf2bpf_fentry.bss map"))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (!ASSERT_FALSE(data_fd < 0,
			  "find tailcall_bpf2bpf_fentry.bss map fd"))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "fentry count");
	ASSERT_EQ(val, 1, "fentry count");
out:
	bpf_link__destroy(fentry_link);
	bpf_object__close(fentry_obj);
	bpf_object__close(tgt_obj);
}

#define JMP_TABLE "/sys/fs/bpf/jmp_table"

static int poke_thread_exit;
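
/* Keep flipping the single jmp_table slot between call1 and call2 to race
 * prog array updates against the program load in test_tailcall_poke() below.
 */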
static void *poke_update(void *arg)
{
	__u32 zero = 0, prog1_fd, prog2_fd, map_fd;
	struct tailcall_poke *call = arg;

	map_fd = bpf_map__fd(call->maps.jmp_table);
	prog1_fd = bpf_program__fd(call->progs.call1);
	prog2_fd = bpf_program__fd(call->progs.call2);

	while (!poke_thread_exit) {
		bpf_map_update_elem(map_fd, &zero, &prog1_fd, BPF_ANY);
		bpf_map_update_elem(map_fd, &zero, &prog2_fd, BPF_ANY);
	}

	return NULL;
}

/*
 * We are trying to hit prog array update during another program load
 * that shares the same prog array map.
 *
 * For that we share the jmp_table map between two skeleton instances
 * by pinning the jmp_table to same path. Then first skeleton instance
 * periodically updates jmp_table in 'poke update' thread while we load
 * the second skeleton instance in the main thread.
 */
static void test_tailcall_poke(void)
{
	struct tailcall_poke *call, *test;
	int err, cnt = 10;
	pthread_t thread;

	call = tailcall_poke__open_and_load();
	if (!ASSERT_OK_PTR(call, "tailcall_poke__open"))
		return;

	err = bpf_map__pin(call->maps.jmp_table, JMP_TABLE);
	if (!ASSERT_OK(err, "bpf_map__pin"))
		goto out;

	err = pthread_create(&thread, NULL, poke_update, call);
	if (!ASSERT_OK(err, "new toggler"))
		goto out;

	while (cnt--) {
		test = tailcall_poke__open();
		if (!ASSERT_OK_PTR(test, "tailcall_poke__open"))
			break;

		err = bpf_map__set_pin_path(test->maps.jmp_table, JMP_TABLE);
		if (!ASSERT_OK(err, "bpf_map__pin")) {
			tailcall_poke__destroy(test);
			break;
		}

		bpf_program__set_autoload(test->progs.test, true);
		bpf_program__set_autoload(test->progs.call1, false);
		bpf_program__set_autoload(test->progs.call2, false);

		err = tailcall_poke__load(test);
		tailcall_poke__destroy(test);
		if (!ASSERT_OK(err, "tailcall_poke__load"))
			break;
	}

	poke_thread_exit = 1;
	ASSERT_OK(pthread_join(thread, NULL), "pthread_join");

out:
	bpf_map__unpin(call->maps.jmp_table, JMP_TABLE);
	tailcall_poke__destroy(call);
}
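
/* Shared helper for the tail call hierarchy tests below: the entry prog tail
 * calls itself through two different subprograms, so one test run performs 33
 * tail calls before the limit kicks in (34 executions of entry including the
 * initial one). fentry/fexit programs can optionally be attached to the
 * subprogs, and with test_fentry_entry the tail call is instead set up inside
 * an fentry program attached to entry itself.
 */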
static void test_tailcall_hierarchy_count(const char *which, bool test_fentry,
					  bool test_fexit,
					  bool test_fentry_entry)
{
	int err, map_fd, prog_fd, main_data_fd, fentry_data_fd, fexit_data_fd, i, val;
	struct bpf_object *obj = NULL, *fentry_obj = NULL, *fexit_obj = NULL;
	struct bpf_link *fentry_link = NULL, *fexit_link = NULL;
	struct bpf_program *prog, *fentry_prog;
	struct bpf_map *prog_array, *data_map;
	int fentry_prog_fd;
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (!ASSERT_OK(err, "load obj"))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (!ASSERT_OK_PTR(prog, "find entry prog"))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (!ASSERT_GE(prog_fd, 0, "prog_fd"))
		goto out;

	if (test_fentry_entry) {
		fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_hierarchy_fentry.bpf.o",
						   NULL);
		if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
			goto out;

		fentry_prog = bpf_object__find_program_by_name(fentry_obj,
							       "fentry");
		if (!ASSERT_OK_PTR(fentry_prog, "find fentry prog"))
			goto out;

		err = bpf_program__set_attach_target(fentry_prog, prog_fd,
						     "entry");
		if (!ASSERT_OK(err, "set_attach_target entry"))
			goto out;

		err = bpf_object__load(fentry_obj);
		if (!ASSERT_OK(err, "load fentry_obj"))
			goto out;

		fentry_link = bpf_program__attach_trace(fentry_prog);
		if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
			goto out;

		fentry_prog_fd = bpf_program__fd(fentry_prog);
		if (!ASSERT_GE(fentry_prog_fd, 0, "fentry_prog_fd"))
			goto out;

		prog_array = bpf_object__find_map_by_name(fentry_obj, "jmp_table");
		if (!ASSERT_OK_PTR(prog_array, "find jmp_table"))
			goto out;

		map_fd = bpf_map__fd(prog_array);
		if (!ASSERT_GE(map_fd, 0, "map_fd"))
			goto out;

		i = 0;
		err = bpf_map_update_elem(map_fd, &i, &fentry_prog_fd, BPF_ANY);
		if (!ASSERT_OK(err, "update jmp_table"))
			goto out;

		data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find data_map"))
			goto out;
	} else {
		prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
		if (!ASSERT_OK_PTR(prog_array, "find jmp_table"))
			goto out;

		map_fd = bpf_map__fd(prog_array);
		if (!ASSERT_GE(map_fd, 0, "map_fd"))
			goto out;

		i = 0;
		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (!ASSERT_OK(err, "update jmp_table"))
			goto out;

		data_map = bpf_object__find_map_by_name(obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find data_map"))
			goto out;
	}

	if (test_fentry) {
		fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o",
						   NULL);
		if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
			goto out;

		prog = bpf_object__find_program_by_name(fentry_obj, "fentry");
		if (!ASSERT_OK_PTR(prog, "find fentry prog"))
			goto out;

		err = bpf_program__set_attach_target(prog, prog_fd,
						     "subprog_tail");
		if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
			goto out;

		err = bpf_object__load(fentry_obj);
		if (!ASSERT_OK(err, "load fentry_obj"))
			goto out;

		fentry_link = bpf_program__attach_trace(prog);
		if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
			goto out;
	}

	if (test_fexit) {
		fexit_obj = bpf_object__open_file("tailcall_bpf2bpf_fexit.bpf.o",
						  NULL);
		if (!ASSERT_OK_PTR(fexit_obj, "open fexit_obj file"))
			goto out;

		prog = bpf_object__find_program_by_name(fexit_obj, "fexit");
		if (!ASSERT_OK_PTR(prog, "find fexit prog"))
			goto out;

		err = bpf_program__set_attach_target(prog, prog_fd,
						     "subprog_tail");
		if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
			goto out;

		err = bpf_object__load(fexit_obj);
		if (!ASSERT_OK(err, "load fexit_obj"))
			goto out;

		fexit_link = bpf_program__attach_trace(prog);
		if (!ASSERT_OK_PTR(fexit_link, "attach_trace"))
			goto out;
	}

	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	main_data_fd = bpf_map__fd(data_map);
	if (!ASSERT_GE(main_data_fd, 0, "main_data_fd"))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(main_data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 34, "tailcall count");

	if (test_fentry) {
		data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find tailcall_bpf2bpf_fentry.bss map"))
			goto out;

		fentry_data_fd = bpf_map__fd(data_map);
		if (!ASSERT_GE(fentry_data_fd, 0,
			       "find tailcall_bpf2bpf_fentry.bss map fd"))
			goto out;

		i = 0;
		err = bpf_map_lookup_elem(fentry_data_fd, &i, &val);
		ASSERT_OK(err, "fentry count");
		/* Both subprogs are traced, two invocations per run of
		 * entry, hence twice the tailcall count.
		 */
		ASSERT_EQ(val, 68, "fentry count");
	}

	if (test_fexit) {
		data_map = bpf_object__find_map_by_name(fexit_obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find tailcall_bpf2bpf_fexit.bss map"))
			goto out;

		fexit_data_fd = bpf_map__fd(data_map);
		if (!ASSERT_GE(fexit_data_fd, 0,
			       "find tailcall_bpf2bpf_fexit.bss map fd"))
			goto out;

		i = 0;
		err = bpf_map_lookup_elem(fexit_data_fd, &i, &val);
		ASSERT_OK(err, "fexit count");
		ASSERT_EQ(val, 68, "fexit count");
	}

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (!ASSERT_OK(err, "delete_elem from jmp_table"))
		goto out;

	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	i = 0;
	err = bpf_map_lookup_elem(main_data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 35, "tailcall count");

	if (test_fentry) {
		i = 0;
		err = bpf_map_lookup_elem(fentry_data_fd, &i, &val);
		ASSERT_OK(err, "fentry count");
		ASSERT_EQ(val, 70, "fentry count");
	}

	if (test_fexit) {
		i = 0;
		err = bpf_map_lookup_elem(fexit_data_fd, &i, &val);
		ASSERT_OK(err, "fexit count");
		ASSERT_EQ(val, 70, "fexit count");
	}

out:
	bpf_link__destroy(fentry_link);
	bpf_link__destroy(fexit_link);
	bpf_object__close(fentry_obj);
	bpf_object__close(fexit_obj);
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_hierarchy_1 checks that the count value of the tail
 * call limit enforcement matches with expectations when tailcalls are preceded
 * with two bpf2bpf calls.
 *
 *         subprog --tailcall-> entry
 * entry <
 *         subprog --tailcall-> entry
 */
static void test_tailcall_bpf2bpf_hierarchy_1(void)
{
	test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o",
				      false, false, false);
}

/* test_tailcall_bpf2bpf_hierarchy_fentry checks that the count value of the
 * tail call limit enforcement matches with expectations when tailcalls are
 * preceded with two bpf2bpf calls, and the two subprogs are traced by fentry.
 */
static void test_tailcall_bpf2bpf_hierarchy_fentry(void)
{
	test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o",
				      true, false, false);
}

/* test_tailcall_bpf2bpf_hierarchy_fexit checks that the count value of the tail
 * call limit enforcement matches with expectations when tailcalls are preceded
 * with two bpf2bpf calls, and the two subprogs are traced by fexit.
 */
static void test_tailcall_bpf2bpf_hierarchy_fexit(void)
{
	test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o",
				      false, true, false);
}

/* test_tailcall_bpf2bpf_hierarchy_fentry_fexit checks that the count value of
 * the tail call limit enforcement matches with expectations when tailcalls are
 * preceded with two bpf2bpf calls, and the two subprogs are traced by both
 * fentry and fexit.
 */
static void test_tailcall_bpf2bpf_hierarchy_fentry_fexit(void)
{
	test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o",
				      true, true, false);
}

/* test_tailcall_bpf2bpf_hierarchy_fentry_entry checks that the count value of
 * the tail call limit enforcement matches with expectations when tailcalls are
 * preceded with two bpf2bpf calls in fentry.
 */
static void test_tailcall_bpf2bpf_hierarchy_fentry_entry(void)
{
	test_tailcall_hierarchy_count("tc_dummy.bpf.o", false, false, true);
}

/* test_tailcall_bpf2bpf_hierarchy_2 checks that the count value of the tail
 * call limit enforcement matches with expectations:
 *
 *         subprog_tail0 --tailcall-> classifier_0 -> subprog_tail0
 * entry <
 *         subprog_tail1 --tailcall-> classifier_1 -> subprog_tail1
 */
static void test_tailcall_bpf2bpf_hierarchy_2(void)
{
	RUN_TESTS(tailcall_bpf2bpf_hierarchy2);
}

/* test_tailcall_bpf2bpf_hierarchy_3 checks that the count value of the tail
 * call limit enforcement matches with expectations:
 *
 *                                   subprog with jmp_table0 to classifier_0
 * entry --tailcall-> classifier_0 <
 *                                   subprog with jmp_table1 to classifier_0
 */
static void test_tailcall_bpf2bpf_hierarchy_3(void)
{
	RUN_TESTS(tailcall_bpf2bpf_hierarchy3);
}

/* test_tailcall_freplace checks that the freplace prog fails to update the
 * prog_array map, no matter whether the freplace prog attaches to its target.
 */
static void test_tailcall_freplace(void)
{
	struct tailcall_freplace *freplace_skel = NULL;
	struct bpf_link *freplace_link = NULL;
	struct bpf_program *freplace_prog;
	struct tc_bpf2bpf *tc_skel = NULL;
	int prog_fd, tc_prog_fd, map_fd;
	char buff[128] = {};
	int err, key = 0;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	freplace_skel = tailcall_freplace__open();
	if (!ASSERT_OK_PTR(freplace_skel, "tailcall_freplace__open"))
		return;

	tc_skel = tc_bpf2bpf__open_and_load();
	if (!ASSERT_OK_PTR(tc_skel, "tc_bpf2bpf__open_and_load"))
		goto out;

	tc_prog_fd = bpf_program__fd(tc_skel->progs.entry_tc);
	freplace_prog = freplace_skel->progs.entry_freplace;
	err = bpf_program__set_attach_target(freplace_prog, tc_prog_fd,
					     "subprog_tc");
	if (!ASSERT_OK(err, "set_attach_target"))
		goto out;

	err = tailcall_freplace__load(freplace_skel);
	if (!ASSERT_OK(err, "tailcall_freplace__load"))
		goto out;

	map_fd = bpf_map__fd(freplace_skel->maps.jmp_table);
	prog_fd = bpf_program__fd(freplace_prog);

	/* Not yet attached: adding the freplace prog to the prog_array
	 * must already fail.
	 */
	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
	ASSERT_ERR(err, "update jmp_table failure");

	freplace_link = bpf_program__attach_freplace(freplace_prog, tc_prog_fd,
						     "subprog_tc");
	if (!ASSERT_OK_PTR(freplace_link, "attach_freplace"))
		goto out;

	/* Attached: still must fail. */
	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
	ASSERT_ERR(err, "update jmp_table failure");

out:
	bpf_link__destroy(freplace_link);
	tailcall_freplace__destroy(freplace_skel);
	tc_bpf2bpf__destroy(tc_skel);
}
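
/* The two freplace tests mirror kernel-side restrictions on mixing freplace
 * with tail calls: an freplace prog cannot be placed into a prog_array, a
 * prog that sits in a prog_array cannot be extended by an freplace prog, and
 * an extended prog cannot be added to a prog_array.
 */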

/* test_tailcall_bpf2bpf_freplace checks that attaching an freplace prog to a
 * prog that sits in a prog_array map fails, and that updating an extended
 * prog into a prog_array map fails, too.
 */
static void test_tailcall_bpf2bpf_freplace(void)
{
	struct tailcall_freplace *freplace_skel = NULL;
	struct bpf_link *freplace_link = NULL;
	struct tc_bpf2bpf *tc_skel = NULL;
	char buff[128] = {};
	int prog_fd, map_fd;
	int err, key = 0;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	tc_skel = tc_bpf2bpf__open_and_load();
	if (!ASSERT_OK_PTR(tc_skel, "tc_bpf2bpf__open_and_load"))
		goto out;

	prog_fd = bpf_program__fd(tc_skel->progs.entry_tc);
	freplace_skel = tailcall_freplace__open();
	if (!ASSERT_OK_PTR(freplace_skel, "tailcall_freplace__open"))
		goto out;

	err = bpf_program__set_attach_target(freplace_skel->progs.entry_freplace,
					     prog_fd, "subprog_tc");
	if (!ASSERT_OK(err, "set_attach_target"))
		goto out;

	err = tailcall_freplace__load(freplace_skel);
	if (!ASSERT_OK(err, "tailcall_freplace__load"))
		goto out;

	/* OK to attach then detach freplace prog. */
	freplace_link = bpf_program__attach_freplace(freplace_skel->progs.entry_freplace,
						     prog_fd, "subprog_tc");
	if (!ASSERT_OK_PTR(freplace_link, "attach_freplace"))
		goto out;

	err = bpf_link__destroy(freplace_link);
	if (!ASSERT_OK(err, "destroy link"))
		goto out;

	/* OK to update prog_array map then delete element from the map. */
	map_fd = bpf_map__fd(freplace_skel->maps.jmp_table);
	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
	if (!ASSERT_OK(err, "update jmp_table"))
		goto out;

	err = bpf_map_delete_elem(map_fd, &key);
	if (!ASSERT_OK(err, "delete_elem from jmp_table"))
		goto out;

	/* Fail to attach a tail callee prog with freplace prog. */
	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
	if (!ASSERT_OK(err, "update jmp_table"))
		goto out;

	freplace_link = bpf_program__attach_freplace(freplace_skel->progs.entry_freplace,
						     prog_fd, "subprog_tc");
	if (!ASSERT_ERR_PTR(freplace_link, "attach_freplace failure"))
		goto out;

	err = bpf_map_delete_elem(map_fd, &key);
	if (!ASSERT_OK(err, "delete_elem from jmp_table"))
		goto out;

	/* Fail to update an extended prog to prog_array map. */
	freplace_link = bpf_program__attach_freplace(freplace_skel->progs.entry_freplace,
						     prog_fd, "subprog_tc");
	if (!ASSERT_OK_PTR(freplace_link, "attach_freplace"))
		goto out;

	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
	if (!ASSERT_ERR(err, "update jmp_table failure"))
		goto out;

out:
	bpf_link__destroy(freplace_link);
	tailcall_freplace__destroy(freplace_skel);
	tc_bpf2bpf__destroy(tc_skel);
}
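
/* tailcall_fail.bpf.c carries programs annotated with expected verifier
 * outcomes; RUN_TESTS() loads each of them and checks that the verifier
 * accepts or rejects them as annotated.
 */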
static void test_tailcall_failure(void)
{
	RUN_TESTS(tailcall_fail);
}

void test_tailcalls(void)
{
	if (test__start_subtest("tailcall_1"))
		test_tailcall_1();
	if (test__start_subtest("tailcall_2"))
		test_tailcall_2();
	if (test__start_subtest("tailcall_3"))
		test_tailcall_3();
	if (test__start_subtest("tailcall_4"))
		test_tailcall_4();
	if (test__start_subtest("tailcall_5"))
		test_tailcall_5();
	if (test__start_subtest("tailcall_6"))
		test_tailcall_6();
	if (test__start_subtest("tailcall_bpf2bpf_1"))
		test_tailcall_bpf2bpf_1();
	if (test__start_subtest("tailcall_bpf2bpf_2"))
		test_tailcall_bpf2bpf_2();
	if (test__start_subtest("tailcall_bpf2bpf_3"))
		test_tailcall_bpf2bpf_3();
	if (test__start_subtest("tailcall_bpf2bpf_4"))
		test_tailcall_bpf2bpf_4(false);
	if (test__start_subtest("tailcall_bpf2bpf_5"))
		test_tailcall_bpf2bpf_4(true);
	if (test__start_subtest("tailcall_bpf2bpf_6"))
		test_tailcall_bpf2bpf_6();
	if (test__start_subtest("tailcall_bpf2bpf_fentry"))
		test_tailcall_bpf2bpf_fentry();
	if (test__start_subtest("tailcall_bpf2bpf_fexit"))
		test_tailcall_bpf2bpf_fexit();
	if (test__start_subtest("tailcall_bpf2bpf_fentry_fexit"))
		test_tailcall_bpf2bpf_fentry_fexit();
	if (test__start_subtest("tailcall_bpf2bpf_fentry_entry"))
		test_tailcall_bpf2bpf_fentry_entry();
	if (test__start_subtest("tailcall_poke"))
		test_tailcall_poke();
	if (test__start_subtest("tailcall_bpf2bpf_hierarchy_1"))
		test_tailcall_bpf2bpf_hierarchy_1();
	if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fentry"))
		test_tailcall_bpf2bpf_hierarchy_fentry();
	if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fexit"))
		test_tailcall_bpf2bpf_hierarchy_fexit();
	if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fentry_fexit"))
		test_tailcall_bpf2bpf_hierarchy_fentry_fexit();
	if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fentry_entry"))
		test_tailcall_bpf2bpf_hierarchy_fentry_entry();
	/* hierarchy_2/3 use RUN_TESTS(), which registers its own subtests. */
	test_tailcall_bpf2bpf_hierarchy_2();
	test_tailcall_bpf2bpf_hierarchy_3();
	if (test__start_subtest("tailcall_freplace"))
		test_tailcall_freplace();
	if (test__start_subtest("tailcall_bpf2bpf_freplace"))
		test_tailcall_bpf2bpf_freplace();
	if (test__start_subtest("tailcall_failure"))
		test_tailcall_failure();
}