// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
#include "tailcall_poke.skel.h"
/* test_tailcall_1 checks basic functionality by patching multiple locations
 * in a single program for a single tail call slot with nop->jmp, jmp->nop
 * and jmp->jmp rewrites. Also checks for nop->nop.
 */
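/* For reference, the BPF objects used below (tailcall1.bpf.o and friends)
 * follow the usual prog-array pattern. A minimal sketch of that pattern --
 * illustrative only, not the exact selftest source:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
 *		__uint(max_entries, 3);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(__u32));
 *	} jmp_table SEC(".maps");
 *
 *	SEC("tc")
 *	int entry(struct __sk_buff *skb)
 *	{
 *		bpf_tail_call_static(skb, &jmp_table, 0);
 *		bpf_tail_call_static(skb, &jmp_table, 1);
 *		bpf_tail_call_static(skb, &jmp_table, 2);
 *		return 3;
 *	}
 *
 * User space fills jmp_table with classifier_<i> programs and checks which
 * slot actually ran via the test-run return value.
 */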
static void test_tailcall_1(void)
int err, map_fd, prog_fd, main_fd, i, j;
struct bpf_map *prog_array;
struct bpf_program *prog;
struct bpf_object *obj;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_size_in = sizeof(buff),
err = bpf_prog_test_load("tailcall1.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
prog = bpf_object__find_program_by_name(obj, "entry");
if (CHECK_FAIL(!prog))
main_fd = bpf_program__fd(prog);
if (CHECK_FAIL(main_fd < 0))
prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
if (CHECK_FAIL(!prog_array))
map_fd = bpf_map__fd(prog_array);
if (CHECK_FAIL(map_fd < 0))
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK_FAIL(!prog))
prog_fd = bpf_program__fd(prog);
if (CHECK_FAIL(prog_fd < 0))
err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, i, "tailcall retval");
err = bpf_map_delete_elem(map_fd, &i);
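/* Once the loop above has emptied every slot, no tail call can succeed and
 * the entry program's fall-through return value (3) is expected.
 */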
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, 3, "tailcall retval");
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK_FAIL(!prog))
prog_fd = bpf_program__fd(prog);
if (CHECK_FAIL(prog_fd < 0))
err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_OK(topts.retval, "tailcall retval");
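/* The slots were refilled in order above, so the first tail call (slot 0 ->
 * classifier_0) wins again, which is why retval 0 is expected here.
 */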
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
j = bpf_map__max_entries(prog_array) - 1 - i;
snprintf(prog_name, sizeof(prog_name), "classifier_%d", j);
prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK_FAIL(!prog))
prog_fd = bpf_program__fd(prog);
if (CHECK_FAIL(prog_fd < 0))
err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
j = bpf_map__max_entries(prog_array) - 1 - i;
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, j, "tailcall retval");
err = bpf_map_delete_elem(map_fd, &i);
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, 3, "tailcall retval");
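/* The jmp_table is already empty at this point; deleting any slot again must
 * fail with ENOENT, and the run below must still see the fall-through value 3.
 */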
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
err = bpf_map_delete_elem(map_fd, &i);
if (CHECK_FAIL(err >= 0 || errno != ENOENT))
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, 3, "tailcall retval");
bpf_object__close(obj);
/* test_tailcall_2 checks that patching multiple programs for a single
 * tail call slot works. It also jumps through several programs and tests
 * the tail call limit counter.
 */
static void test_tailcall_2(void)
int err, map_fd, prog_fd, main_fd, i;
struct bpf_map *prog_array;
struct bpf_program *prog;
struct bpf_object *obj;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_size_in = sizeof(buff),
err = bpf_prog_test_load("tailcall2.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
prog = bpf_object__find_program_by_name(obj, "entry");
if (CHECK_FAIL(!prog))
main_fd = bpf_program__fd(prog);
if (CHECK_FAIL(main_fd < 0))
prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
if (CHECK_FAIL(!prog_array))
map_fd = bpf_map__fd(prog_array);
if (CHECK_FAIL(map_fd < 0))
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK_FAIL(!prog))
prog_fd = bpf_program__fd(prog);
if (CHECK_FAIL(prog_fd < 0))
err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, 2, "tailcall retval");
err = bpf_map_delete_elem(map_fd, &i);
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, 1, "tailcall retval");
err = bpf_map_delete_elem(map_fd, &i);
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, 3, "tailcall retval");
bpf_object__close(obj);
static void test_tailcall_count(const char *which, bool test_fentry, bool test_fexit)
struct bpf_object *obj = NULL, *fentry_obj = NULL, *fexit_obj = NULL;
struct bpf_link *fentry_link = NULL, *fexit_link = NULL;
int err, map_fd, prog_fd, main_fd, data_fd, i, val;
struct bpf_map *prog_array, *data_map;
struct bpf_program *prog;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_size_in = sizeof(buff),
err = bpf_prog_test_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj,
prog = bpf_object__find_program_by_name(obj, "entry");
if (CHECK_FAIL(!prog))
main_fd = bpf_program__fd(prog);
if (CHECK_FAIL(main_fd < 0))
prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
if (CHECK_FAIL(!prog_array))
map_fd = bpf_map__fd(prog_array);
if (CHECK_FAIL(map_fd < 0))
prog = bpf_object__find_program_by_name(obj, "classifier_0");
if (CHECK_FAIL(!prog))
prog_fd = bpf_program__fd(prog);
if (CHECK_FAIL(prog_fd < 0))
err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
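/* Optionally trace the subprog_tail helper of the just-loaded object with
 * fentry and/or fexit programs: open the tracing object, point its program at
 * subprog_tail via bpf_program__set_attach_target(), then load and attach it.
 */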
fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o",
if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
prog = bpf_object__find_program_by_name(fentry_obj, "fentry");
if (!ASSERT_OK_PTR(prog, "find fentry prog"))
err = bpf_program__set_attach_target(prog, prog_fd, "subprog_tail");
if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
err = bpf_object__load(fentry_obj);
if (!ASSERT_OK(err, "load fentry_obj"))
fentry_link = bpf_program__attach_trace(prog);
if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
fexit_obj = bpf_object__open_file("tailcall_bpf2bpf_fexit.bpf.o",
if (!ASSERT_OK_PTR(fexit_obj, "open fexit_obj file"))
prog = bpf_object__find_program_by_name(fexit_obj, "fexit");
if (!ASSERT_OK_PTR(prog, "find fexit prog"))
err = bpf_program__set_attach_target(prog, prog_fd, "subprog_tail");
if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
err = bpf_object__load(fexit_obj);
if (!ASSERT_OK(err, "load fexit_obj"))
fexit_link = bpf_program__attach_trace(prog);
if (!ASSERT_OK_PTR(fexit_link, "attach_trace"))
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, 1, "tailcall retval");
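/* The tail-call program bumps a counter in its .bss ("tailcall.bss") on every
 * tail call; with the kernel's MAX_TAIL_CALL_CNT limit of 33 it should read
 * exactly 33 after the run above.
 */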
data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
data_fd = bpf_map__fd(data_map);
if (CHECK_FAIL(data_fd < 0))
err = bpf_map_lookup_elem(data_fd, &i, &val);
ASSERT_OK(err, "tailcall count");
ASSERT_EQ(val, 33, "tailcall count");
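/* When fentry/fexit tracing of subprog_tail is enabled, each tracing program
 * keeps its own counter in its .bss and should have fired once per tail call
 * as well, i.e. it should also read 33.
 */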
data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
"find tailcall_bpf2bpf_fentry.bss map"))
data_fd = bpf_map__fd(data_map);
if (!ASSERT_FALSE(data_fd < 0,
"find tailcall_bpf2bpf_fentry.bss map fd"))
err = bpf_map_lookup_elem(data_fd, &i, &val);
ASSERT_OK(err, "fentry count");
ASSERT_EQ(val, 33, "fentry count");
data_map = bpf_object__find_map_by_name(fexit_obj, ".bss");
if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
"find tailcall_bpf2bpf_fexit.bss map"))
data_fd = bpf_map__fd(data_map);
if (!ASSERT_FALSE(data_fd < 0,
"find tailcall_bpf2bpf_fexit.bss map fd"))
err = bpf_map_lookup_elem(data_fd, &i, &val);
ASSERT_OK(err, "fexit count");
ASSERT_EQ(val, 33, "fexit count");
err = bpf_map_delete_elem(map_fd, &i);
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_OK(topts.retval, "tailcall retval");
bpf_link__destroy(fentry_link);
bpf_link__destroy(fexit_link);
bpf_object__close(fentry_obj);
bpf_object__close(fexit_obj);
bpf_object__close(obj);
/* test_tailcall_3 checks that the count value of the tail call limit
 * enforcement matches expectations. JIT uses direct jump.
 */
static void test_tailcall_3(void)
test_tailcall_count("tailcall3.bpf.o", false, false);
/* test_tailcall_6 checks that the count value of the tail call limit
 * enforcement matches expectations. JIT uses indirect jump.
 */
static void test_tailcall_6(void)
test_tailcall_count("tailcall6.bpf.o", false, false);
/* test_tailcall_4 checks that the kernel properly selects an indirect jump
 * for the case where the key is not known at verification time. The key is
 * passed via global data so that different targets can be selected and their
 * return values compared.
 */
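/* A minimal sketch of the pattern being exercised here -- illustrative only,
 * not the exact tailcall4.bpf.c source. The tail call key lives in global
 * data and is only known at run time, so the JIT cannot use a direct jump:
 *
 *	int selector;				// placed in "tailcall.bss"
 *
 *	SEC("tc")
 *	int entry(struct __sk_buff *skb)
 *	{
 *		bpf_tail_call(skb, &jmp_table, selector);
 *		return 3;
 *	}
 *
 * User space writes the selector through the .bss map below and checks that
 * the matching classifier ran.
 */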
static void test_tailcall_4(void)
int err, map_fd, prog_fd, main_fd, data_fd, i;
struct bpf_map *prog_array, *data_map;
struct bpf_program *prog;
struct bpf_object *obj;
static const int zero = 0;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_size_in = sizeof(buff),
err = bpf_prog_test_load("tailcall4.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
prog = bpf_object__find_program_by_name(obj, "entry");
if (CHECK_FAIL(!prog))
main_fd = bpf_program__fd(prog);
if (CHECK_FAIL(main_fd < 0))
prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
if (CHECK_FAIL(!prog_array))
map_fd = bpf_map__fd(prog_array);
if (CHECK_FAIL(map_fd < 0))
data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
data_fd = bpf_map__fd(data_map);
if (CHECK_FAIL(data_fd < 0))
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK_FAIL(!prog))
prog_fd = bpf_program__fd(prog);
if (CHECK_FAIL(prog_fd < 0))
err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, i, "tailcall retval");
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
err = bpf_map_delete_elem(map_fd, &i);
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, 3, "tailcall retval");
bpf_object__close(obj);
/* test_tailcall_5 probes, similarly to test_tailcall_4, that the kernel
 * generates an indirect jump when the key is constant but differs across
 * branches.
 */
static void test_tailcall_5(void)
int err, map_fd, prog_fd, main_fd, data_fd, i, key[] = { 1111, 1234, 5678 };
struct bpf_map *prog_array, *data_map;
struct bpf_program *prog;
struct bpf_object *obj;
static const int zero = 0;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_size_in = sizeof(buff),
err = bpf_prog_test_load("tailcall5.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
prog = bpf_object__find_program_by_name(obj, "entry");
if (CHECK_FAIL(!prog))
main_fd = bpf_program__fd(prog);
if (CHECK_FAIL(main_fd < 0))
prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
if (CHECK_FAIL(!prog_array))
map_fd = bpf_map__fd(prog_array);
if (CHECK_FAIL(map_fd < 0))
data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
data_fd = bpf_map__fd(data_map);
if (CHECK_FAIL(data_fd < 0))
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK_FAIL(!prog))
prog_fd = bpf_program__fd(prog);
if (CHECK_FAIL(prog_fd < 0))
err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, i, "tailcall retval");
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
err = bpf_map_delete_elem(map_fd, &i);
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, 3, "tailcall retval");
bpf_object__close(obj);
/* test_tailcall_bpf2bpf_1 makes sure that tail calls work correctly in
 * combination with BPF subprograms.
 */
static void test_tailcall_bpf2bpf_1(void)
int err, map_fd, prog_fd, main_fd, i;
struct bpf_map *prog_array;
struct bpf_program *prog;
struct bpf_object *obj;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_size_in = sizeof(pkt_v4),
err = bpf_prog_test_load("tailcall_bpf2bpf1.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
prog = bpf_object__find_program_by_name(obj, "entry");
if (CHECK_FAIL(!prog))
main_fd = bpf_program__fd(prog);
if (CHECK_FAIL(main_fd < 0))
prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
if (CHECK_FAIL(!prog_array))
map_fd = bpf_map__fd(prog_array);
if (CHECK_FAIL(map_fd < 0))
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK_FAIL(!prog))
prog_fd = bpf_program__fd(prog);
if (CHECK_FAIL(prog_fd < 0))
err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, 1, "tailcall retval");
/* jmp -> nop, call subprog that will do tailcall */
err = bpf_map_delete_elem(map_fd, &i);
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_OK(topts.retval, "tailcall retval");
/* make sure that the subprog can access ctx and that the entry prog that
 * called this subprog can properly return
 */
err = bpf_map_delete_elem(map_fd, &i);
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
bpf_object__close(obj);
/* test_tailcall_bpf2bpf_2 checks that the count value of the tail call limit
 * enforcement matches expectations when the tail call is preceded by a
 * bpf2bpf call.
 */
static void test_tailcall_bpf2bpf_2(void)
int err, map_fd, prog_fd, main_fd, data_fd, i, val;
struct bpf_map *prog_array, *data_map;
struct bpf_program *prog;
struct bpf_object *obj;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_size_in = sizeof(buff),
err = bpf_prog_test_load("tailcall_bpf2bpf2.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
prog = bpf_object__find_program_by_name(obj, "entry");
if (CHECK_FAIL(!prog))
main_fd = bpf_program__fd(prog);
if (CHECK_FAIL(main_fd < 0))
prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
if (CHECK_FAIL(!prog_array))
map_fd = bpf_map__fd(prog_array);
if (CHECK_FAIL(map_fd < 0))
prog = bpf_object__find_program_by_name(obj, "classifier_0");
if (CHECK_FAIL(!prog))
prog_fd = bpf_program__fd(prog);
if (CHECK_FAIL(prog_fd < 0))
err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, 1, "tailcall retval");
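/* classifier_0 keeps tail-calling back to itself through a bpf2bpf subprogram
 * until the tail call limit is hit; the counter in .bss below is expected to
 * record 33 tail calls.
 */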
data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
data_fd = bpf_map__fd(data_map);
if (CHECK_FAIL(data_fd < 0))
err = bpf_map_lookup_elem(data_fd, &i, &val);
ASSERT_OK(err, "tailcall count");
ASSERT_EQ(val, 33, "tailcall count");
err = bpf_map_delete_elem(map_fd, &i);
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_OK(topts.retval, "tailcall retval");
bpf_object__close(obj);
/* test_tailcall_bpf2bpf_3 checks that a non-trivial amount of stack (up to
 * 256 bytes) can be used within BPF subprograms that have tail calls in them.
 */
static void test_tailcall_bpf2bpf_3(void)
int err, map_fd, prog_fd, main_fd, i;
struct bpf_map *prog_array;
struct bpf_program *prog;
struct bpf_object *obj;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_size_in = sizeof(pkt_v4),
err = bpf_prog_test_load("tailcall_bpf2bpf3.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
prog = bpf_object__find_program_by_name(obj, "entry");
if (CHECK_FAIL(!prog))
main_fd = bpf_program__fd(prog);
if (CHECK_FAIL(main_fd < 0))
prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
if (CHECK_FAIL(!prog_array))
map_fd = bpf_map__fd(prog_array);
if (CHECK_FAIL(map_fd < 0))
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK_FAIL(!prog))
prog_fd = bpf_program__fd(prog);
if (CHECK_FAIL(prog_fd < 0))
err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");
err = bpf_map_delete_elem(map_fd, &i);
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, sizeof(pkt_v4), "tailcall retval");
err = bpf_map_delete_elem(map_fd, &i);
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
bpf_object__close(obj);
#include "tailcall_bpf2bpf4.skel.h"
/* test_tailcall_bpf2bpf_4 checks that the tailcall counter is correctly
 * preserved across tail calls combined with bpf2bpf calls. To make sure the
 * tail call counter behaves correctly, the BPF program goes through the
 * following flow:
 *
 * entry -> entry_subprog -> tailcall0 -> bpf_func0 -> subprog0 ->
 * -> tailcall1 -> bpf_func1 -> subprog1 -> tailcall2 -> bpf_func2 ->
 * subprog2 [here bump global counter] --------^
 *
 * We go through the first two tail calls and start counting from subprog2,
 * where the loop begins. At the end of the test make sure that the global
 * counter is equal to 31, because the tail call counter includes the first
 * two tail calls whereas the global counter is incremented only in the loop
 * shown above.
 *
 * The noise parameter is used to insert bpf_map_update calls into the logic
 * to force the verifier to patch instructions. This allows us to ensure the
 * jump logic remains correct as instructions move.
 */
static void test_tailcall_bpf2bpf_4(bool noise)
int err, map_fd, prog_fd, main_fd, data_fd, i;
struct tailcall_bpf2bpf4__bss val;
struct bpf_map *prog_array, *data_map;
struct bpf_program *prog;
struct bpf_object *obj;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_size_in = sizeof(pkt_v4),
err = bpf_prog_test_load("tailcall_bpf2bpf4.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
prog = bpf_object__find_program_by_name(obj, "entry");
if (CHECK_FAIL(!prog))
main_fd = bpf_program__fd(prog);
if (CHECK_FAIL(main_fd < 0))
prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
if (CHECK_FAIL(!prog_array))
map_fd = bpf_map__fd(prog_array);
if (CHECK_FAIL(map_fd < 0))
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK_FAIL(!prog))
prog_fd = bpf_program__fd(prog);
if (CHECK_FAIL(prog_fd < 0))
err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
data_fd = bpf_map__fd(data_map);
if (CHECK_FAIL(data_fd < 0))
err = bpf_map_update_elem(data_fd, &i, &val, BPF_ANY);
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");
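/* Out of the 33 permitted tail calls, the first two happen before the
 * counting loop described above, so the global counter is expected to read
 * 33 - 2 = 31.
 */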
err = bpf_map_lookup_elem(data_fd, &i, &val);
ASSERT_OK(err, "tailcall count");
ASSERT_EQ(val.count, 31, "tailcall count");
bpf_object__close(obj);
#include "tailcall_bpf2bpf6.skel.h"
/* Tail call counting works even when there is data on the stack which is
 * not aligned to 8 bytes.
 */
static void test_tailcall_bpf2bpf_6(void)
struct tailcall_bpf2bpf6 *obj;
int err, map_fd, prog_fd, main_fd, data_fd, i, val;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_size_in = sizeof(pkt_v4),
obj = tailcall_bpf2bpf6__open_and_load();
if (!ASSERT_OK_PTR(obj, "open and load"))
main_fd = bpf_program__fd(obj->progs.entry);
if (!ASSERT_GE(main_fd, 0, "entry prog fd"))
map_fd = bpf_map__fd(obj->maps.jmp_table);
if (!ASSERT_GE(map_fd, 0, "jmp_table map fd"))
prog_fd = bpf_program__fd(obj->progs.classifier_0);
if (!ASSERT_GE(prog_fd, 0, "classifier_0 prog fd"))
err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
if (!ASSERT_OK(err, "jmp_table map update"))
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "entry prog test run");
ASSERT_EQ(topts.retval, 0, "tailcall retval");
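/* The BPF side sets a "done" flag in its .bss once the tail call has run with
 * the unaligned stack data in place; read it back to confirm the call
 * actually happened.
 */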
data_fd = bpf_map__fd(obj->maps.bss);
if (!ASSERT_GE(data_fd, 0, "bss map fd"))
err = bpf_map_lookup_elem(data_fd, &i, &val);
ASSERT_OK(err, "bss map lookup");
ASSERT_EQ(val, 1, "done flag is set");
tailcall_bpf2bpf6__destroy(obj);
/* test_tailcall_bpf2bpf_fentry checks that the count value of the tail call
 * limit enforcement matches expectations when the tail call is preceded by a
 * bpf2bpf call, and the bpf2bpf call is traced by fentry.
 */
static void test_tailcall_bpf2bpf_fentry(void)
test_tailcall_count("tailcall_bpf2bpf2.bpf.o", true, false);
/* test_tailcall_bpf2bpf_fexit checks that the count value of the tail call
 * limit enforcement matches expectations when the tail call is preceded by a
 * bpf2bpf call, and the bpf2bpf call is traced by fexit.
 */
static void test_tailcall_bpf2bpf_fexit(void)
test_tailcall_count("tailcall_bpf2bpf2.bpf.o", false, true);
/* test_tailcall_bpf2bpf_fentry_fexit checks that the count value of the tail
 * call limit enforcement matches expectations when the tail call is preceded
 * by a bpf2bpf call, and the bpf2bpf call is traced by both fentry and fexit.
 */
static void test_tailcall_bpf2bpf_fentry_fexit(void)
test_tailcall_count("tailcall_bpf2bpf2.bpf.o", true, true);
/* test_tailcall_bpf2bpf_fentry_entry checks that the count value of the tail
 * call limit enforcement matches expectations when the tail call is preceded
 * by a bpf2bpf call, and the bpf2bpf caller is traced by fentry.
 */
static void test_tailcall_bpf2bpf_fentry_entry(void)
struct bpf_object *tgt_obj = NULL, *fentry_obj = NULL;
int err, map_fd, prog_fd, data_fd, i, val;
struct bpf_map *prog_array, *data_map;
struct bpf_link *fentry_link = NULL;
struct bpf_program *prog;
char buff[128] = {};
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_size_in = sizeof(buff),
err = bpf_prog_test_load("tailcall_bpf2bpf2.bpf.o",
BPF_PROG_TYPE_SCHED_CLS,
&tgt_obj, &prog_fd);
if (!ASSERT_OK(err, "load tgt_obj"))
prog_array = bpf_object__find_map_by_name(tgt_obj, "jmp_table");
if (!ASSERT_OK_PTR(prog_array, "find jmp_table map"))
map_fd = bpf_map__fd(prog_array);
if (!ASSERT_FALSE(map_fd < 0, "find jmp_table map fd"))
prog = bpf_object__find_program_by_name(tgt_obj, "classifier_0");
if (!ASSERT_OK_PTR(prog, "find classifier_0 prog"))
prog_fd = bpf_program__fd(prog);
if (!ASSERT_FALSE(prog_fd < 0, "find classifier_0 prog fd"))
err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
if (!ASSERT_OK(err, "update jmp_table"))
fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o",
if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
prog = bpf_object__find_program_by_name(fentry_obj, "fentry");
if (!ASSERT_OK_PTR(prog, "find fentry prog"))
err = bpf_program__set_attach_target(prog, prog_fd, "classifier_0");
if (!ASSERT_OK(err, "set_attach_target classifier_0"))
err = bpf_object__load(fentry_obj);
if (!ASSERT_OK(err, "load fentry_obj"))
fentry_link = bpf_program__attach_trace(prog);
if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, 1, "tailcall retval");
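/* classifier_0 was run directly above and then re-entered via tail calls up
 * to the limit, so its counter should read 34 (one direct entry plus 33 tail
 * calls), while the fentry program on classifier_0's caller fires once.
 */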
data_map = bpf_object__find_map_by_name(tgt_obj, "tailcall.bss");
if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
"find tailcall.bss map"))
data_fd = bpf_map__fd(data_map);
if (!ASSERT_FALSE(data_fd < 0, "find tailcall.bss map fd"))
err = bpf_map_lookup_elem(data_fd, &i, &val);
ASSERT_OK(err, "tailcall count");
ASSERT_EQ(val, 34, "tailcall count");
data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
"find tailcall_bpf2bpf_fentry.bss map"))
data_fd = bpf_map__fd(data_map);
if (!ASSERT_FALSE(data_fd < 0,
"find tailcall_bpf2bpf_fentry.bss map fd"))
err = bpf_map_lookup_elem(data_fd, &i, &val);
ASSERT_OK(err, "fentry count");
ASSERT_EQ(val, 1, "fentry count");
bpf_link__destroy(fentry_link);
bpf_object__close(fentry_obj);
bpf_object__close(tgt_obj);
#define JMP_TABLE "/sys/fs/bpf/jmp_table"
static int poke_thread_exit;
static void *poke_update(void *arg)
__u32 zero = 0, prog1_fd, prog2_fd, map_fd;
struct tailcall_poke *call = arg;
map_fd = bpf_map__fd(call->maps.jmp_table);
prog1_fd = bpf_program__fd(call->progs.call1);
prog2_fd = bpf_program__fd(call->progs.call2);
while (!poke_thread_exit) {
bpf_map_update_elem(map_fd, &zero, &prog1_fd, BPF_ANY);
bpf_map_update_elem(map_fd, &zero, &prog2_fd, BPF_ANY);
/* We are trying to hit a prog array update during another program load
 * that shares the same prog array map.
 *
 * For that we share the jmp_table map between two skeleton instances by
 * pinning the jmp_table to the same path. Then the first skeleton instance
 * periodically updates the jmp_table in the 'poke update' thread while we
 * load the second skeleton instance in the main thread.
 */
static void test_tailcall_poke(void)
struct tailcall_poke *call, *test;
call = tailcall_poke__open_and_load();
if (!ASSERT_OK_PTR(call, "tailcall_poke__open"))
err = bpf_map__pin(call->maps.jmp_table, JMP_TABLE);
if (!ASSERT_OK(err, "bpf_map__pin"))
err = pthread_create(&thread, NULL, poke_update, call);
if (!ASSERT_OK(err, "new toggler"))
test = tailcall_poke__open();
if (!ASSERT_OK_PTR(test, "tailcall_poke__open"))
err = bpf_map__set_pin_path(test->maps.jmp_table, JMP_TABLE);
if (!ASSERT_OK(err, "bpf_map__pin")) {
tailcall_poke__destroy(test);
bpf_program__set_autoload(test->progs.test, true);
bpf_program__set_autoload(test->progs.call1, false);
bpf_program__set_autoload(test->progs.call2, false);
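/* Only the "test" program of the second skeleton is loaded; it reuses the
 * pinned jmp_table that the poke thread keeps rewriting, so the load below
 * races with prog array updates as described above.
 */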
err = tailcall_poke__load(test);
tailcall_poke__destroy(test);
if (!ASSERT_OK(err, "tailcall_poke__load"))
poke_thread_exit = 1;
ASSERT_OK(pthread_join(thread, NULL), "pthread_join");
bpf_map__unpin(call->maps.jmp_table, JMP_TABLE);
tailcall_poke__destroy(call);
void test_tailcalls(void)
if (test__start_subtest("tailcall_1"))
test_tailcall_1();
if (test__start_subtest("tailcall_2"))
test_tailcall_2();
if (test__start_subtest("tailcall_3"))
test_tailcall_3();
if (test__start_subtest("tailcall_4"))
test_tailcall_4();
if (test__start_subtest("tailcall_5"))
test_tailcall_5();
if (test__start_subtest("tailcall_6"))
test_tailcall_6();
if (test__start_subtest("tailcall_bpf2bpf_1"))
test_tailcall_bpf2bpf_1();
if (test__start_subtest("tailcall_bpf2bpf_2"))
test_tailcall_bpf2bpf_2();
if (test__start_subtest("tailcall_bpf2bpf_3"))
test_tailcall_bpf2bpf_3();
if (test__start_subtest("tailcall_bpf2bpf_4"))
test_tailcall_bpf2bpf_4(false);
if (test__start_subtest("tailcall_bpf2bpf_5"))
test_tailcall_bpf2bpf_4(true);
if (test__start_subtest("tailcall_bpf2bpf_6"))
test_tailcall_bpf2bpf_6();
if (test__start_subtest("tailcall_bpf2bpf_fentry"))
test_tailcall_bpf2bpf_fentry();
if (test__start_subtest("tailcall_bpf2bpf_fexit"))
test_tailcall_bpf2bpf_fexit();
if (test__start_subtest("tailcall_bpf2bpf_fentry_fexit"))
test_tailcall_bpf2bpf_fentry_fexit();
if (test__start_subtest("tailcall_bpf2bpf_fentry_entry"))
test_tailcall_bpf2bpf_fentry_entry();
if (test__start_subtest("tailcall_poke"))
test_tailcall_poke();