// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook */
#include "test_progs.h"
#include "testing_helpers.h"
#include "cgroup_helpers.h"
#include <execinfo.h> /* backtrace */
#include <sys/sysinfo.h> /* get_nprocs */
#include <netinet/in.h>
#include <sys/select.h>
#include <sys/socket.h>
#include "json_writer.h"
static bool verbose(void)
	return env.verbosity > VERBOSE_NONE;

static void stdio_hijack_init(char **log_buf, size_t *log_cnt)
	if (verbose() && env.worker_id == -1) {
		/* nothing to do, output to stdout by default */
	stdout = open_memstream(log_buf, log_cnt);
		perror("open_memstream");
	if (env.subtest_state)
		env.subtest_state->stdout = stdout;
		env.test_state->stdout = stdout;
static void stdio_hijack(char **log_buf, size_t *log_cnt)
	if (verbose() && env.worker_id == -1) {
		/* nothing to do, output to stdout by default */
	stdio_hijack_init(log_buf, log_cnt);

static void stdio_restore_cleanup(void)
	if (verbose() && env.worker_id == -1) {
		/* nothing to do, output to stdout by default */
	if (env.subtest_state) {
		fclose(env.subtest_state->stdout);
		env.subtest_state->stdout = NULL;
		stdout = env.test_state->stdout;
		stderr = env.test_state->stdout;
		fclose(env.test_state->stdout);
		env.test_state->stdout = NULL;

static void stdio_restore(void)
	if (verbose() && env.worker_id == -1) {
		/* nothing to do, output to stdout by default */
	if (stdout == env.stdout)
	stdio_restore_cleanup();
/* Adapted from perf/util/string.c */
static bool glob_match(const char *str, const char *pat)
	while (*str && *pat && *pat != '*') {
	/* Check wild card */
		if (!*pat) /* Tail wild card matches all */
			if (glob_match(str++, pat))
	return !*str && !*pat;
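/* Illustrative behaviour of the matcher above (examples added for clarity,
 * not taken from the original sources): glob_match("bpf_cookie", "bpf_*") and
 * glob_match("bpf_cookie", "*cookie") both match, while
 * glob_match("bpf_cookie", "send_signal") does not; only '*' is treated as a
 * wildcard here.
 */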
#define EXIT_NO_TEST 2
#define EXIT_ERR_SETUP_INFRA 3

/* defined in test_progs.h */
struct test_env env = {};

struct prog_test_def {
	const char *test_name;
	void (*run_test)(void);
	void (*run_serial_test)(void);
	bool need_cgroup_cleanup;

/* Override C runtime library's usleep() implementation to ensure nanosleep()
 * is always called. usleep() is frequently used in selftests as a way to
 * trigger kprobes and tracepoints.
 */
int usleep(useconds_t usec)
	struct timespec ts = {
		.tv_sec = usec / 1000000,
		.tv_nsec = (usec % 1000000) * 1000,
	return syscall(__NR_nanosleep, &ts, NULL);
static bool should_run(struct test_selector *sel, int num, const char *name)
	for (i = 0; i < sel->blacklist.cnt; i++) {
		if (glob_match(name, sel->blacklist.tests[i].name) &&
		    !sel->blacklist.tests[i].subtest_cnt)
	for (i = 0; i < sel->whitelist.cnt; i++) {
		if (glob_match(name, sel->whitelist.tests[i].name))
	if (!sel->whitelist.cnt && !sel->num_set)
	return num < sel->num_set_len && sel->num_set[num];

static bool should_run_subtest(struct test_selector *sel,
			       struct test_selector *subtest_sel,
			       const char *test_name,
			       const char *subtest_name)
	for (i = 0; i < sel->blacklist.cnt; i++) {
		if (glob_match(test_name, sel->blacklist.tests[i].name)) {
			if (!sel->blacklist.tests[i].subtest_cnt)
			for (j = 0; j < sel->blacklist.tests[i].subtest_cnt; j++) {
				if (glob_match(subtest_name,
					       sel->blacklist.tests[i].subtests[j]))
	for (i = 0; i < sel->whitelist.cnt; i++) {
		if (glob_match(test_name, sel->whitelist.tests[i].name)) {
			if (!sel->whitelist.tests[i].subtest_cnt)
			for (j = 0; j < sel->whitelist.tests[i].subtest_cnt; j++) {
				if (glob_match(subtest_name,
					       sel->whitelist.tests[i].subtests[j]))
	if (!sel->whitelist.cnt && !subtest_sel->num_set)
	return subtest_num < subtest_sel->num_set_len && subtest_sel->num_set[subtest_num];
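/* Summary of the selection logic above (descriptive note, no new behaviour):
 * a deny (blacklist) entry always wins; an allow (whitelist) entry then selects
 * the test; otherwise the test runs if neither an allow list nor a numeric -n
 * set was given, or if its number is present in the numeric set.
 */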
static char *test_result(bool failed, bool skipped)
	return failed ? "FAIL" : (skipped ? "SKIP" : "OK");

#define TEST_NUM_WIDTH 7

static void print_test_result(const struct prog_test_def *test, const struct test_state *test_state)
	int skipped_cnt = test_state->skip_cnt;
	int subtests_cnt = test_state->subtest_num;
	fprintf(env.stdout, "#%-*d %s:", TEST_NUM_WIDTH, test->test_num, test->test_name);
	if (test_state->error_cnt)
		fprintf(env.stdout, "FAIL");
	else if (!skipped_cnt)
		fprintf(env.stdout, "OK");
	else if (skipped_cnt == subtests_cnt || !subtests_cnt)
		fprintf(env.stdout, "SKIP");
		fprintf(env.stdout, "OK (SKIP: %d/%d)", skipped_cnt, subtests_cnt);
	fprintf(env.stdout, "\n");

static void print_test_log(char *log_buf, size_t log_cnt)
	log_buf[log_cnt] = '\0';
	fprintf(env.stdout, "%s", log_buf);
	if (log_buf[log_cnt - 1] != '\n')
		fprintf(env.stdout, "\n");
static void print_subtest_name(int test_num, int subtest_num,
			       const char *test_name, char *subtest_name,
	char test_num_str[32];
	snprintf(test_num_str, sizeof(test_num_str), "%d/%d", test_num, subtest_num);
	fprintf(env.stdout, "#%-*s %s/%s",
		TEST_NUM_WIDTH, test_num_str,
		test_name, subtest_name);
		fprintf(env.stdout, ":%s", result);
	fprintf(env.stdout, "\n");

static void jsonw_write_log_message(json_writer_t *w, char *log_buf, size_t log_cnt)
	/* open_memstream (from stdio_hijack_init) ensures that log_buf is terminated by a
	 * null byte. Yet in parallel mode, log_buf will be NULL if there is no message.
	 */
		jsonw_string_field(w, "message", log_buf);
		jsonw_string_field(w, "message", "");
static void dump_test_log(const struct prog_test_def *test,
			  const struct test_state *test_state,
			  bool skip_ok_subtests,
			  bool par_exec_result,
	bool test_failed = test_state->error_cnt > 0;
	bool force_log = test_state->force_log;
	bool print_test = verbose() || force_log || test_failed;
	struct subtest_state *subtest_state;
	bool subtest_filtered;
	/* we do not print anything in the worker thread */
	if (env.worker_id != -1)
	/* there is nothing to print when verbose log is used and execution
	 * is not in parallel mode
	 */
	if (verbose() && !par_exec_result)
	if (test_state->log_cnt && print_test)
		print_test_log(test_state->log_buf, test_state->log_cnt);
	if (w && print_test) {
		jsonw_start_object(w);
		jsonw_string_field(w, "name", test->test_name);
		jsonw_uint_field(w, "number", test->test_num);
		jsonw_write_log_message(w, test_state->log_buf, test_state->log_cnt);
		jsonw_bool_field(w, "failed", test_failed);
		jsonw_name(w, "subtests");
		jsonw_start_array(w);
	for (i = 0; i < test_state->subtest_num; i++) {
		subtest_state = &test_state->subtest_states[i];
		subtest_failed = subtest_state->error_cnt;
		subtest_filtered = subtest_state->filtered;
		print_subtest = verbose() || force_log || subtest_failed;
		if ((skip_ok_subtests && !subtest_failed) || subtest_filtered)
		if (subtest_state->log_cnt && print_subtest) {
			print_test_log(subtest_state->log_buf,
				       subtest_state->log_cnt);
		print_subtest_name(test->test_num, i + 1,
				   test->test_name, subtest_state->name,
				   test_result(subtest_state->error_cnt,
					       subtest_state->skipped));
		if (w && print_subtest) {
			jsonw_start_object(w);
			jsonw_string_field(w, "name", subtest_state->name);
			jsonw_uint_field(w, "number", i+1);
			jsonw_write_log_message(w, subtest_state->log_buf, subtest_state->log_cnt);
			jsonw_bool_field(w, "failed", subtest_failed);
	if (w && print_test) {
	print_test_result(test, test_state);
static void stdio_restore(void);

/* A bunch of tests set custom affinity per-thread and/or per-process. Reset
 * it after each test/sub-test.
 */
static void reset_affinity(void)
	for (i = 0; i < env.nr_cpus; i++)
	err = sched_setaffinity(0, sizeof(cpuset), &cpuset);
		fprintf(stderr, "Failed to reset process affinity: %d!\n", err);
		exit(EXIT_ERR_SETUP_INFRA);
	err = pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
		fprintf(stderr, "Failed to reset thread affinity: %d!\n", err);
		exit(EXIT_ERR_SETUP_INFRA);

static void save_netns(void)
	env.saved_netns_fd = open("/proc/self/ns/net", O_RDONLY);
	if (env.saved_netns_fd == -1) {
		perror("open(/proc/self/ns/net)");
		exit(EXIT_ERR_SETUP_INFRA);

static void restore_netns(void)
	if (setns(env.saved_netns_fd, CLONE_NEWNET) == -1) {
		perror("setns(CLONE_NEWNET)");
		exit(EXIT_ERR_SETUP_INFRA);
void test__end_subtest(void)
	struct prog_test_def *test = env.test;
	struct test_state *test_state = env.test_state;
	struct subtest_state *subtest_state = env.subtest_state;
	if (subtest_state->error_cnt) {
		test_state->error_cnt++;
		if (!subtest_state->skipped)
			test_state->sub_succ_cnt++;
			test_state->skip_cnt++;
	if (verbose() && !env.workers)
		print_subtest_name(test->test_num, test_state->subtest_num,
				   test->test_name, subtest_state->name,
				   test_result(subtest_state->error_cnt,
					       subtest_state->skipped));
	stdio_restore_cleanup();
	env.subtest_state = NULL;

bool test__start_subtest(const char *subtest_name)
	struct prog_test_def *test = env.test;
	struct test_state *state = env.test_state;
	struct subtest_state *subtest_state;
	size_t sub_state_size = sizeof(*subtest_state);
	if (env.subtest_state)
	state->subtest_num++;
	state->subtest_states =
		realloc(state->subtest_states,
			state->subtest_num * sub_state_size);
	if (!state->subtest_states) {
		fprintf(stderr, "Not enough memory to allocate subtest result\n");
	subtest_state = &state->subtest_states[state->subtest_num - 1];
	memset(subtest_state, 0, sub_state_size);
	if (!subtest_name || !subtest_name[0]) {
			"Subtest #%d didn't provide sub-test name!\n",
	subtest_state->name = strdup(subtest_name);
	if (!subtest_state->name) {
			"Subtest #%d: failed to copy subtest name!\n",
	if (!should_run_subtest(&env.test_selector,
				&env.subtest_selector,
		subtest_state->filtered = true;
	env.subtest_state = subtest_state;
	stdio_hijack_init(&subtest_state->log_buf, &subtest_state->log_cnt);
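/* Typical usage from a test function (illustrative sketch only; the helper
 * names below are hypothetical):
 *
 *	void test_example(void)
 *	{
 *		if (test__start_subtest("first_case"))
 *			subtest_first_case();
 *		if (test__start_subtest("second_case"))
 *			subtest_second_case();
 *	}
 *
 * Each subtest gets its own captured log via stdio_hijack_init() above and is
 * finalized by test__end_subtest(), either when the next subtest starts or
 * when the enclosing test finishes.
 */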
void test__force_log(void)
	env.test_state->force_log = true;

void test__skip(void)
	if (env.subtest_state)
		env.subtest_state->skipped = true;
		env.test_state->skip_cnt++;

void test__fail(void)
	if (env.subtest_state)
		env.subtest_state->error_cnt++;
		env.test_state->error_cnt++;

int test__join_cgroup(const char *path)
	if (!env.test->need_cgroup_cleanup) {
		if (setup_cgroup_environment()) {
				"#%d %s: Failed to setup cgroup environment\n",
				env.test->test_num, env.test->test_name);
		env.test->need_cgroup_cleanup = true;
	fd = create_and_get_cgroup(path);
			"#%d %s: Failed to create cgroup '%s' (errno=%d)\n",
			env.test->test_num, env.test->test_name, path, errno);
	if (join_cgroup(path)) {
			"#%d %s: Failed to join cgroup '%s' (errno=%d)\n",
			env.test->test_num, env.test->test_name, path, errno);
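/* Note: once need_cgroup_cleanup has been set here, run_one_test() further
 * down calls cleanup_cgroup_environment() when the test finishes, so callers
 * normally only need to close the cgroup fd returned by this function.
 */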
int bpf_find_map(const char *test, struct bpf_object *obj, const char *name)
	map = bpf_object__find_map_by_name(obj, name);
		fprintf(stdout, "%s:FAIL:map '%s' not found\n", test, name);
	return bpf_map__fd(map);
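/* Descriptive note added for clarity: compare_map_keys() below checks that
 * every key present in map1 can also be looked up in map2, and
 * compare_stack_ips() checks that a stack map and an array map hold
 * byte-identical stack traces; both are shared helpers used by the
 * stack-trace related tests.
 */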
int compare_map_keys(int map1_fd, int map2_fd)
	char val_buf[PERF_MAX_STACK_DEPTH *
		     sizeof(struct bpf_stack_build_id)];
	err = bpf_map_get_next_key(map1_fd, NULL, &key);
	err = bpf_map_lookup_elem(map2_fd, &key, val_buf);
	while (bpf_map_get_next_key(map1_fd, &key, &next_key) == 0) {
		err = bpf_map_lookup_elem(map2_fd, &next_key, val_buf);

int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len)
	__u32 key, next_key, *cur_key_p, *next_key_p;
	char *val_buf1, *val_buf2;
	val_buf1 = malloc(stack_trace_len);
	val_buf2 = malloc(stack_trace_len);
	while (bpf_map_get_next_key(smap_fd, cur_key_p, next_key_p) == 0) {
		err = bpf_map_lookup_elem(smap_fd, next_key_p, val_buf1);
		err = bpf_map_lookup_elem(amap_fd, next_key_p, val_buf2);
		for (i = 0; i < stack_trace_len; i++) {
			if (val_buf1[i] != val_buf2[i]) {
		next_key_p = &next_key;
/* extern declarations for test funcs */
#define DEFINE_TEST(name)						\
	extern void test_##name(void) __weak;				\
	extern void serial_test_##name(void) __weak;
#include <prog_tests/tests.h>

static struct prog_test_def prog_test_defs[] = {
#define DEFINE_TEST(name) {			\
	.test_name = #name,			\
	.run_test = &test_##name,		\
	.run_serial_test = &serial_test_##name,	\
#include <prog_tests/tests.h>

static const int prog_test_cnt = ARRAY_SIZE(prog_test_defs);

static struct test_state test_states[ARRAY_SIZE(prog_test_defs)];

const char *argp_program_version = "test_progs 0.1";

static const char argp_program_doc[] =
"BPF selftests test runner\v"
"Options accepting the NAMES parameter take either a comma-separated list\n"
"of test names, or a filename prefixed with @. The file contains one name\n"
"(or wildcard pattern) per line, and comments beginning with # are ignored.\n"
"These options can be passed repeatedly to read multiple files.\n";
	ARG_TEST_NAME_BLACKLIST = 'b',
	ARG_VERIFIER_STATS = 's',
	ARG_GET_TEST_CNT = 'c',
	ARG_LIST_TEST_NAMES = 'l',
	ARG_TEST_NAME_GLOB_ALLOWLIST = 'a',
	ARG_TEST_NAME_GLOB_DENYLIST = 'd',
	ARG_NUM_WORKERS = 'j',
	ARG_JSON_SUMMARY = 'J'

static const struct argp_option opts[] = {
	{ "num", ARG_TEST_NUM, "NUM", 0,
	  "Run test number NUM only" },
	{ "name", ARG_TEST_NAME, "NAMES", 0,
	  "Run tests with names containing any string from NAMES list" },
	{ "name-blacklist", ARG_TEST_NAME_BLACKLIST, "NAMES", 0,
	  "Don't run tests with names containing any string from NAMES list" },
	{ "verifier-stats", ARG_VERIFIER_STATS, NULL, 0,
	  "Output verifier statistics", },
	{ "verbose", ARG_VERBOSE, "LEVEL", OPTION_ARG_OPTIONAL,
	  "Verbose output (use -vv or -vvv for progressively verbose output)" },
	{ "count", ARG_GET_TEST_CNT, NULL, 0,
	  "Get number of selected top-level tests" },
	{ "list", ARG_LIST_TEST_NAMES, NULL, 0,
	  "List test names that would run (without running them)" },
	{ "allow", ARG_TEST_NAME_GLOB_ALLOWLIST, "NAMES", 0,
	  "Run tests with name matching the pattern (supports '*' wildcard)." },
	{ "deny", ARG_TEST_NAME_GLOB_DENYLIST, "NAMES", 0,
	  "Don't run tests with name matching the pattern (supports '*' wildcard)." },
	{ "workers", ARG_NUM_WORKERS, "WORKERS", OPTION_ARG_OPTIONAL,
	  "Number of workers to run in parallel, defaults to the number of CPUs." },
	{ "debug", ARG_DEBUG, NULL, 0,
	  "Print extra debug information for test_progs." },
	{ "json-summary", ARG_JSON_SUMMARY, "FILE", 0, "Write report in JSON format to this file." },
static FILE *libbpf_capture_stream;

} libbpf_output_capture;

/* Creates a global memstream capturing INFO and WARN level output
 * passed to libbpf_print_fn.
 * Returns 0 on success, negative value on failure.
 * On failure the description is printed using PRINT_FAIL and
 * the current test case is marked as failed.
 */
int start_libbpf_log_capture(void)
	if (libbpf_capture_stream) {
		PRINT_FAIL("%s: libbpf_capture_stream != NULL\n", __func__);
	libbpf_capture_stream = open_memstream(&libbpf_output_capture.buf,
					       &libbpf_output_capture.buf_sz);
	if (!libbpf_capture_stream) {
		PRINT_FAIL("%s: open_memstream failed errno=%d\n", __func__, errno);

/* Destroys global memstream created by start_libbpf_log_capture().
 * Returns a pointer to captured data which has to be freed.
 * Returned buffer is null terminated.
 */
char *stop_libbpf_log_capture(void)
	if (!libbpf_capture_stream)
	fputc(0, libbpf_capture_stream);
	fclose(libbpf_capture_stream);
	libbpf_capture_stream = NULL;
	/* get 'buf' after fclose(), see open_memstream() documentation */
	buf = libbpf_output_capture.buf;
	memset(&libbpf_output_capture, 0, sizeof(libbpf_output_capture));
static int libbpf_print_fn(enum libbpf_print_level level,
			   const char *format, va_list args)
	if (libbpf_capture_stream && level != LIBBPF_DEBUG) {
		va_copy(args2, args);
		vfprintf(libbpf_capture_stream, format, args2);
	if (env.verbosity < VERBOSE_VERY && level == LIBBPF_DEBUG)
	vfprintf(stdout, format, args);

static void free_test_filter_set(const struct test_filter_set *set)
	for (i = 0; i < set->cnt; i++) {
		free((void *)set->tests[i].name);
		for (j = 0; j < set->tests[i].subtest_cnt; j++)
			free((void *)set->tests[i].subtests[j]);
		free((void *)set->tests[i].subtests);
	free((void *)set->tests);

static void free_test_selector(struct test_selector *test_selector)
	free_test_filter_set(&test_selector->blacklist);
	free_test_filter_set(&test_selector->whitelist);
	free(test_selector->num_set);
extern int extra_prog_load_log_flags;

static error_t parse_arg(int key, char *arg, struct argp_state *state)
	struct test_env *env = state->input;
		char *subtest_str = strchr(arg, '/');
			if (parse_num_list(subtest_str + 1,
					   &env->subtest_selector.num_set,
					   &env->subtest_selector.num_set_len)) {
					"Failed to parse subtest numbers.\n");
		if (parse_num_list(arg, &env->test_selector.num_set,
				   &env->test_selector.num_set_len)) {
			fprintf(stderr, "Failed to parse test numbers.\n");
	case ARG_TEST_NAME_GLOB_ALLOWLIST:
	case ARG_TEST_NAME: {
			err = parse_test_list_file(arg + 1,
						   &env->test_selector.whitelist,
						   key == ARG_TEST_NAME_GLOB_ALLOWLIST);
			err = parse_test_list(arg,
					      &env->test_selector.whitelist,
					      key == ARG_TEST_NAME_GLOB_ALLOWLIST);
	case ARG_TEST_NAME_GLOB_DENYLIST:
	case ARG_TEST_NAME_BLACKLIST: {
			err = parse_test_list_file(arg + 1,
						   &env->test_selector.blacklist,
						   key == ARG_TEST_NAME_GLOB_DENYLIST);
			err = parse_test_list(arg,
					      &env->test_selector.blacklist,
					      key == ARG_TEST_NAME_GLOB_DENYLIST);
	case ARG_VERIFIER_STATS:
		env->verifier_stats = true;
		env->verbosity = VERBOSE_NORMAL;
			if (strcmp(arg, "v") == 0) {
				env->verbosity = VERBOSE_VERY;
				extra_prog_load_log_flags = 1;
			} else if (strcmp(arg, "vv") == 0) {
				env->verbosity = VERBOSE_SUPER;
				extra_prog_load_log_flags = 2;
					"Unrecognized verbosity setting ('%s'), only -v and -vv are supported\n",
			if (setenv("SELFTESTS_VERBOSE", "1", 1) == -1) {
					"Unable to setenv SELFTESTS_VERBOSE=1 (errno=%d)",
	case ARG_GET_TEST_CNT:
		env->get_test_cnt = true;
	case ARG_LIST_TEST_NAMES:
		env->list_test_names = true;
	case ARG_NUM_WORKERS:
			env->workers = atoi(arg);
				fprintf(stderr, "Invalid number of workers: %s.", arg);
			env->workers = get_nprocs();
	case ARG_JSON_SUMMARY:
		env->json = fopen(arg, "w");
		if (env->json == NULL) {
			perror("Failed to open JSON summary file");
		return ARGP_ERR_UNKNOWN;
/*
 * Determine if test_progs is running as a "flavored" test runner and switch
 * into the corresponding sub-directory to load the correct BPF objects.
 *
 * This is done by looking at the executable name. If it contains a "-flavor"
 * suffix, then we are running as a flavored test runner.
 */
int cd_flavor_subdir(const char *exec_name)
	/* General form of argv[0] passed here is:
	 * some/path/to/test_progs[-flavor], where -flavor part is optional.
	 * First cut out "test_progs[-flavor]" part, then extract "flavor"
	 * part, if it's there.
	 */
	const char *flavor = strrchr(exec_name, '/');
	flavor = strrchr(flavor, '-');
	fprintf(stdout, "Switching to flavor '%s' subdirectory...\n", flavor);
	return chdir(flavor);
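/* For example, a runner built as "test_progs-no_alu32" is expected to switch
 * into the no_alu32/ subdirectory so that the BPF objects built for that
 * flavor are picked up (example added for clarity).
 */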
int trigger_module_test_read(int read_sz)
	fd = open(BPF_TESTMOD_TEST_FILE, O_RDONLY);
	if (!ASSERT_GE(fd, 0, "testmod_file_open"))
	read(fd, NULL, read_sz);

int trigger_module_test_write(int write_sz)
	char *buf = malloc(write_sz);
	memset(buf, 'a', write_sz);
	buf[write_sz - 1] = '\0';
	fd = open(BPF_TESTMOD_TEST_FILE, O_WRONLY);
	if (!ASSERT_GE(fd, 0, "testmod_file_open")) {
	write(fd, buf, write_sz);

int write_sysctl(const char *sysctl, const char *value)
	fd = open(sysctl, O_WRONLY);
	if (!ASSERT_NEQ(fd, -1, "open sysctl"))
	err = write(fd, value, len);
	if (!ASSERT_EQ(err, len, "write sysctl"))
int get_bpf_max_tramp_links_from(struct btf *btf)
	const struct btf_enum *e;
	const struct btf_type *t;
	for (i = 1, type_cnt = btf__type_cnt(btf); i < type_cnt; i++) {
		t = btf__type_by_id(btf, i);
		if (!t || !btf_is_enum(t) || t->name_off)
		for (j = 0, vlen = btf_vlen(t); j < vlen; j++, e++) {
			name = btf__str_by_offset(btf, e->name_off);
			if (name && !strcmp(name, "BPF_MAX_TRAMP_LINKS"))

int get_bpf_max_tramp_links(void)
	struct btf *vmlinux_btf;
	vmlinux_btf = btf__load_vmlinux_btf();
	if (!ASSERT_OK_PTR(vmlinux_btf, "vmlinux btf"))
	ret = get_bpf_max_tramp_links_from(vmlinux_btf);
	btf__free(vmlinux_btf);
#define MAX_BACKTRACE_SZ 128

void crash_handler(int signum)
	void *bt[MAX_BACKTRACE_SZ];
	sz = backtrace(bt, ARRAY_SIZE(bt));
		env.test_state->error_cnt++;
		dump_test_log(env.test, env.test_state, true, false, NULL);
	if (env.worker_id != -1)
		fprintf(stderr, "[%d]: ", env.worker_id);
	fprintf(stderr, "Caught signal #%d!\nStack trace:\n", signum);
	backtrace_symbols_fd(bt, sz, STDERR_FILENO);

static void sigint_handler(int signum)
	for (i = 0; i < env.workers; i++)
		if (env.worker_socks[i] > 0)
			close(env.worker_socks[i]);

static int current_test_idx;
static pthread_mutex_t current_test_lock;
static pthread_mutex_t stdout_output_lock;
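/* Parallel-mode (-j) message protocol, as implemented below: each dispatcher
 * thread in the main process sends MSG_DO_TEST over its worker's socketpair;
 * the worker answers with MSG_TEST_DONE, followed by MSG_TEST_LOG and
 * MSG_SUBTEST_DONE messages carrying chunked log output and per-subtest
 * results; MSG_EXIT asks a worker to terminate. str_msg() renders these
 * messages for --debug output.
 */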
static inline const char *str_msg(const struct msg *msg, char *buf)
	switch (msg->type) {
		sprintf(buf, "MSG_DO_TEST %d", msg->do_test.num);
		sprintf(buf, "MSG_TEST_DONE %d (log: %d)",
			msg->test_done.have_log);
	case MSG_SUBTEST_DONE:
		sprintf(buf, "MSG_SUBTEST_DONE %d (log: %d)",
			msg->subtest_done.num,
			msg->subtest_done.have_log);
		sprintf(buf, "MSG_TEST_LOG (cnt: %zu, last: %d)",
			strlen(msg->test_log.log_buf),
			msg->test_log.is_last);
		sprintf(buf, "MSG_EXIT");
		sprintf(buf, "UNKNOWN");

static int send_message(int sock, const struct msg *msg)
		fprintf(stderr, "Sending msg: %s\n", str_msg(msg, buf));
	return send(sock, msg, sizeof(*msg), 0);

static int recv_message(int sock, struct msg *msg)
	memset(msg, 0, sizeof(*msg));
	ret = recv(sock, msg, sizeof(*msg), 0);
		fprintf(stderr, "Received msg: %s\n", str_msg(msg, buf));
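/* run_one_test() executes a single top-level test in the current process:
 * it redirects stdio into the test's log buffer, invokes the test (or serial
 * test) callback, finalizes a dangling subtest if any, releases cgroup state
 * when needed, and finally dumps the captured log.
 */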
static void run_one_test(int test_num)
	struct prog_test_def *test = &prog_test_defs[test_num];
	struct test_state *state = &test_states[test_num];
	env.test_state = state;
	stdio_hijack(&state->log_buf, &state->log_cnt);
	else if (test->run_serial_test)
		test->run_serial_test();
	/* ensure last sub-test is finalized properly */
	if (env.subtest_state)
		test__end_subtest();
	state->tested = true;
	if (verbose() && env.worker_id == -1)
		print_test_result(test, state);
	if (test->need_cgroup_cleanup)
		cleanup_cgroup_environment();
	free(stop_libbpf_log_capture());
	dump_test_log(test, state, false, false, NULL);
struct dispatch_data {

static int read_prog_test_msg(int sock_fd, struct msg *msg, enum msg_type type)
	if (recv_message(sock_fd, msg) < 0)
	if (msg->type != type) {
		printf("%s: unexpected message type %d. expected %d\n", __func__, msg->type, type);

static int dispatch_thread_read_log(int sock_fd, char **log_buf, size_t *log_cnt)
	FILE *log_fp = NULL;
	log_fp = open_memstream(log_buf, log_cnt);
		if (read_prog_test_msg(sock_fd, &msg, MSG_TEST_LOG)) {
		fprintf(log_fp, "%s", msg.test_log.log_buf);
		if (msg.test_log.is_last)

static int dispatch_thread_send_subtests(int sock_fd, struct test_state *state)
	struct subtest_state *subtest_state;
	int subtest_num = state->subtest_num;
	state->subtest_states = malloc(subtest_num * sizeof(*subtest_state));
	for (int i = 0; i < subtest_num; i++) {
		subtest_state = &state->subtest_states[i];
		memset(subtest_state, 0, sizeof(*subtest_state));
		if (read_prog_test_msg(sock_fd, &msg, MSG_SUBTEST_DONE))
		subtest_state->name = strdup(msg.subtest_done.name);
		subtest_state->error_cnt = msg.subtest_done.error_cnt;
		subtest_state->skipped = msg.subtest_done.skipped;
		subtest_state->filtered = msg.subtest_done.filtered;
		/* collect all logs */
		if (msg.subtest_done.have_log)
			if (dispatch_thread_read_log(sock_fd,
						     &subtest_state->log_buf,
						     &subtest_state->log_cnt))
static void *dispatch_thread(void *ctx)
	struct dispatch_data *data = ctx;
	sock_fd = data->sock_fd;
		int test_to_run = -1;
		struct prog_test_def *test;
		struct test_state *state;
		pthread_mutex_lock(&current_test_lock);
		if (current_test_idx >= prog_test_cnt) {
			pthread_mutex_unlock(&current_test_lock);
		test = &prog_test_defs[current_test_idx];
		test_to_run = current_test_idx;
		pthread_mutex_unlock(&current_test_lock);
		if (!test->should_run || test->run_serial_test)
		/* run test through worker */
			struct msg msg_do_test;
			memset(&msg_do_test, 0, sizeof(msg_do_test));
			msg_do_test.type = MSG_DO_TEST;
			msg_do_test.do_test.num = test_to_run;
			if (send_message(sock_fd, &msg_do_test) < 0) {
				perror("Failed to send command");
			env.worker_current_test[data->worker_id] = test_to_run;
		/* wait for test done */
			if (read_prog_test_msg(sock_fd, &msg, MSG_TEST_DONE))
			if (test_to_run != msg.test_done.num)
			state = &test_states[test_to_run];
			state->tested = true;
			state->error_cnt = msg.test_done.error_cnt;
			state->skip_cnt = msg.test_done.skip_cnt;
			state->sub_succ_cnt = msg.test_done.sub_succ_cnt;
			state->subtest_num = msg.test_done.subtest_num;
			/* collect all logs */
			if (msg.test_done.have_log) {
				if (dispatch_thread_read_log(sock_fd,
			/* collect all subtests and subtest logs */
			if (!state->subtest_num)
			if (dispatch_thread_send_subtests(sock_fd, state))
		pthread_mutex_lock(&stdout_output_lock);
		dump_test_log(test, state, false, true, NULL);
		pthread_mutex_unlock(&stdout_output_lock);
	} /* while (true) */
	fprintf(stderr, "[%d]: Protocol/IO error: %s.\n", data->worker_id, strerror(errno));
		struct msg msg_exit;
		msg_exit.type = MSG_EXIT;
		if (send_message(sock_fd, &msg_exit) < 0) {
			fprintf(stderr, "[%d]: send_message msg_exit: %s.\n",
				data->worker_id, strerror(errno));
static void calculate_summary_and_print_errors(struct test_env *env)
	int succ_cnt = 0, fail_cnt = 0, sub_succ_cnt = 0, skip_cnt = 0;
	json_writer_t *w = NULL;
	for (i = 0; i < prog_test_cnt; i++) {
		struct test_state *state = &test_states[i];
		sub_succ_cnt += state->sub_succ_cnt;
		skip_cnt += state->skip_cnt;
		if (state->error_cnt)
		w = jsonw_new(env->json);
			fprintf(env->stderr, "Failed to create new JSON stream.");
		jsonw_start_object(w);
		jsonw_uint_field(w, "success", succ_cnt);
		jsonw_uint_field(w, "success_subtest", sub_succ_cnt);
		jsonw_uint_field(w, "skipped", skip_cnt);
		jsonw_uint_field(w, "failed", fail_cnt);
		jsonw_name(w, "results");
		jsonw_start_array(w);
	/*
	 * We only print the error log summary when there are failed tests and
	 * verbose mode is not enabled. Otherwise, results may be inconsistent.
	 */
	if (!verbose() && fail_cnt) {
		printf("\nAll error logs:\n");
		/* print error logs again */
		for (i = 0; i < prog_test_cnt; i++) {
			struct prog_test_def *test = &prog_test_defs[i];
			struct test_state *state = &test_states[i];
			if (!state->tested || !state->error_cnt)
			dump_test_log(test, state, true, true, w);
		jsonw_end_object(w);
	printf("Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n",
	       succ_cnt, sub_succ_cnt, skip_cnt, fail_cnt);
	env->succ_cnt = succ_cnt;
	env->sub_succ_cnt = sub_succ_cnt;
	env->fail_cnt = fail_cnt;
	env->skip_cnt = skip_cnt;
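/* server_main() coordinates a parallel run: the dispatcher threads above hand
 * parallel-safe tests to the forked workers, serial tests are then executed
 * directly in the main process, the summary is printed, and the worker
 * processes are reaped.
 */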
static void server_main(void)
	pthread_t *dispatcher_threads;
	struct dispatch_data *data;
	struct sigaction sigact_int = {
		.sa_handler = sigint_handler,
		.sa_flags = SA_RESETHAND,
	sigaction(SIGINT, &sigact_int, NULL);
	dispatcher_threads = calloc(sizeof(pthread_t), env.workers);
	data = calloc(sizeof(struct dispatch_data), env.workers);
	env.worker_current_test = calloc(sizeof(int), env.workers);
	for (i = 0; i < env.workers; i++) {
		data[i].worker_id = i;
		data[i].sock_fd = env.worker_socks[i];
		rc = pthread_create(&dispatcher_threads[i], NULL, dispatch_thread, &data[i]);
			perror("Failed to launch dispatcher thread");
			exit(EXIT_ERR_SETUP_INFRA);
	/* wait for all dispatchers to finish */
	for (i = 0; i < env.workers; i++) {
			int ret = pthread_tryjoin_np(dispatcher_threads[i], NULL);
			} else if (ret == EBUSY) {
				fprintf(stderr, "Still waiting for thread %d (test %d).\n",
					i, env.worker_current_test[i] + 1);
				usleep(1000 * 1000);
				fprintf(stderr, "Unexpected error joining dispatcher thread: %d", ret);
	free(dispatcher_threads);
	free(env.worker_current_test);

	/* run serial tests */
	for (int i = 0; i < prog_test_cnt; i++) {
		struct prog_test_def *test = &prog_test_defs[i];
		if (!test->should_run || !test->run_serial_test)

	/* generate summary */
	calculate_summary_and_print_errors(&env);

	/* reap all workers */
	for (i = 0; i < env.workers; i++) {
		pid = waitpid(env.worker_pids[i], &wstatus, 0);
		if (pid != env.worker_pids[i])
			perror("Unable to reap worker");
static void worker_main_send_log(int sock, char *log_buf, size_t log_cnt)
		memset(&msg_log, 0, sizeof(msg_log));
		msg_log.type = MSG_TEST_LOG;
		dest = msg_log.test_log.log_buf;
		len = slen >= MAX_LOG_TRUNK_SIZE ? MAX_LOG_TRUNK_SIZE : slen;
		memcpy(dest, src, len);
			msg_log.test_log.is_last = true;
		assert(send_message(sock, &msg_log) >= 0);

static void free_subtest_state(struct subtest_state *state)
	if (state->log_buf) {
		free(state->log_buf);
		state->log_buf = NULL;

static int worker_main_send_subtests(int sock, struct test_state *state)
	struct subtest_state *subtest_state;
	memset(&msg, 0, sizeof(msg));
	msg.type = MSG_SUBTEST_DONE;
	for (i = 0; i < state->subtest_num; i++) {
		subtest_state = &state->subtest_states[i];
		msg.subtest_done.num = i;
		strncpy(msg.subtest_done.name, subtest_state->name, MAX_SUBTEST_NAME);
		msg.subtest_done.error_cnt = subtest_state->error_cnt;
		msg.subtest_done.skipped = subtest_state->skipped;
		msg.subtest_done.filtered = subtest_state->filtered;
		msg.subtest_done.have_log = false;
		if (verbose() || state->force_log || subtest_state->error_cnt) {
			if (subtest_state->log_cnt)
				msg.subtest_done.have_log = true;
		if (send_message(sock, &msg) < 0) {
			perror("Failed to send subtest done message");
		if (msg.subtest_done.have_log)
			worker_main_send_log(sock, subtest_state->log_buf, subtest_state->log_cnt);
		free_subtest_state(subtest_state);
		free(subtest_state->name);
	for (; i < state->subtest_num; i++)
		free_subtest_state(&state->subtest_states[i]);
	free(state->subtest_states);
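/* worker_main() is the loop run inside each forked worker process: it waits
 * for commands from its dispatcher, runs the requested test on MSG_DO_TEST,
 * streams the results and logs back, and returns on MSG_EXIT.
 */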
static int worker_main(int sock)
		/* receive command */
		if (recv_message(sock, &msg) < 0)
				fprintf(stderr, "[%d]: worker exit.\n",
			int test_to_run = msg.do_test.num;
			struct prog_test_def *test = &prog_test_defs[test_to_run];
			struct test_state *state = &test_states[test_to_run];
				fprintf(stderr, "[%d]: #%d:%s running.\n",
			run_one_test(test_to_run);
			memset(&msg, 0, sizeof(msg));
			msg.type = MSG_TEST_DONE;
			msg.test_done.num = test_to_run;
			msg.test_done.error_cnt = state->error_cnt;
			msg.test_done.skip_cnt = state->skip_cnt;
			msg.test_done.sub_succ_cnt = state->sub_succ_cnt;
			msg.test_done.subtest_num = state->subtest_num;
			msg.test_done.have_log = false;
			if (verbose() || state->force_log || state->error_cnt) {
				msg.test_done.have_log = true;
			if (send_message(sock, &msg) < 0) {
				perror("Failed to send test done message");
			if (msg.test_done.have_log)
				worker_main_send_log(sock, state->log_buf, state->log_cnt);
			if (state->log_buf) {
				free(state->log_buf);
				state->log_buf = NULL;
			if (state->subtest_num)
				if (worker_main_send_subtests(sock, state))
				fprintf(stderr, "[%d]: #%d:%s done.\n",
		} /* case MSG_DO_TEST */
			fprintf(stderr, "[%d]: unknown message.\n", env.worker_id);

static void free_test_states(void)
	for (i = 0; i < ARRAY_SIZE(prog_test_defs); i++) {
		struct test_state *test_state = &test_states[i];
		for (j = 0; j < test_state->subtest_num; j++)
			free_subtest_state(&test_state->subtest_states[j]);
		free(test_state->subtest_states);
		free(test_state->log_buf);
		test_state->subtest_states = NULL;
		test_state->log_buf = NULL;
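/* main(): parse arguments, optionally switch into a flavor subdirectory,
 * enable libbpf 1.0 mode and install the print callback, (re)load bpf_testmod
 * unless only listing tests, number and filter the tests, then either fork the
 * requested workers and run server_main() or run everything sequentially in
 * this process.
 */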
int main(int argc, char **argv)
	static const struct argp argp = {
		.parser = parse_arg,
		.doc = argp_program_doc,
	struct sigaction sigact = {
		.sa_handler = crash_handler,
		.sa_flags = SA_RESETHAND,
	sigaction(SIGSEGV, &sigact, NULL);
	err = argp_parse(&argp, argc, argv, 0, NULL, &env);
	err = cd_flavor_subdir(argv[0]);

	/* Use libbpf 1.0 API mode */
	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
	libbpf_set_print(libbpf_print_fn);
	env.jit_enabled = is_jit_enabled();
	env.nr_cpus = libbpf_num_possible_cpus();
	if (env.nr_cpus < 0) {
		fprintf(stderr, "Failed to get number of CPUs: %d!\n",
	env.stdout = stdout;
	env.stderr = stderr;
	env.has_testmod = true;
	if (!env.list_test_names) {
		/* ensure previous instance of the module is unloaded */
		unload_bpf_testmod(verbose());
		if (load_bpf_testmod(verbose())) {
			fprintf(env.stderr, "WARNING! Selftests relying on bpf_testmod.ko will be skipped.\n");
			env.has_testmod = false;

	/* initializing tests */
	for (i = 0; i < prog_test_cnt; i++) {
		struct prog_test_def *test = &prog_test_defs[i];
		test->test_num = i + 1;
		test->should_run = should_run(&env.test_selector,
					      test->test_num, test->test_name);
		if ((test->run_test == NULL && test->run_serial_test == NULL) ||
		    (test->run_test != NULL && test->run_serial_test != NULL)) {
			fprintf(stderr, "Test %d:%s must have either test_%s() or serial_test_%s() defined.\n",
				test->test_num, test->test_name, test->test_name, test->test_name);
			exit(EXIT_ERR_SETUP_INFRA);

	/* ignore workers if we are just listing */
	if (env.get_test_cnt || env.list_test_names)

	/* launch workers if requested */
	env.worker_id = -1; /* main process */
		env.worker_pids = calloc(sizeof(__pid_t), env.workers);
		env.worker_socks = calloc(sizeof(int), env.workers);
		fprintf(stdout, "Launching %d workers.\n", env.workers);
		for (i = 0; i < env.workers; i++) {
			if (socketpair(AF_UNIX, SOCK_SEQPACKET | SOCK_CLOEXEC, 0, sv) < 0) {
				perror("Failed to create worker socket");
				perror("Failed to fork worker");
			} else if (pid != 0) { /* main process */
				env.worker_pids[i] = pid;
				env.worker_socks[i] = sv[0];
			} else { /* inside each worker process */
				return worker_main(sv[1]);
		if (env.worker_id == -1) {

	/* The rest of the main process */

	/* on single mode */
	for (i = 0; i < prog_test_cnt; i++) {
		struct prog_test_def *test = &prog_test_defs[i];
		if (!test->should_run)
		if (env.get_test_cnt) {
		if (env.list_test_names) {
			fprintf(env.stdout, "%s\n", test->test_name);

	if (env.get_test_cnt) {
		printf("%d\n", env.succ_cnt);
	if (env.list_test_names)
	calculate_summary_and_print_errors(&env);
	close(env.saved_netns_fd);
	if (!env.list_test_names && env.has_testmod)
		unload_bpf_testmod(verbose());
	free_test_selector(&env.test_selector);
	free_test_selector(&env.subtest_selector);
	if (env.succ_cnt + env.fail_cnt + env.skip_cnt == 0)
		return EXIT_NO_TEST;
	return env.fail_cnt ? EXIT_FAILURE : EXIT_SUCCESS;