1 // SPDX-License-Identifier: GPL-2.0
5 #include <test_progs.h>
6 #include "uprobe_multi.skel.h"
7 #include "uprobe_multi_bench.skel.h"
8 #include "uprobe_multi_usdt.skel.h"
9 #include "uprobe_multi_consumers.skel.h"
10 #include "uprobe_multi_pid_filter.skel.h"
11 #include "uprobe_multi_session.skel.h"
12 #include "uprobe_multi_session_single.skel.h"
13 #include "uprobe_multi_session_cookie.skel.h"
14 #include "uprobe_multi_session_recursive.skel.h"
15 #include "uprobe_multi_verifier.skel.h"
16 #include "bpf/libbpf_internal.h"
17 #include "testing_helpers.h"
/* Test string handed to the BPF programs via skel->bss->user_ptr. */
20 static char test_data[] = "test_data";
/* noinline uprobe attach target #1 (body elided in this listing). */
22 noinline void uprobe_multi_func_1(void)
/* noinline uprobe attach target #2 (body elided in this listing). */
27 noinline void uprobe_multi_func_2(void)
/* noinline uprobe attach target #3 (body elided in this listing). */
32 noinline void uprobe_multi_func_3(void)
/* Fires the test:pid_filter_usdt USDT probe; noinline so USDT attach works. */
37 noinline void usdt_trigger(void)
39 STAP_PROBE(test, pid_filter_usdt);
/* Recursive trigger for the session-recursive cookie test; calls itself with
 * i - 1 (termination condition elided in this listing).
 */
42 noinline void uprobe_session_recursive(int i)
45 uprobe_session_recursive(i - 1);
/* Field of struct child (surrounding struct declaration elided in this
 * listing): pipe used by the child thread to signal readiness to the parent.
 */
50 int c2p[2]; /* child -> parent channel */
/* Reap a test child: join its helper thread (when one was started) and
 * wait for the child process to exit.
 */
57 static void release_child(struct child *child)
66 pthread_join(child->thread, NULL);
70 waitpid(child->pid, &child_status, 0);
/* Unblock the child by writing one byte into its "go" pipe.
 * NOTE(review): write() return value is ignored here -- best-effort kick.
 */
73 static void kick_child(struct child *child)
78 write(child->go[1], &c, 1);
/* Entry point for clone()/fork()'ed children: block on the "go" pipe until
 * the parent kicks us, then run each uprobe trigger function exactly once.
 */
84 static int child_func(void *arg)
86 struct child *child = arg;
91 /* wait for parent's kick */
92 err = read(child->go[0], &c, 1);
96 uprobe_multi_func_1();
97 uprobe_multi_func_2();
98 uprobe_multi_func_3();
/* Spawn a child running child_func(), either via clone(CLONE_VM) (shared
 * address space) or via plain fork(), selected by clone_vm.
 * NOTE(review): clone() is handed the midpoint of child->stack -- presumably
 * deliberate to leave slack on both sides; confirm against the elided stack
 * declaration.
 */
104 static int spawn_child_flag(struct child *child, bool clone_vm)
106 /* pipe to notify child to execute the trigger functions */
111 child->pid = child->tid = clone(child_func, child->stack + sizeof(child->stack)/2,
112 CLONE_VM|SIGCHLD, child);
114 child->pid = child->tid = fork();
116 if (child->pid < 0) {
117 release_child(child);
123 if (!clone_vm && child->pid == 0)
/* Convenience wrapper: spawn a fork()-based (separate address space) child. */
129 static int spawn_child(struct child *child)
131 return spawn_child_flag(child, false);
/* Thread body for the thread-level pid filter tests: record our tid, tell
 * the parent we are ready via the c2p pipe, wait for the kick, then run each
 * trigger function once.
 */
134 static void *child_thread(void *ctx)
136 struct child *child = ctx;
139 child->tid = sys_gettid();
141 /* let parent know we are ready */
142 err = write(child->c2p[1], &c, 1);
146 /* wait for parent's kick */
147 err = read(child->go[0], &c, 1);
151 uprobe_multi_func_1();
152 uprobe_multi_func_2();
153 uprobe_multi_func_3();
/* Start child_thread() inside the current process and block until it reports
 * readiness on the c2p pipe (child->pid stays our own pid; tid differs).
 */
160 static int spawn_thread(struct child *child)
164 /* pipe to notify child to execute the trigger functions */
167 /* pipe to notify parent that child thread is ready */
168 if (pipe(child->c2p)) {
174 child->pid = getpid();
176 err = pthread_create(&child->thread, NULL, child_thread, child);
181 close(child->c2p[0]);
182 close(child->c2p[1]);
187 err = read(child->c2p[0], &c, 1);
188 if (!ASSERT_EQ(err, 1, "child_thread_ready"))
/* Shared verification driver: seed the skeleton with the trigger-function
 * addresses and pid expectations, trigger the probes (from the parent only
 * when appropriate, see comment below), then assert the per-probe counters.
 * When child != NULL the probes are expected to fire in the child instead.
 */
194 static void uprobe_multi_test_run(struct uprobe_multi *skel, struct child *child)
196 skel->bss->uprobe_multi_func_1_addr = (__u64) uprobe_multi_func_1;
197 skel->bss->uprobe_multi_func_2_addr = (__u64) uprobe_multi_func_2;
198 skel->bss->uprobe_multi_func_3_addr = (__u64) uprobe_multi_func_3;
200 skel->bss->user_ptr = test_data;
203 * Disable pid check in bpf program if we are pid filter test,
204 * because the probe should be executed only by child->pid
205 * passed at the probe attach.
207 skel->bss->pid = child ? 0 : getpid();
208 skel->bss->expect_pid = child ? child->pid : 0;
210 /* trigger all probes, if we are testing child *process*, just to make
211 * sure that PID filtering doesn't let through activations from wrong
212 * PIDs; when we test child *thread*, we don't want to do this to
213 * avoid double counting number of triggering events
215 if (!child || !child->thread) {
216 uprobe_multi_func_1();
217 uprobe_multi_func_2();
218 uprobe_multi_func_3();
226 * There are 2 entry and 2 exit probe called for each uprobe_multi_func_[123]
227 * function and each sleepable probe (6) increments uprobe_multi_sleep_result.
229 ASSERT_EQ(skel->bss->uprobe_multi_func_1_result, 2, "uprobe_multi_func_1_result");
230 ASSERT_EQ(skel->bss->uprobe_multi_func_2_result, 2, "uprobe_multi_func_2_result");
231 ASSERT_EQ(skel->bss->uprobe_multi_func_3_result, 2, "uprobe_multi_func_3_result");
233 ASSERT_EQ(skel->bss->uretprobe_multi_func_1_result, 2, "uretprobe_multi_func_1_result");
234 ASSERT_EQ(skel->bss->uretprobe_multi_func_2_result, 2, "uretprobe_multi_func_2_result");
235 ASSERT_EQ(skel->bss->uretprobe_multi_func_3_result, 2, "uretprobe_multi_func_3_result");
237 ASSERT_EQ(skel->bss->uprobe_multi_sleep_result, 6, "uprobe_multi_sleep_result");
239 ASSERT_FALSE(skel->bss->bad_pid_seen, "bad_pid_seen");
/* Child-only checks: confirm the BPF side observed the expected pid/tid. */
242 ASSERT_EQ(skel->bss->child_pid, child->pid, "uprobe_multi_child_pid");
243 ASSERT_EQ(skel->bss->child_tid, child->tid, "uprobe_multi_child_tid");
/* Basic skeleton-API subtest: open/load, auto-attach everything declared in
 * the skeleton, and run the shared verification without any child filtering.
 */
247 static void test_skel_api(void)
249 struct uprobe_multi *skel = NULL;
252 skel = uprobe_multi__open_and_load();
253 if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load"))
256 err = uprobe_multi__attach(skel);
257 if (!ASSERT_OK(err, "uprobe_multi__attach"))
260 uprobe_multi_test_run(skel, NULL);
263 uprobe_multi__destroy(skel);
/* Attach-API worker: attach entry/return and sleepable variants of the multi
 * uprobe programs (pid-filtered to the child when one is given, else -1 for
 * any process), plus uprobe-backed USDTs, then run the shared verification.
 * The "extra" programs always attach with pid -1 so they must never observe
 * the filtered pid (checked via bad_pid_seen_usdt below).
 */
267 __test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_multi_opts *opts,
270 pid_t pid = child ? child->pid : -1;
271 struct uprobe_multi *skel = NULL;
273 skel = uprobe_multi__open_and_load();
274 if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load"))
277 opts->retprobe = false;
278 skel->links.uprobe = bpf_program__attach_uprobe_multi(skel->progs.uprobe, pid,
279 binary, pattern, opts);
280 if (!ASSERT_OK_PTR(skel->links.uprobe, "bpf_program__attach_uprobe_multi"))
283 opts->retprobe = true;
284 skel->links.uretprobe = bpf_program__attach_uprobe_multi(skel->progs.uretprobe, pid,
285 binary, pattern, opts);
286 if (!ASSERT_OK_PTR(skel->links.uretprobe, "bpf_program__attach_uprobe_multi"))
289 opts->retprobe = false;
290 skel->links.uprobe_sleep = bpf_program__attach_uprobe_multi(skel->progs.uprobe_sleep, pid,
291 binary, pattern, opts);
292 if (!ASSERT_OK_PTR(skel->links.uprobe_sleep, "bpf_program__attach_uprobe_multi"))
295 opts->retprobe = true;
296 skel->links.uretprobe_sleep = bpf_program__attach_uprobe_multi(skel->progs.uretprobe_sleep,
297 pid, binary, pattern, opts);
298 if (!ASSERT_OK_PTR(skel->links.uretprobe_sleep, "bpf_program__attach_uprobe_multi"))
301 opts->retprobe = false;
302 skel->links.uprobe_extra = bpf_program__attach_uprobe_multi(skel->progs.uprobe_extra, -1,
303 binary, pattern, opts);
304 if (!ASSERT_OK_PTR(skel->links.uprobe_extra, "bpf_program__attach_uprobe_multi"))
307 /* Attach (uprobe-backed) USDTs */
308 skel->links.usdt_pid = bpf_program__attach_usdt(skel->progs.usdt_pid, pid, binary,
309 "test", "pid_filter_usdt", NULL);
310 if (!ASSERT_OK_PTR(skel->links.usdt_pid, "attach_usdt_pid"))
313 skel->links.usdt_extra = bpf_program__attach_usdt(skel->progs.usdt_extra, -1, binary,
314 "test", "pid_filter_usdt", NULL);
315 if (!ASSERT_OK_PTR(skel->links.usdt_extra, "attach_usdt_extra"))
318 uprobe_multi_test_run(skel, child);
320 ASSERT_FALSE(skel->bss->bad_pid_seen_usdt, "bad_pid_seen_usdt");
/* Child-only: the pid-filtered USDT must have seen the child's pid/tid. */
322 ASSERT_EQ(skel->bss->child_pid_usdt, child->pid, "usdt_multi_child_pid");
323 ASSERT_EQ(skel->bss->child_tid_usdt, child->tid, "usdt_multi_child_tid");
326 uprobe_multi__destroy(skel);
/* Run __test_attach_api() in three modes: no filtering, pid filter against a
 * forked child process, and pid filter against a child thread.
 */
330 test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_multi_opts *opts)
332 static struct child child;
/* no child: no pid filtering */
335 __test_attach_api(binary, pattern, opts, NULL);
/* pid filter (process) */
338 if (!ASSERT_OK(spawn_child(&child), "spawn_child"))
341 __test_attach_api(binary, pattern, opts, &child);
343 /* pid filter (thread) */
344 if (!ASSERT_OK(spawn_thread(&child), "spawn_thread"))
347 __test_attach_api(binary, pattern, opts, &child);
/* Attach by glob pattern: both '*' and '?' wildcard forms must resolve to
 * the three uprobe_multi_func_[123] symbols in our own binary.
 */
350 static void test_attach_api_pattern(void)
352 LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
354 test_attach_api("/proc/self/exe", "uprobe_multi_func_*", &opts);
355 test_attach_api("/proc/self/exe", "uprobe_multi_func_?", &opts);
/* Attach by explicit symbol list instead of a pattern (pattern = NULL,
 * opts.syms/cnt carry the three trigger function names).
 */
358 static void test_attach_api_syms(void)
360 LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
361 const char *syms[3] = {
362 "uprobe_multi_func_1",
363 "uprobe_multi_func_2",
364 "uprobe_multi_func_3",
368 opts.cnt = ARRAY_SIZE(syms);
369 test_attach_api("/proc/self/exe", NULL, &opts);
/* Negative tests for the raw BPF_TRACE_UPROBE_MULTI link-create API: each
 * case feeds one invalid option combination and asserts the exact errno the
 * kernel is expected to return (E2BIG/EINVAL/EFAULT/EBADF).
 */
372 static void test_attach_api_fails(void)
374 LIBBPF_OPTS(bpf_link_create_opts, opts);
375 const char *path = "/proc/self/exe";
376 struct uprobe_multi *skel = NULL;
377 int prog_fd, link_fd = -1;
378 unsigned long offset = 0;
380 skel = uprobe_multi__open_and_load();
381 if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load"))
384 prog_fd = bpf_program__fd(skel->progs.uprobe_extra);
/* cnt too large */
387 opts.uprobe_multi.path = path;
388 opts.uprobe_multi.offsets = &offset;
389 opts.uprobe_multi.cnt = INT_MAX;
390 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
391 if (!ASSERT_ERR(link_fd, "link_fd"))
393 if (!ASSERT_EQ(link_fd, -E2BIG, "big cnt"))
/* cnt is zero */
397 LIBBPF_OPTS_RESET(opts,
398 .uprobe_multi.path = path,
399 .uprobe_multi.offsets = (unsigned long *) &offset,
402 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
403 if (!ASSERT_ERR(link_fd, "link_fd"))
405 if (!ASSERT_EQ(link_fd, -EINVAL, "cnt_is_zero"))
408 /* negative offset */
410 opts.uprobe_multi.path = path;
411 opts.uprobe_multi.offsets = (unsigned long *) &offset;
412 opts.uprobe_multi.cnt = 1;
414 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
415 if (!ASSERT_ERR(link_fd, "link_fd"))
417 if (!ASSERT_EQ(link_fd, -EINVAL, "offset_is_negative"))
420 /* offsets is NULL */
421 LIBBPF_OPTS_RESET(opts,
422 .uprobe_multi.path = path,
423 .uprobe_multi.cnt = 1,
426 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
427 if (!ASSERT_ERR(link_fd, "link_fd"))
429 if (!ASSERT_EQ(link_fd, -EINVAL, "offsets_is_null"))
432 /* wrong offsets pointer */
433 LIBBPF_OPTS_RESET(opts,
434 .uprobe_multi.path = path,
435 .uprobe_multi.offsets = (unsigned long *) 1,
436 .uprobe_multi.cnt = 1,
439 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
440 if (!ASSERT_ERR(link_fd, "link_fd"))
442 if (!ASSERT_EQ(link_fd, -EFAULT, "offsets_is_wrong"))
/* path is NULL */
447 LIBBPF_OPTS_RESET(opts,
448 .uprobe_multi.offsets = (unsigned long *) &offset,
449 .uprobe_multi.cnt = 1,
452 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
453 if (!ASSERT_ERR(link_fd, "link_fd"))
455 if (!ASSERT_EQ(link_fd, -EINVAL, "path_is_null"))
458 /* wrong path pointer */
459 LIBBPF_OPTS_RESET(opts,
460 .uprobe_multi.path = (const char *) 1,
461 .uprobe_multi.offsets = (unsigned long *) &offset,
462 .uprobe_multi.cnt = 1,
465 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
466 if (!ASSERT_ERR(link_fd, "link_fd"))
468 if (!ASSERT_EQ(link_fd, -EFAULT, "path_is_wrong"))
471 /* wrong path type */
472 LIBBPF_OPTS_RESET(opts,
473 .uprobe_multi.path = "/",
474 .uprobe_multi.offsets = (unsigned long *) &offset,
475 .uprobe_multi.cnt = 1,
478 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
479 if (!ASSERT_ERR(link_fd, "link_fd"))
481 if (!ASSERT_EQ(link_fd, -EBADF, "path_is_wrong_type"))
484 /* wrong cookies pointer */
485 LIBBPF_OPTS_RESET(opts,
486 .uprobe_multi.path = path,
487 .uprobe_multi.offsets = (unsigned long *) &offset,
488 .uprobe_multi.cookies = (__u64 *) 1ULL,
489 .uprobe_multi.cnt = 1,
492 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
493 if (!ASSERT_ERR(link_fd, "link_fd"))
495 if (!ASSERT_EQ(link_fd, -EFAULT, "cookies_is_wrong"))
498 /* wrong ref_ctr_offsets pointer */
499 LIBBPF_OPTS_RESET(opts,
500 .uprobe_multi.path = path,
501 .uprobe_multi.offsets = (unsigned long *) &offset,
502 .uprobe_multi.cookies = (__u64 *) &offset,
503 .uprobe_multi.ref_ctr_offsets = (unsigned long *) 1,
504 .uprobe_multi.cnt = 1,
507 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
508 if (!ASSERT_ERR(link_fd, "link_fd"))
510 if (!ASSERT_EQ(link_fd, -EFAULT, "ref_ctr_offsets_is_wrong"))
/* invalid flags bit */
514 LIBBPF_OPTS_RESET(opts,
515 .uprobe_multi.flags = 1 << 31,
518 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
519 if (!ASSERT_ERR(link_fd, "link_fd"))
521 if (!ASSERT_EQ(link_fd, -EINVAL, "wrong_flags"))
/* wrong pid: -2 is neither "any" (-1/0 by convention) nor a valid pid */
525 LIBBPF_OPTS_RESET(opts,
526 .uprobe_multi.path = path,
527 .uprobe_multi.offsets = (unsigned long *) &offset,
528 .uprobe_multi.cnt = 1,
529 .uprobe_multi.pid = -2,
532 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
533 if (!ASSERT_ERR(link_fd, "link_fd"))
535 ASSERT_EQ(link_fd, -EINVAL, "pid_is_wrong");
540 uprobe_multi__destroy(skel);
/* Function whose body contains an int3 (trap) instruction, labeled by the
 * uprobe_multi_error_func_int3 asm symbol so the fail-trap test can attach
 * exactly at the trap (asm body partially elided in this listing).
 */
544 noinline void uprobe_multi_error_func(void)
547 * If --fcf-protection=branch is enabled the gcc generates endbr as
548 * first instruction, so marking the exact address of int3 with the
549 * symbol to be used in the attach_uprobe_fail_trap test below.
552 ".globl uprobe_multi_error_func_int3; \n"
553 "uprobe_multi_error_func_int3: \n"
559 * Attaching uprobe on uprobe_multi_error_func results in error
560 * because it already starts with int3 instruction.
/* x86-only negative test: attach across 4 symbols, one of which sits on an
 * int3, and expect the whole multi-attach to fail. If it unexpectedly
 * succeeds, the link is destroyed and nulled so skeleton teardown stays clean.
 */
562 static void attach_uprobe_fail_trap(struct uprobe_multi *skel)
564 LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
565 const char *syms[4] = {
566 "uprobe_multi_func_1",
567 "uprobe_multi_func_2",
568 "uprobe_multi_func_3",
569 "uprobe_multi_error_func_int3",
573 opts.cnt = ARRAY_SIZE(syms);
575 skel->links.uprobe = bpf_program__attach_uprobe_multi(skel->progs.uprobe, -1,
576 "/proc/self/exe", NULL, &opts);
577 if (!ASSERT_ERR_PTR(skel->links.uprobe, "bpf_program__attach_uprobe_multi")) {
578 bpf_link__destroy(skel->links.uprobe);
579 skel->links.uprobe = NULL;
/* Non-x86_64 stub: the trap-instruction attach test does not apply. */
583 static void attach_uprobe_fail_trap(struct uprobe_multi *skel) { }
/* Dummy semaphores used as ref_ctr_offset targets by attach_uprobe_fail_refctr(). */
586 short sema_1 __used, sema_2 __used;
/* Negative test: two uprobes on the same function offset but with different
 * ref_ctr_offsets must be rejected by the kernel. Symbol and semaphore
 * offsets are resolved from our own ELF image at runtime.
 */
588 static void attach_uprobe_fail_refctr(struct uprobe_multi *skel)
590 unsigned long *tmp_offsets = NULL, *tmp_ref_ctr_offsets = NULL;
591 unsigned long offsets[3], ref_ctr_offsets[3];
592 LIBBPF_OPTS(bpf_link_create_opts, opts);
593 const char *path = "/proc/self/exe";
594 const char *syms[3] = {
595 "uprobe_multi_func_1",
596 "uprobe_multi_func_2",
598 const char *sema[3] = {
602 int prog_fd, link_fd, err;
604 prog_fd = bpf_program__fd(skel->progs.uprobe_extra);
/* resolve the two function offsets (STT_FUNC) ... */
606 err = elf_resolve_syms_offsets("/proc/self/exe", 2, (const char **) &syms,
607 &tmp_offsets, STT_FUNC);
608 if (!ASSERT_OK(err, "elf_resolve_syms_offsets_func"))
/* ... and the two semaphore offsets (STT_OBJECT) */
611 err = elf_resolve_syms_offsets("/proc/self/exe", 2, (const char **) &sema,
612 &tmp_ref_ctr_offsets, STT_OBJECT);
613 if (!ASSERT_OK(err, "elf_resolve_syms_offsets_sema"))
617 * We attach to 3 uprobes on 2 functions, so 2 uprobes share single function,
618 * but with different ref_ctr_offset which is not allowed and results in fail.
620 offsets[0] = tmp_offsets[0]; /* uprobe_multi_func_1 */
621 offsets[1] = tmp_offsets[1]; /* uprobe_multi_func_2 */
622 offsets[2] = tmp_offsets[1]; /* uprobe_multi_func_2 */
624 ref_ctr_offsets[0] = tmp_ref_ctr_offsets[0]; /* sema_1 */
625 ref_ctr_offsets[1] = tmp_ref_ctr_offsets[1]; /* sema_2 */
626 ref_ctr_offsets[2] = tmp_ref_ctr_offsets[0]; /* sema_1, error */
628 opts.uprobe_multi.path = path;
629 opts.uprobe_multi.offsets = (const unsigned long *) &offsets;
630 opts.uprobe_multi.ref_ctr_offsets = (const unsigned long *) &ref_ctr_offsets;
631 opts.uprobe_multi.cnt = 3;
633 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
634 if (!ASSERT_ERR(link_fd, "link_fd"))
/* NOTE(review): only tmp_ref_ctr_offsets is freed on the visible lines;
 * tmp_offsets is presumably freed on an elided cleanup line -- confirm. */
638 free(tmp_ref_ctr_offsets);
/* Umbrella for the attach-failure cases: trap instruction (x86_64 only) and
 * conflicting ref_ctr_offsets on one function.
 */
642 static void test_attach_uprobe_fails(void)
644 struct uprobe_multi *skel = NULL;
646 skel = uprobe_multi__open_and_load();
647 if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load"))
650 /* attach fails due to adding uprobe on trap instruction, x86_64 only */
651 attach_uprobe_fail_trap(skel);
653 /* attach fail due to wrong ref_ctr_offs on one of the uprobes */
654 attach_uprobe_fail_refctr(skel);
656 uprobe_multi__destroy(skel);
/* Raw link-API worker: resolve the trigger-function offsets from ELF, create
 * one BPF_TRACE_UPROBE_MULTI link per program (entry/return, normal/sleepable)
 * pid-filtered to child when given, plus an unfiltered "extra" link, then run
 * the shared verification.
 * NOTE(review): flags are set through opts.kprobe_multi.flags while the rest
 * of the options go through opts.uprobe_multi.* -- this appears to rely on
 * both union arms placing flags first; confirm against bpf_link_create_opts.
 */
659 static void __test_link_api(struct child *child)
661 int prog_fd, link1_fd = -1, link2_fd = -1, link3_fd = -1, link4_fd = -1;
662 LIBBPF_OPTS(bpf_link_create_opts, opts);
663 const char *path = "/proc/self/exe";
664 struct uprobe_multi *skel = NULL;
665 unsigned long *offsets = NULL;
666 const char *syms[3] = {
667 "uprobe_multi_func_1",
668 "uprobe_multi_func_2",
669 "uprobe_multi_func_3",
671 int link_extra_fd = -1;
674 err = elf_resolve_syms_offsets(path, 3, syms, (unsigned long **) &offsets, STT_FUNC);
675 if (!ASSERT_OK(err, "elf_resolve_syms_offsets"))
678 opts.uprobe_multi.path = path;
679 opts.uprobe_multi.offsets = offsets;
680 opts.uprobe_multi.cnt = ARRAY_SIZE(syms);
681 opts.uprobe_multi.pid = child ? child->pid : 0;
683 skel = uprobe_multi__open_and_load();
684 if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load"))
687 opts.kprobe_multi.flags = 0;
688 prog_fd = bpf_program__fd(skel->progs.uprobe);
689 link1_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
690 if (!ASSERT_GE(link1_fd, 0, "link1_fd"))
693 opts.kprobe_multi.flags = BPF_F_UPROBE_MULTI_RETURN;
694 prog_fd = bpf_program__fd(skel->progs.uretprobe);
695 link2_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
696 if (!ASSERT_GE(link2_fd, 0, "link2_fd"))
699 opts.kprobe_multi.flags = 0;
700 prog_fd = bpf_program__fd(skel->progs.uprobe_sleep);
701 link3_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
702 if (!ASSERT_GE(link3_fd, 0, "link3_fd"))
705 opts.kprobe_multi.flags = BPF_F_UPROBE_MULTI_RETURN;
706 prog_fd = bpf_program__fd(skel->progs.uretprobe_sleep);
707 link4_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
708 if (!ASSERT_GE(link4_fd, 0, "link4_fd"))
/* extra link attaches without pid filtering */
711 opts.kprobe_multi.flags = 0;
712 opts.uprobe_multi.pid = 0;
713 prog_fd = bpf_program__fd(skel->progs.uprobe_extra);
714 link_extra_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
715 if (!ASSERT_GE(link_extra_fd, 0, "link_extra_fd"))
718 uprobe_multi_test_run(skel, child);
729 if (link_extra_fd >= 0)
730 close(link_extra_fd);
732 uprobe_multi__destroy(skel);
/* Run __test_link_api() unfiltered, against a forked child process, and
 * against a child thread (mirrors test_attach_api()).
 */
736 static void test_link_api(void)
738 static struct child child;
/* no child: no pid filtering */
741 __test_link_api(NULL);
/* pid filter (process) */
744 if (!ASSERT_OK(spawn_child(&child), "spawn_child"))
747 __test_link_api(&child);
749 /* pid filter (thread) */
750 if (!ASSERT_OK(spawn_thread(&child), "spawn_thread"))
753 __test_link_api(&child);
/* Map consumer index 0..3 to its skeleton program (switch labels elided in
 * this listing); any other index is a test bug.
 */
756 static struct bpf_program *
757 get_program(struct uprobe_multi_consumers *skel, int prog)
761 return skel->progs.uprobe_0;
763 return skel->progs.uprobe_1;
765 return skel->progs.uprobe_2;
767 return skel->progs.uprobe_3;
769 ASSERT_FAIL("get_program");
/* Map consumer index 0..3 to the address of its skeleton link slot (switch
 * labels elided in this listing); any other index is a test bug.
 */
774 static struct bpf_link **
775 get_link(struct uprobe_multi_consumers *skel, int link)
779 return &skel->links.uprobe_0;
781 return &skel->links.uprobe_1;
783 return &skel->links.uprobe_2;
785 return &skel->links.uprobe_3;
787 ASSERT_FAIL("get_link");
/* Attach consumer program idx at the given offset of our own binary.
 * idx selects the flavor: 0 entry, 1 return, 2/3 session (see comment below).
 */
792 static int uprobe_attach(struct uprobe_multi_consumers *skel, int idx, unsigned long offset)
794 struct bpf_program *prog = get_program(skel, idx);
795 struct bpf_link **link = get_link(skel, idx);
796 LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
801 opts.offsets = &offset;
805 * bit/prog: 0 uprobe entry
806 * bit/prog: 1 uprobe return
807 * bit/prog: 2 uprobe session without return
808 * bit/prog: 3 uprobe session with return
810 opts.retprobe = idx == 1;
811 opts.session = idx == 2 || idx == 3;
813 *link = bpf_program__attach_uprobe_multi(prog, 0, "/proc/self/exe", NULL, &opts);
814 if (!ASSERT_OK_PTR(*link, "bpf_program__attach_uprobe_multi"))
/* Destroy consumer idx's link (link slot reset elided in this listing). */
819 static void uprobe_detach(struct uprobe_multi_consumers *skel, int idx)
821 struct bpf_link **link = get_link(skel, idx);
823 bpf_link__destroy(*link);
/* True when bit 'bit' is set in the consumer bitmask 'val'. */
827 static bool test_bit(int bit, unsigned long val)
829 return val & (1 << bit);
/* Transition from consumer state 'before' to 'after' while a uprobe is live:
 * detach consumers that drop out, attach consumers that appear. The function
 * itself carries the uprobe at 'offset' (it is one of the generated
 * consumer_testX bodies).
 */
833 uprobe_consumer_test(struct uprobe_multi_consumers *skel,
834 unsigned long before, unsigned long after,
835 unsigned long offset)
839 /* detach uprobe for each unset programs in 'before' state ... */
840 for (idx = 0; idx < 4; idx++) {
841 if (test_bit(idx, before) && !test_bit(idx, after))
842 uprobe_detach(skel, idx);
845 /* ... and attach all new programs in 'after' state */
846 for (idx = 0; idx < 4; idx++) {
847 if (!test_bit(idx, before) && test_bit(idx, after)) {
848 if (!ASSERT_OK(uprobe_attach(skel, idx, offset), "uprobe_attach_after"))
856 * We generate 16 consumer_testX functions that will have uprobe installed on
857 * and will be called in separate threads. All function pointer are stored in
858 * "consumers" section and each thread will pick one function based on index.
/* Start-of-section symbol emitted by the linker for the "consumers" section. */
861 extern const void *__start_consumers;
/* Each expansion defines one noinline consumer_testN wrapper around
 * uprobe_consumer_test() and records its address in the "consumers" section;
 * C16 stamps out all 16 via __COUNTER__.
 */
863 #define __CONSUMER_TEST(func) \
864 noinline int func(struct uprobe_multi_consumers *skel, unsigned long before, \
865 unsigned long after, unsigned long offset) \
867 return uprobe_consumer_test(skel, before, after, offset); \
869 void *__ ## func __used __attribute__((section("consumers"))) = (void *) func;
871 #define CONSUMER_TEST(func) __CONSUMER_TEST(func)
873 #define C1 CONSUMER_TEST(__PASTE(consumer_test, __COUNTER__))
874 #define C4 C1 C1 C1 C1
875 #define C16 C4 C4 C4 C4
/* Signature shared by all generated consumer_testX functions. */
879 typedef int (*test_t)(struct uprobe_multi_consumers *, unsigned long,
880 unsigned long, unsigned long);
/* Drive one before->after consumer transition through 'test' (a probed
 * consumer_testX function) and validate the per-program hit counters against
 * the expected values derived from the before/after bitmasks.
 */
882 static int consumer_test(struct uprobe_multi_consumers *skel,
883 unsigned long before, unsigned long after,
884 test_t test, unsigned long offset)
886 int err, idx, ret = -1;
888 printf("consumer_test before %lu after %lu\n", before, after);
890 /* 'before' is each, we attach uprobe for every set idx */
891 for (idx = 0; idx < 4; idx++) {
892 if (test_bit(idx, before)) {
893 if (!ASSERT_OK(uprobe_attach(skel, idx, offset), "uprobe_attach_before"))
/* run the probed consumer function itself */
898 err = test(skel, before, after, offset);
899 if (!ASSERT_EQ(err, 0, "uprobe_consumer_test"))
902 for (idx = 0; idx < 4; idx++) {
903 bool uret_stays, uret_survives;
904 const char *fmt = "BUG";
/* expected-count derivation per program; switch labels elided in this
 * listing, 'val' accumulation lines partially elided as well */
911 * +1 if define in 'before'
913 if (test_bit(idx, before))
915 fmt = "prog 0: uprobe";
919 * To trigger uretprobe consumer, the uretprobe under test either stayed from
920 * before to after (uret_stays + test_bit) or uretprobe instance survived and
921 * we have uretprobe active in after (uret_survives + test_bit)
923 uret_stays = before & after & 0b0110;
924 uret_survives = ((before & 0b0110) && (after & 0b0110) && (before & 0b1001));
926 if ((uret_stays || uret_survives) && test_bit(idx, after))
928 fmt = "prog 1: uretprobe";
932 * session with return
933 * +1 if defined in 'before'
934 * +1 if defined in 'after'
936 if (test_bit(idx, before)) {
938 if (test_bit(idx, after))
941 fmt = "prog 2: session with return";
945 * session without return
946 * +1 if defined in 'before'
948 if (test_bit(idx, before))
950 fmt = "prog 3: session with NO return";
954 if (!ASSERT_EQ(skel->bss->uprobe_result[idx], val, fmt))
956 skel->bss->uprobe_result[idx] = 0;
/* cleanup: detach everything regardless of outcome */
962 for (idx = 0; idx < 4; idx++)
963 uprobe_detach(skel, idx);
967 #define CONSUMER_MAX 16
970 * Each thread runs 1/16 of the load by running test for single
971 * 'before' number (based on thread index) and full scale of
/* Thread worker: pick consumer_test<idx> from the "consumers" section, find
 * its uprobe offset, and sweep every 'after' state for the fixed 'before'
 * state equal to the thread index.
 */
974 static void *consumer_thread(void *arg)
976 unsigned long idx = (unsigned long) arg;
977 struct uprobe_multi_consumers *skel;
978 unsigned long offset;
982 skel = uprobe_multi_consumers__open_and_load();
983 if (!ASSERT_OK_PTR(skel, "uprobe_multi_consumers__open_and_load"))
986 func = *((&__start_consumers) + idx);
988 offset = get_uprobe_offset(func);
/* NOTE(review): offset is unsigned long, so ASSERT_GE(offset, 0, ...) can
 * never fail and a negative error from get_uprobe_offset() would wrap to a
 * huge value instead of being caught -- worth confirming/fixing upstream. */
989 if (!ASSERT_GE(offset, 0, "uprobe_offset"))
992 for (after = 0; after < CONSUMER_MAX; after++)
993 if (consumer_test(skel, idx, after, func, offset))
997 uprobe_multi_consumers__destroy(skel);
/* Spawn CONSUMER_MAX threads, one per 'before' state, so that together they
 * cover all before x after consumer combinations (see comment below).
 */
1002 static void test_consumers(void)
1004 pthread_t pt[CONSUMER_MAX];
1009 * The idea of this test is to try all possible combinations of
1010 * uprobes consumers attached on single function.
1012 * - 1 uprobe entry consumer
1013 * - 1 uprobe exit consumer
1014 * - 1 uprobe session with return
1015 * - 1 uprobe session without return
1017 * The test uses 4 uprobes attached on single function, but that
1018 * translates into single uprobe with 4 consumers in kernel.
1020 * The before/after values present the state of attached consumers
1021 * before and after the probed function:
1023 * bit/prog 0 : uprobe entry
1024 * bit/prog 1 : uprobe return
1031 * it means that before we call 'uprobe_consumer_test' we attach
1032 * uprobes defined in 'before' value:
1034 * - bit/prog 1: uprobe entry
1036 * uprobe_consumer_test is called and inside it we attach and detach
1037 * uprobes based on 'after' value:
1039 * - bit/prog 0: is detached
1040 * - bit/prog 1: is attached
1042 * uprobe_consumer_test returns and we check counters values increased
1043 * by bpf programs on each uprobe to match the expected count based on
1044 * before/after bits.
1047 for (idx = 0; idx < CONSUMER_MAX; idx++) {
1048 err = pthread_create(&pt[idx], NULL, consumer_thread, (void *) idx);
1049 if (!ASSERT_OK(err, "pthread_create"))
/* join only the threads that were successfully created */
1054 pthread_join(pt[--idx], NULL);
/* Map task index to its pid-filter BPF program (default case elided). */
1057 static struct bpf_program *uprobe_multi_program(struct uprobe_multi_pid_filter *skel, int idx)
1060 case 0: return skel->progs.uprobe_multi_0;
1061 case 1: return skel->progs.uprobe_multi_1;
1062 case 2: return skel->progs.uprobe_multi_2;
/* Spawn TASKS children (fork or clone(CLONE_VM) per clone_vm), attach one
 * pid-filtered uprobe (entry or return per retprobe) per child on
 * uprobe_multi_func_1, kick them, and verify each program fired exactly once
 * for its own pid and never for an unknown one.
 */
1069 static void run_pid_filter(struct uprobe_multi_pid_filter *skel, bool clone_vm, bool retprobe)
1071 LIBBPF_OPTS(bpf_uprobe_multi_opts, opts, .retprobe = retprobe);
1072 struct bpf_link *link[TASKS] = {};
1073 struct child child[TASKS] = {};
/* reset counters so the function can be called twice (entry + return runs) */
1076 memset(skel->bss->test, 0, sizeof(skel->bss->test));
1078 for (i = 0; i < TASKS; i++) {
1079 if (!ASSERT_OK(spawn_child_flag(&child[i], clone_vm), "spawn_child"))
1081 skel->bss->pids[i] = child[i].pid;
1084 for (i = 0; i < TASKS; i++) {
1085 link[i] = bpf_program__attach_uprobe_multi(uprobe_multi_program(skel, i),
1086 child[i].pid, "/proc/self/exe",
1087 "uprobe_multi_func_1", &opts);
1088 if (!ASSERT_OK_PTR(link[i], "bpf_program__attach_uprobe_multi"))
1092 for (i = 0; i < TASKS; i++)
1093 kick_child(&child[i]);
1095 for (i = 0; i < TASKS; i++) {
1096 ASSERT_EQ(skel->bss->test[i][0], 1, "pid");
1097 ASSERT_EQ(skel->bss->test[i][1], 0, "unknown");
/* cleanup: links first, then reap children */
1101 for (i = 0; i < TASKS; i++)
1102 bpf_link__destroy(link[i]);
1103 for (i = 0; i < TASKS; i++)
1104 release_child(&child[i]);
/* Pid-filter subtest entry: run both entry- and return-probe variants for the
 * chosen child flavor (fork vs clone(CLONE_VM)).
 */
1107 static void test_pid_filter_process(bool clone_vm)
1109 struct uprobe_multi_pid_filter *skel;
1111 skel = uprobe_multi_pid_filter__open_and_load();
1112 if (!ASSERT_OK_PTR(skel, "uprobe_multi_pid_filter__open_and_load"))
1115 run_pid_filter(skel, clone_vm, false);
1116 run_pid_filter(skel, clone_vm, true);
1118 uprobe_multi_pid_filter__destroy(skel);
/* Session-uprobe subtest: attach via skeleton, trigger all three functions,
 * and check entry/return counts per the comment below.
 * NOTE(review): 'link' and 'opts' are never assigned on the visible lines;
 * bpf_link__destroy(NULL) is a harmless no-op, but both look vestigial.
 */
1121 static void test_session_skel_api(void)
1123 struct uprobe_multi_session *skel = NULL;
1124 LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
1125 struct bpf_link *link = NULL;
1128 skel = uprobe_multi_session__open_and_load();
1129 if (!ASSERT_OK_PTR(skel, "uprobe_multi_session__open_and_load"))
1132 skel->bss->pid = getpid();
1133 skel->bss->user_ptr = test_data;
1135 err = uprobe_multi_session__attach(skel);
1136 if (!ASSERT_OK(err, "uprobe_multi_session__attach"))
1139 /* trigger all probes */
1140 skel->bss->uprobe_multi_func_1_addr = (__u64) uprobe_multi_func_1;
1141 skel->bss->uprobe_multi_func_2_addr = (__u64) uprobe_multi_func_2;
1142 skel->bss->uprobe_multi_func_3_addr = (__u64) uprobe_multi_func_3;
1144 uprobe_multi_func_1();
1145 uprobe_multi_func_2();
1146 uprobe_multi_func_3();
1149 * We expect 2 for uprobe_multi_func_2 because it runs both entry/return probe,
1150 * uprobe_multi_func_[13] run just the entry probe. All expected numbers are
1151 * doubled, because we run extra test for sleepable session.
1153 ASSERT_EQ(skel->bss->uprobe_session_result[0], 2, "uprobe_multi_func_1_result");
1154 ASSERT_EQ(skel->bss->uprobe_session_result[1], 4, "uprobe_multi_func_2_result");
1155 ASSERT_EQ(skel->bss->uprobe_session_result[2], 2, "uprobe_multi_func_3_result");
1157 /* We expect increase in 3 entry and 1 return session calls -> 4 */
1158 ASSERT_EQ(skel->bss->uprobe_multi_sleep_result, 4, "uprobe_multi_sleep_result");
1161 bpf_link__destroy(link);
1162 uprobe_multi_session__destroy(skel);
/* Single-function session subtest: three consumers on one function, differing
 * in whether their session handler requests the return probe.
 * NOTE(review): 'opts' is declared but unused on the visible lines.
 */
1165 static void test_session_single_skel_api(void)
1167 struct uprobe_multi_session_single *skel = NULL;
1168 LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
1171 skel = uprobe_multi_session_single__open_and_load();
1172 if (!ASSERT_OK_PTR(skel, "uprobe_multi_session_single__open_and_load"))
1175 skel->bss->pid = getpid();
1177 err = uprobe_multi_session_single__attach(skel);
1178 if (!ASSERT_OK(err, "uprobe_multi_session_single__attach"))
1181 uprobe_multi_func_1();
1184 * We expect consumer 0 and 2 to trigger just entry handler (value 1)
1185 * and consumer 1 to hit both (value 2).
1187 ASSERT_EQ(skel->bss->uprobe_session_result[0], 1, "uprobe_session_result_0");
1188 ASSERT_EQ(skel->bss->uprobe_session_result[1], 2, "uprobe_session_result_1");
1189 ASSERT_EQ(skel->bss->uprobe_session_result[2], 1, "uprobe_session_result_2");
1192 uprobe_multi_session_single__destroy(skel);
/* Session-cookie subtest: each probe writes a distinct cookie value between
 * entry and return; results 1/2/3 confirm the per-call cookie survived.
 */
1195 static void test_session_cookie_skel_api(void)
1197 struct uprobe_multi_session_cookie *skel = NULL;
1200 skel = uprobe_multi_session_cookie__open_and_load();
1201 if (!ASSERT_OK_PTR(skel, "uprobe_multi_session_cookie__open_and_load"))
1204 skel->bss->pid = getpid();
1206 err = uprobe_multi_session_cookie__attach(skel);
1207 if (!ASSERT_OK(err, "uprobe_multi_session_cookie__attach"))
1210 /* trigger all probes */
1211 uprobe_multi_func_1();
1212 uprobe_multi_func_2();
1213 uprobe_multi_func_3();
1215 ASSERT_EQ(skel->bss->test_uprobe_1_result, 1, "test_uprobe_1_result");
1216 ASSERT_EQ(skel->bss->test_uprobe_2_result, 2, "test_uprobe_2_result");
1217 ASSERT_EQ(skel->bss->test_uprobe_3_result, 3, "test_uprobe_3_result");
1220 uprobe_multi_session_cookie__destroy(skel);
/* Recursive session subtest: drive uprobe_session_recursive(5) and verify
 * cookie values observed on the return path follow the ladder documented in
 * the comment below (return probe runs only when entry returned nonzero).
 */
1223 static void test_session_recursive_skel_api(void)
1225 struct uprobe_multi_session_recursive *skel = NULL;
1228 skel = uprobe_multi_session_recursive__open_and_load();
1229 if (!ASSERT_OK_PTR(skel, "uprobe_multi_session_recursive__open_and_load"))
1232 skel->bss->pid = getpid();
1234 err = uprobe_multi_session_recursive__attach(skel);
1235 if (!ASSERT_OK(err, "uprobe_multi_session_recursive__attach"))
1238 for (i = 0; i < ARRAY_SIZE(skel->bss->test_uprobe_cookie_entry); i++)
1239 skel->bss->test_uprobe_cookie_entry[i] = i + 1;
1241 uprobe_session_recursive(5);
1245 * uprobe_session_recursive(5) { *cookie = 1, return 0
1246 * uprobe_session_recursive(4) { *cookie = 2, return 1
1247 * uprobe_session_recursive(3) { *cookie = 3, return 0
1248 * uprobe_session_recursive(2) { *cookie = 4, return 1
1249 * uprobe_session_recursive(1) { *cookie = 5, return 0
1250 * uprobe_session_recursive(0) { *cookie = 6, return 1
1252 * } i = 0 not executed
1253 * } i = 1 test_uprobe_cookie_return[0] = 5
1254 * } i = 2 not executed
1255 * } i = 3 test_uprobe_cookie_return[1] = 3
1256 * } i = 4 not executed
1257 * } i = 5 test_uprobe_cookie_return[2] = 1
1260 ASSERT_EQ(skel->bss->idx_entry, 6, "idx_entry");
1261 ASSERT_EQ(skel->bss->idx_return, 3, "idx_return");
1263 ASSERT_EQ(skel->bss->test_uprobe_cookie_return[0], 5, "test_uprobe_cookie_return[0]");
1264 ASSERT_EQ(skel->bss->test_uprobe_cookie_return[1], 3, "test_uprobe_cookie_return[1]");
1265 ASSERT_EQ(skel->bss->test_uprobe_cookie_return[2], 1, "test_uprobe_cookie_return[2]");
1268 uprobe_multi_session_recursive__destroy(skel);
/* Benchmark: time attach/detach of the uprobe_multi_bench skeleton and run
 * the external ./uprobe_multi binary to trigger 50000 uprobe hits.
 * NOTE(review): system() return value is ignored; if the helper binary is
 * missing the count assertion below is what catches it.
 */
1271 static void test_bench_attach_uprobe(void)
1273 long attach_start_ns = 0, attach_end_ns = 0;
1274 struct uprobe_multi_bench *skel = NULL;
1275 long detach_start_ns, detach_end_ns;
1276 double attach_delta, detach_delta;
1279 skel = uprobe_multi_bench__open_and_load();
1280 if (!ASSERT_OK_PTR(skel, "uprobe_multi_bench__open_and_load"))
1283 attach_start_ns = get_time_ns();
1285 err = uprobe_multi_bench__attach(skel);
1286 if (!ASSERT_OK(err, "uprobe_multi_bench__attach"))
1289 attach_end_ns = get_time_ns();
1291 system("./uprobe_multi bench");
1293 ASSERT_EQ(skel->bss->count, 50000, "uprobes_count");
1296 detach_start_ns = get_time_ns();
1297 uprobe_multi_bench__destroy(skel);
1298 detach_end_ns = get_time_ns();
1300 attach_delta = (attach_end_ns - attach_start_ns) / 1000000000.0;
1301 detach_delta = (detach_end_ns - detach_start_ns) / 1000000000.0;
1303 printf("%s: attached in %7.3lfs\n", __func__, attach_delta);
1304 printf("%s: detached in %7.3lfs\n", __func__, detach_delta);
/* Benchmark: same as test_bench_attach_uprobe() but for a USDT attachment on
 * the external ./uprobe_multi binary (50000 expected USDT hits).
 */
1307 static void test_bench_attach_usdt(void)
1309 long attach_start_ns = 0, attach_end_ns = 0;
1310 struct uprobe_multi_usdt *skel = NULL;
1311 long detach_start_ns, detach_end_ns;
1312 double attach_delta, detach_delta;
1314 skel = uprobe_multi_usdt__open_and_load();
1315 if (!ASSERT_OK_PTR(skel, "uprobe_multi__open"))
1318 attach_start_ns = get_time_ns();
1320 skel->links.usdt0 = bpf_program__attach_usdt(skel->progs.usdt0, -1, "./uprobe_multi",
1321 "test", "usdt", NULL);
1322 if (!ASSERT_OK_PTR(skel->links.usdt0, "bpf_program__attach_usdt"))
1325 attach_end_ns = get_time_ns();
1327 system("./uprobe_multi usdt");
1329 ASSERT_EQ(skel->bss->count, 50000, "usdt_count");
1332 detach_start_ns = get_time_ns();
1333 uprobe_multi_usdt__destroy(skel);
1334 detach_end_ns = get_time_ns();
1336 attach_delta = (attach_end_ns - attach_start_ns) / 1000000000.0;
1337 detach_delta = (detach_end_ns - detach_start_ns) / 1000000000.0;
1339 printf("%s: attached in %7.3lfs\n", __func__, attach_delta);
1340 printf("%s: detached in %7.3lfs\n", __func__, detach_delta);
/* Top-level test entry: dispatch each subtest behind test__start_subtest()
 * gating, then run the verifier-only programs via RUN_TESTS.
 */
1343 void test_uprobe_multi_test(void)
1345 if (test__start_subtest("skel_api"))
1347 if (test__start_subtest("attach_api_pattern"))
1348 test_attach_api_pattern();
1349 if (test__start_subtest("attach_api_syms"))
1350 test_attach_api_syms();
1351 if (test__start_subtest("link_api"))
1353 if (test__start_subtest("bench_uprobe"))
1354 test_bench_attach_uprobe();
1355 if (test__start_subtest("bench_usdt"))
1356 test_bench_attach_usdt();
1357 if (test__start_subtest("attach_api_fails"))
1358 test_attach_api_fails();
1359 if (test__start_subtest("attach_uprobe_fails"))
1360 test_attach_uprobe_fails();
1361 if (test__start_subtest("consumers"))
1363 if (test__start_subtest("filter_fork"))
1364 test_pid_filter_process(false);
1365 if (test__start_subtest("filter_clone_vm"))
1366 test_pid_filter_process(true);
1367 if (test__start_subtest("session"))
1368 test_session_skel_api();
1369 if (test__start_subtest("session_single"))
1370 test_session_single_skel_api();
1371 if (test__start_subtest("session_cookie"))
1372 test_session_cookie_skel_api();
1373 if (test__start_subtest("session_cookie_recursive"))
1374 test_session_recursive_skel_api();
1375 RUN_TESTS(uprobe_multi_verifier);