// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */

#include <test_progs.h>
#include "uretprobe_stack.skel.h"
#include "../sdt.h"

/* We set up a target_1() -> target_2() -> target_3() -> target_4() -> USDT()
 * call chain, each function being traced by our BPF program. On entry to or
 * return from each target_*() we capture a user stack trace and record it in
 * a global variable, so that the user space part of the test can validate it.
 *
 * Note, we put each target function into a custom section to get the
 * __start_XXX/__stop_XXX symbols generated for us by the linker, which let us
 * know the address range of each of those functions.
 */
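
/* For reference, a minimal sketch of that linker trick (illustrative only;
 * "mysec" and fn() are made-up names, not part of this test): any section
 * whose name is a valid C identifier gets linker-synthesized boundary
 * symbols:
 *
 *   __attribute__((section("mysec"))) int fn(void) { return 0; }
 *   extern const void *__start_mysec, *__stop_mysec;
 *   // then: (long)&__start_mysec <= (long)&fn && (long)&fn < (long)&__stop_mysec
 */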
__attribute__((section("uprobe__target_4")))
__weak int target_4(void)
{
	STAP_PROBE1(uretprobe_stack, target, 42);
	return 42;
}

extern const void *__start_uprobe__target_4;
extern const void *__stop_uprobe__target_4;

__attribute__((section("uprobe__target_3")))
__weak int target_3(void)
{
	return target_4();
}

extern const void *__start_uprobe__target_3;
extern const void *__stop_uprobe__target_3;

__attribute__((section("uprobe__target_2")))
__weak int target_2(void)
{
	return target_3();
}

extern const void *__start_uprobe__target_2;
extern const void *__stop_uprobe__target_2;

__attribute__((section("uprobe__target_1")))
__weak int target_1(int depth)
{
	if (depth < 1)
		return 1 + target_1(depth + 1);
	else
		return target_2();
}

extern const void *__start_uprobe__target_1;
extern const void *__stop_uprobe__target_1;

extern const void *__start_uretprobe_stack_sec;
extern const void *__stop_uretprobe_stack_sec;

struct range {
	long start;
	long stop;
};

static struct range targets[] = {
	{}, /* we want target_1 to map to targets[1], so we need 1-based indexing */
	{ (long)&__start_uprobe__target_1, (long)&__stop_uprobe__target_1 },
	{ (long)&__start_uprobe__target_2, (long)&__stop_uprobe__target_2 },
	{ (long)&__start_uprobe__target_3, (long)&__stop_uprobe__target_3 },
	{ (long)&__start_uprobe__target_4, (long)&__stop_uprobe__target_4 },
};

static struct range caller = {
	(long)&__start_uretprobe_stack_sec,
	(long)&__stop_uretprobe_stack_sec,
};

static void validate_stack(__u64 *ips, int stack_len, int cnt, ...)
{
	int i, j;
	va_list args;

	if (!ASSERT_GT(stack_len, 0, "stack_len"))
		return;

	stack_len /= 8; /* byte length -> number of 8-byte (__u64) IP entries */

	/* check if we have enough entries to satisfy test expectations */
	if (!ASSERT_GE(stack_len, cnt, "stack_len2"))
		return;

	if (env.verbosity >= VERBOSE_NORMAL) {
		printf("caller: %#lx - %#lx\n", caller.start, caller.stop);
		for (i = 1; i < ARRAY_SIZE(targets); i++)
			printf("target_%d: %#lx - %#lx\n", i, targets[i].start, targets[i].stop);
		for (i = 0; i < stack_len; i++) {
			for (j = 1; j < ARRAY_SIZE(targets); j++) {
				if (ips[i] >= targets[j].start && ips[i] < targets[j].stop)
					break;
			}
			if (j < ARRAY_SIZE(targets)) { /* found target match */
				printf("ENTRY #%d: %#lx (in target_%d)\n", i, (long)ips[i], j);
			} else if (ips[i] >= caller.start && ips[i] < caller.stop) {
				printf("ENTRY #%d: %#lx (in caller)\n", i, (long)ips[i]);
			} else {
				printf("ENTRY #%d: %#lx\n", i, (long)ips[i]);
			}
		}
	}

	va_start(args, cnt);

	for (i = cnt - 1; i >= 0; i--) {
		/* most recent entry is the deepest target function */
		const struct range *t = va_arg(args, const struct range *);

		ASSERT_GE(ips[i], t->start, "addr_start");
		ASSERT_LT(ips[i], t->stop, "addr_stop");
	}

	va_end(args);
}
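
/* An illustrative note on the varargs convention above (editorial, not from
 * the original source): ranges are passed outermost-first, while ips[] is
 * most-recent-first. E.g., for a captured chain caller -> target_1 ->
 * target_2:
 *
 *   validate_stack(ips, len, 3, &caller, &targets[1], &targets[2]);
 *
 * checks ips[2] against caller, ips[1] against target_1, and ips[0]
 * (the deepest, most recent frame) against target_2.
 */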

/* __weak prevents inlining, keeping this function's code (and thus its stack
 * trace IPs) inside the uretprobe_stack_sec address range
 */
__attribute__((section("uretprobe_stack_sec")))
__weak void test_uretprobe_stack(void)
{
	LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
	struct uretprobe_stack *skel;
	int err;

	skel = uretprobe_stack__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	err = uretprobe_stack__attach(skel);
	if (!ASSERT_OK(err, "skel_attach"))
		goto cleanup;
	/* trigger the chain; target_4() returns 42 and the extra recursive
	 * target_1() call adds 1
	 */
	ASSERT_EQ(target_1(0), 42 + 1, "trigger_return");

	/*
	 * Stacks captured on ENTRY uprobes
	 */

	/* (uprobe 1) caller -> target_1 in stack trace */
	validate_stack(skel->bss->entry_stack1, skel->bss->entry1_len,
		       2, &caller, &targets[1]);
	/* (uprobe 1, recursed) caller -> target_1 -> target_1 */
	validate_stack(skel->bss->entry_stack1_recur, skel->bss->entry1_recur_len,
		       3, &caller, &targets[1], &targets[1]);
	/* (uprobe 2) caller -> target_1 -> target_1 -> target_2 */
	validate_stack(skel->bss->entry_stack2, skel->bss->entry2_len,
		       4, &caller, &targets[1], &targets[1], &targets[2]);
	/* (uprobe 3) caller -> target_1 -> target_1 -> target_2 -> target_3 */
	validate_stack(skel->bss->entry_stack3, skel->bss->entry3_len,
		       5, &caller, &targets[1], &targets[1], &targets[2], &targets[3]);
	/* (uprobe 4) caller -> target_1 -> target_1 -> target_2 -> target_3 -> target_4 */
	validate_stack(skel->bss->entry_stack4, skel->bss->entry4_len,
		       6, &caller, &targets[1], &targets[1], &targets[2], &targets[3], &targets[4]);

	/* (USDT) full caller -> target_1 -> target_1 -> target_2
	 * -> target_3 -> target_4 chain
	 */
	validate_stack(skel->bss->usdt_stack, skel->bss->usdt_len,
		       6, &caller, &targets[1], &targets[1], &targets[2], &targets[3], &targets[4]);

	/*
	 * Now stacks captured on the way out in EXIT uprobes
	 */

	/* (uretprobe 4) everything up to target_4, but excluding it */
	validate_stack(skel->bss->exit_stack4, skel->bss->exit4_len,
		       5, &caller, &targets[1], &targets[1], &targets[2], &targets[3]);
	/* we didn't install uretprobes on target_2 and target_3 */
	/* (uretprobe 1, recur) first target_1 call only */
	validate_stack(skel->bss->exit_stack1_recur, skel->bss->exit1_recur_len,
		       2, &caller, &targets[1]);
	/* (uretprobe 1) just the caller in the stack trace */
	validate_stack(skel->bss->exit_stack1, skel->bss->exit1_len,
		       1, &caller);

cleanup:
	uretprobe_stack__destroy(skel);
}
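
/* Illustrative usage note (editorial, not from the original source): with
 * the standard BPF selftests runner, this test can be exercised on its own
 * with something like:
 *
 *   ./test_progs -t uretprobe_stack -v
 *
 * where -v raises env.verbosity to VERBOSE_NORMAL and enables the per-entry
 * stack dump in validate_stack().
 */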