// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * test_kprobes.c - simple sanity test for *probes
 *
 * Copyright IBM Corp. 2008
 */

#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/random.h>
#include <kunit/test.h>

#define div_factor 3

static u32 rand1, preh_val, posth_val;
static u32 (*target)(u32 value);
static u32 (*target2)(u32 value);
static struct kunit *current_test;

static unsigned long (*internal_target)(void);
static unsigned long (*stacktrace_target)(void);
static unsigned long (*stacktrace_driver)(void);
static unsigned long target_return_address[2];

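/*
 * The probed functions below are only ever called through the function
 * pointers set up in kprobes_test_init(); the indirection keeps the
 * compiler from inlining or folding the calls away, so the probes
 * actually fire when the tests run.
 */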
static noinline u32 kprobe_target(u32 value)
{
	return (value / div_factor);
}

static int kp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	KUNIT_EXPECT_FALSE(current_test, preemptible());
	preh_val = (rand1 / div_factor);
	return 0;
}

static void kp_post_handler(struct kprobe *p, struct pt_regs *regs,
		unsigned long flags)
{
	KUNIT_EXPECT_FALSE(current_test, preemptible());
	KUNIT_EXPECT_EQ(current_test, preh_val, (rand1 / div_factor));
	posth_val = preh_val + div_factor;
}

static struct kprobe kp = {
	.symbol_name = "kprobe_target",
	.pre_handler = kp_pre_handler,
	.post_handler = kp_post_handler
};

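/*
 * Basic kprobe test: register kp on kprobe_target(), call the target once
 * through the function pointer, unregister, then check that both handlers
 * ran (they leave nonzero values in preh_val and posth_val).
 */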
static void test_kprobe(struct kunit *test)
{
	current_test = test;
	KUNIT_EXPECT_EQ(test, 0, register_kprobe(&kp));
	target(rand1);
	unregister_kprobe(&kp);
	KUNIT_EXPECT_NE(test, 0, preh_val);
	KUNIT_EXPECT_NE(test, 0, posth_val);
}

static noinline u32 kprobe_target2(u32 value)
{
	return (value / div_factor) + 1;
}

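/*
 * Helpers for the CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE tests below.
 * Each target records its caller's return address on first use so the
 * kretprobe handlers can later verify that stack traces contain the real
 * return addresses rather than the kretprobe trampoline.
 */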
static noinline unsigned long kprobe_stacktrace_internal_target(void)
{
	if (!target_return_address[0])
		target_return_address[0] = (unsigned long)__builtin_return_address(0);
	return target_return_address[0];
}

static noinline unsigned long kprobe_stacktrace_target(void)
{
	if (!target_return_address[1])
		target_return_address[1] = (unsigned long)__builtin_return_address(0);

	if (internal_target)
		internal_target();

	return target_return_address[1];
}

static noinline unsigned long kprobe_stacktrace_driver(void)
{
	if (stacktrace_target)
		stacktrace_target();

	/* This is to prevent this function from being inlined. */
	return (unsigned long)__builtin_return_address(0);
}

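/* A second kprobe on kprobe_target2(), used by test_kprobes() below. */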
static int kp_pre_handler2(struct kprobe *p, struct pt_regs *regs)
{
	preh_val = (rand1 / div_factor) + 1;
	return 0;
}

static void kp_post_handler2(struct kprobe *p, struct pt_regs *regs,
		unsigned long flags)
{
	KUNIT_EXPECT_EQ(current_test, preh_val, (rand1 / div_factor) + 1);
	posth_val = preh_val + div_factor;
}

static struct kprobe kp2 = {
	.symbol_name = "kprobe_target2",
	.pre_handler = kp_pre_handler2,
	.post_handler = kp_post_handler2
};

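/*
 * Register kp and kp2 in a single register_kprobes() call, exercise both
 * targets, and verify that the handlers of each probe fired.
 */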
static void test_kprobes(struct kunit *test)
{
	struct kprobe *kps[2] = {&kp, &kp2};

	current_test = test;

	/* addr and flags should be cleared before reusing a kprobe. */
	kp.addr = NULL;
	kp.flags = 0;

	KUNIT_EXPECT_EQ(test, 0, register_kprobes(kps, 2));
	preh_val = 0;
	posth_val = 0;
	target(rand1);

	KUNIT_EXPECT_NE(test, 0, preh_val);
	KUNIT_EXPECT_NE(test, 0, posth_val);

	preh_val = 0;
	posth_val = 0;
	target2(rand1);

	KUNIT_EXPECT_NE(test, 0, preh_val);
	KUNIT_EXPECT_NE(test, 0, posth_val);
	unregister_kprobes(kps, 2);
}

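/*
 * kretprobe tests: the entry handler records a value derived from rand1 and
 * the return handler checks it, together with the probed function's return
 * value taken from pt_regs.
 */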
#ifdef CONFIG_KRETPROBES
static u32 krph_val;

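/* Shared entry handler for rp and rp2. */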
static int entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	KUNIT_EXPECT_FALSE(current_test, preemptible());
	krph_val = (rand1 / div_factor);
	return 0;
}

static int return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	unsigned long ret = regs_return_value(regs);

	KUNIT_EXPECT_FALSE(current_test, preemptible());
	KUNIT_EXPECT_EQ(current_test, ret, rand1 / div_factor);
	KUNIT_EXPECT_NE(current_test, krph_val, 0);
	krph_val = rand1;
	return 0;
}

static struct kretprobe rp = {
	.handler = return_handler,
	.entry_handler = entry_handler,
	.kp.symbol_name = "kprobe_target"
};

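/*
 * Basic kretprobe test: register rp on kprobe_target(), call the target,
 * and check that return_handler() ran and saw the expected return value
 * (krph_val ends up equal to rand1 only if it did).
 */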
static void test_kretprobe(struct kunit *test)
{
	current_test = test;
	KUNIT_EXPECT_EQ(test, 0, register_kretprobe(&rp));
	target(rand1);
	unregister_kretprobe(&rp);
	KUNIT_EXPECT_EQ(test, krph_val, rand1);
}

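/* A second kretprobe on kprobe_target2(), used by test_kretprobes() below. */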
static int return_handler2(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	unsigned long ret = regs_return_value(regs);

	KUNIT_EXPECT_EQ(current_test, ret, (rand1 / div_factor) + 1);
	KUNIT_EXPECT_NE(current_test, krph_val, 0);
	krph_val = rand1;
	return 0;
}

static struct kretprobe rp2 = {
	.handler = return_handler2,
	.entry_handler = entry_handler,
	.kp.symbol_name = "kprobe_target2"
};

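/*
 * Register rp and rp2 in a single register_kretprobes() call and verify the
 * return handler of each probe fired.
 */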
static void test_kretprobes(struct kunit *test)
{
	struct kretprobe *rps[2] = {&rp, &rp2};

	current_test = test;
	/* addr and flags should be cleared before reusing a kprobe. */
	rp.kp.addr = NULL;
	rp.kp.flags = 0;
	KUNIT_EXPECT_EQ(test, 0, register_kretprobes(rps, 2));

	krph_val = 0;
	target(rand1);
	KUNIT_EXPECT_EQ(test, krph_val, rand1);

	krph_val = 0;
	target2(rand1);
	KUNIT_EXPECT_EQ(test, krph_val, rand1);
	unregister_kretprobes(rps, 2);
}

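/*
 * On architectures that select CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE,
 * check that stack traces taken from inside a kretprobe handler show the
 * probed function's real return address instead of the kretprobe trampoline.
 */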
#ifdef CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
#define STACK_BUF_SIZE 16
static unsigned long stack_buf[STACK_BUF_SIZE];

static int stacktrace_return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	unsigned long retval = regs_return_value(regs);
	int i, ret;

	KUNIT_EXPECT_FALSE(current_test, preemptible());
	KUNIT_EXPECT_EQ(current_test, retval, target_return_address[1]);

	/*
	 * Test a stacktrace taken inside the kretprobe handler. The trace
	 * goes through the kretprobe trampoline, but it must still include
	 * the correct return address of the target function.
	 */
	ret = stack_trace_save(stack_buf, STACK_BUF_SIZE, 0);
	KUNIT_EXPECT_NE(current_test, ret, 0);

	for (i = 0; i < ret; i++) {
		if (stack_buf[i] == target_return_address[1])
			break;
	}
	KUNIT_EXPECT_NE(current_test, i, ret);

#if !IS_MODULE(CONFIG_KPROBES_SANITY_TEST)
	/*
	 * Test a stacktrace taken from pt_regs at the return address. This
	 * stack trace must start from the target return address.
	 */
	ret = stack_trace_save_regs(regs, stack_buf, STACK_BUF_SIZE, 0);
	KUNIT_EXPECT_NE(current_test, ret, 0);
	KUNIT_EXPECT_EQ(current_test, stack_buf[0], target_return_address[1]);
#endif

	return 0;
}

static struct kretprobe rp3 = {
	.handler = stacktrace_return_handler,
	.kp.symbol_name = "kprobe_stacktrace_target"
};

static void test_stacktrace_on_kretprobe(struct kunit *test)
{
	unsigned long myretaddr = (unsigned long)__builtin_return_address(0);

	current_test = test;
	rp3.kp.addr = NULL;
	rp3.kp.flags = 0;

	/*
	 * Run stacktrace_driver() once to record the correct return address
	 * in stacktrace_target(), and verify that stacktrace_driver() was not
	 * inlined by checking that its return address differs from the return
	 * address of this function.
	 */
	KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());

	KUNIT_ASSERT_EQ(test, 0, register_kretprobe(&rp3));
	KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());
	unregister_kretprobe(&rp3);
}

static int stacktrace_internal_return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	unsigned long retval = regs_return_value(regs);
	int i, ret;

	KUNIT_EXPECT_FALSE(current_test, preemptible());
	KUNIT_EXPECT_EQ(current_test, retval, target_return_address[0]);

	/*
	 * Test a stacktrace taken inside the kretprobe handler for the nested
	 * case. The unwinder will find the kretprobe trampoline address in
	 * place of the return address, and the kretprobe code must resolve it
	 * back to the real return address.
	 */
	ret = stack_trace_save(stack_buf, STACK_BUF_SIZE, 0);
	KUNIT_EXPECT_NE(current_test, ret, 0);

	for (i = 0; i < ret - 1; i++) {
		if (stack_buf[i] == target_return_address[0]) {
			KUNIT_EXPECT_EQ(current_test, stack_buf[i + 1], target_return_address[1]);
			break;
		}
	}
	KUNIT_EXPECT_NE(current_test, i, ret);

#if !IS_MODULE(CONFIG_KPROBES_SANITY_TEST)
	/* Ditto for the regs version. */
	ret = stack_trace_save_regs(regs, stack_buf, STACK_BUF_SIZE, 0);
	KUNIT_EXPECT_NE(current_test, ret, 0);
	KUNIT_EXPECT_EQ(current_test, stack_buf[0], target_return_address[0]);
	KUNIT_EXPECT_EQ(current_test, stack_buf[1], target_return_address[1]);
#endif

	return 0;
}

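/*
 * Nested case: rp3 and rp4 are registered together, and stacktrace_target()
 * calls internal_target(), so rp4's handler runs while an outer kretprobe is
 * also active. The handler checks that both recorded return addresses appear
 * in the trace in the right order.
 */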
static struct kretprobe rp4 = {
	.handler = stacktrace_internal_return_handler,
	.kp.symbol_name = "kprobe_stacktrace_internal_target"
};

static void test_stacktrace_on_nested_kretprobe(struct kunit *test)
{
	unsigned long myretaddr = (unsigned long)__builtin_return_address(0);
	struct kretprobe *rps[2] = {&rp3, &rp4};

	current_test = test;
	rp3.kp.addr = NULL;
	rp3.kp.flags = 0;

	//KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());

	KUNIT_ASSERT_EQ(test, 0, register_kretprobes(rps, 2));
	KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());
	unregister_kretprobes(rps, 2);
}
#endif /* CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE */

#endif /* CONFIG_KRETPROBES */

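/*
 * Common init for every test case: wire up the indirect call targets and
 * pick a random rand1 strictly greater than div_factor, so that
 * rand1 / div_factor is never zero (the handlers rely on nonzero values).
 */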
static int kprobes_test_init(struct kunit *test)
{
	target = kprobe_target;
	target2 = kprobe_target2;
	stacktrace_target = kprobe_stacktrace_target;
	internal_target = kprobe_stacktrace_internal_target;
	stacktrace_driver = kprobe_stacktrace_driver;

	do {
		rand1 = prandom_u32();
	} while (rand1 <= div_factor);
	return 0;
}

static struct kunit_case kprobes_testcases[] = {
	KUNIT_CASE(test_kprobe),
	KUNIT_CASE(test_kprobes),
#ifdef CONFIG_KRETPROBES
	KUNIT_CASE(test_kretprobe),
	KUNIT_CASE(test_kretprobes),
#ifdef CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
	KUNIT_CASE(test_stacktrace_on_kretprobe),
	KUNIT_CASE(test_stacktrace_on_nested_kretprobe),
#endif
#endif
	{}
};

static struct kunit_suite kprobes_test_suite = {
	.name = "kprobes_test",
	.init = kprobes_test_init,
	.test_cases = kprobes_testcases,
};

kunit_test_suites(&kprobes_test_suite);

MODULE_LICENSE("GPL");
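
/*
 * One way to run this suite (a sketch, not a definitive recipe): enable
 * CONFIG_KUNIT, CONFIG_KPROBES and CONFIG_KPROBES_SANITY_TEST in a
 * .kunitconfig and invoke the KUnit tool, ./tools/testing/kunit/kunit.py run.
 * Note that the tool's default UML configuration may lack kprobes support,
 * so a kprobes-capable architecture may be required. Alternatively, build
 * the test as a module (CONFIG_KPROBES_SANITY_TEST=m), load it on a running
 * kernel, and read the KTAP results from the kernel log.
 */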