// SPDX-License-Identifier: GPL-2.0
/* Include in trace.c */

#include <uapi/linux/sched/types.h>
#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}
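
/*
 * Note: trace_valid_entry() is effectively a whitelist. It must cover
 * every entry type the selftests below can leave in the ring buffer;
 * trace_test_buffer_cpu() treats anything else as buffer corruption.
 */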

static int trace_test_buffer_cpu(struct array_buffer *buf, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer is of size trace_buf_size; if we loop
		 * more times than that, there's something wrong with
		 * the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
			       entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int __maybe_unused trace_test_buffer(struct array_buffer *buf, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&buf->tr->max_lock);

	cnt = ring_buffer_entries(buf->buffer);

	/*
	 * trace_test_buffer_cpu() runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lockup.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(buf, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&buf->tr->max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
					 unsigned long pip,
					 struct ftrace_ops *op,
					 struct pt_regs *pt_regs)
{
	trace_selftest_test_dyn_cnt++;
}

static struct ftrace_ops test_probe1 = {
	.func			= trace_selftest_test_probe1_func,
};

static struct ftrace_ops test_probe2 = {
	.func			= trace_selftest_test_probe2_func,
};

static struct ftrace_ops test_probe3 = {
	.func			= trace_selftest_test_probe3_func,
};

static void print_counts(void)
{
	printk("(%d %d %d %d %d) ",
	       trace_selftest_test_probe1_cnt,
	       trace_selftest_test_probe2_cnt,
	       trace_selftest_test_probe3_cnt,
	       trace_selftest_test_global_cnt,
	       trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
	trace_selftest_test_probe1_cnt = 0;
	trace_selftest_test_probe2_cnt = 0;
	trace_selftest_test_probe3_cnt = 0;
	trace_selftest_test_global_cnt = 0;
	trace_selftest_test_dyn_cnt = 0;
}

static int trace_selftest_ops(struct trace_array *tr, int cnt)
{
	int save_ftrace_enabled = ftrace_enabled;
	struct ftrace_ops *dyn_ops;
	char *func1_name;
	char *func2_name;
	int len1;
	int len2;
	int ret = -1;

	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace ops #%d: ", cnt);

	ftrace_enabled = 1;
	reset_counts();

	/* Handle PPC64 '.' name */
	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
	len1 = strlen(func1_name);
	len2 = strlen(func2_name);

	/*
	 * Probe 1 will trace function 1.
	 * Probe 2 will trace function 2.
	 * Probe 3 will trace functions 1 and 2.
	 */
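	/*
	 * The last argument to ftrace_set_filter() is the 'reset' flag:
	 * 1 replaces whatever filter the ops had before, 0 appends to
	 * it. That is why probe 3 sets function 1 with reset and then
	 * adds function 2 without it.
	 */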
	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
	ftrace_set_filter(&test_probe3, func2_name, len2, 0);

	register_ftrace_function(&test_probe1);
	register_ftrace_function(&test_probe2);
	register_ftrace_function(&test_probe3);
	/* First time we are running with main function */
	if (cnt > 1) {
		ftrace_init_array_ops(tr, trace_selftest_test_global_func);
		register_ftrace_function(tr->ops);
	}

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 0)
		goto out;
	if (trace_selftest_test_probe3_cnt != 1)
		goto out;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out;
	}

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out;
	if (trace_selftest_test_probe3_cnt != 2)
		goto out;

	/* Add a dynamic probe */
	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
	if (!dyn_ops) {
		printk("MEMORY ERROR ");
		goto out;
	}

	dyn_ops->func = trace_selftest_test_dyn_func;

	register_ftrace_function(dyn_ops);

	trace_selftest_test_global_cnt = 0;

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 3)
		goto out_free;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out_free;
	}
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;

	ret = 0;
 out_free:
	unregister_ftrace_function(dyn_ops);
	kfree(dyn_ops);

 out:
	/* Purposely unregister in the same order */
	unregister_ftrace_function(&test_probe1);
	unregister_ftrace_function(&test_probe2);
	unregister_ftrace_function(&test_probe3);
	if (cnt > 1)
		unregister_ftrace_function(tr->ops);
	ftrace_reset_array_ops(tr);

	/* Make sure everything is off */
	reset_counts();
	DYN_FTRACE_TEST_NAME();
	DYN_FTRACE_TEST_NAME();

	if (trace_selftest_test_probe1_cnt ||
	    trace_selftest_test_probe2_cnt ||
	    trace_selftest_test_probe3_cnt ||
	    trace_selftest_test_global_cnt ||
	    trace_selftest_test_dyn_cnt)
		ret = -1;

	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/* Test dynamic code modification and ftrace filters */
static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
						  struct trace_array *tr,
						  int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* the function is passed in by parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	ftrace_enabled = 1;
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		trace->reset(tr);
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

	/* Test the ops with global tracing running */
	ret = trace_selftest_ops(tr, 1);
	trace->reset(tr);

 out:
	ftrace_enabled = save_ftrace_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);

	/* Test the ops with global tracing off */
	if (!ret)
		ret = trace_selftest_ops(tr, 2);

	return ret;
}

static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
					       unsigned long pip,
					       struct ftrace_ops *op,
					       struct pt_regs *pt_regs)
{
	/*
	 * This function is registered without the recursion safe flag.
	 * The ftrace infrastructure should provide the recursion
	 * protection. If not, this will crash the kernel!
	 */
	if (trace_selftest_recursion_cnt++ > 10)
		return;
	DYN_FTRACE_TEST_NAME();
}

static void trace_selftest_test_recursion_safe_func(unsigned long ip,
						    unsigned long pip,
						    struct ftrace_ops *op,
						    struct pt_regs *pt_regs)
{
	/*
	 * We said we would provide our own recursion. By calling
	 * this function again, we should recurse back into this function
	 * and count again. But this only happens if the arch supports
	 * all of ftrace features and nothing else is using the function
	 * tracing utility.
	 */
	if (trace_selftest_recursion_cnt++)
		return;
	DYN_FTRACE_TEST_NAME();
}

static struct ftrace_ops test_rec_probe = {
	.func = trace_selftest_test_recursion_func,
	.flags = FTRACE_OPS_FL_RECURSION,
};
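
/*
 * Under the current flag semantics (after the RECURSION flag rename in
 * the ftrace core), setting FTRACE_OPS_FL_RECURSION, as above, asks the
 * ftrace core to wrap the callback in recursion protection; leaving it
 * clear, as for test_recsafe_probe below, means the callback is expected
 * to handle recursion itself.
 */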

static struct ftrace_ops test_recsafe_probe = {
	.func = trace_selftest_test_recursion_safe_func,
};

static int
trace_selftest_function_recursion(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_rec_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_rec_probe);

	ret = -1;
	/*
	 * Recursion allows for transitions between contexts,
	 * and may call the callback twice.
	 */
	if (trace_selftest_recursion_cnt != 1 &&
	    trace_selftest_recursion_cnt != 2) {
		pr_cont("*callback not called once (or twice) (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	trace_selftest_recursion_cnt = 1;

	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion safe: ");

	ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_recsafe_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_recsafe_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 2) {
		pr_cont("*callback not called expected 2 times (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	ret = 0;
 out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
# define trace_selftest_function_recursion() ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

static enum {
	TRACE_SELFTEST_REGS_START,
	TRACE_SELFTEST_REGS_FOUND,
	TRACE_SELFTEST_REGS_NOT_FOUND,
} trace_selftest_regs_stat;

static void trace_selftest_test_regs_func(unsigned long ip,
					  unsigned long pip,
					  struct ftrace_ops *op,
					  struct pt_regs *pt_regs)
{
	if (pt_regs)
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
	else
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
}

static struct ftrace_ops test_regs_probe = {
	.func = trace_selftest_test_regs_func,
	.flags = FTRACE_OPS_FL_SAVE_REGS,
};
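
/*
 * FTRACE_OPS_FL_SAVE_REGS makes registration fail on architectures
 * without full regs support (no CONFIG_DYNAMIC_FTRACE_WITH_REGS).
 * Adding FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED, as the test below does
 * on such architectures, lets registration succeed with pt_regs
 * possibly NULL in the callback.
 */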

static int
trace_selftest_function_regs(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;
	int supported = 0;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	supported = 1;
#endif

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace regs%s: ",
		!supported ? "(no arch support)" : "");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
	/*
	 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
	 * This test really doesn't care.
	 */
	if (ret && ret != -ENODEV) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_regs_probe);
	/*
	 * Now if the arch does not support passing regs, then this should
	 * have failed.
	 */
	if (!supported) {
		if (!ret) {
			pr_cont("*registered save-regs without arch support* ");
			goto out;
		}
		test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
		ret = register_ftrace_function(&test_regs_probe);
	}
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_regs_probe);

	ret = -1;

	switch (trace_selftest_regs_stat) {
	case TRACE_SELFTEST_REGS_START:
		pr_cont("*callback never called* ");
		goto out;

	case TRACE_SELFTEST_REGS_FOUND:
		if (supported)
			break;
		pr_cont("*callback received regs without arch support* ");
		goto out;

	case TRACE_SELFTEST_REGS_NOT_FOUND:
		if (!supported)
			break;
		pr_cont("*callback received NULL regs* ");
		goto out;
	}

	ret = 0;
 out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
__init int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	ftrace_enabled = 1;
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);
	if (ret)
		goto out;

	ret = trace_selftest_function_recursion();
	if (ret)
		goto out;

	ret = trace_selftest_function_regs();
 out:
	ftrace_enabled = save_ftrace_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops) {
			ftrace_dump(DUMP_ALL);
			/* ftrace_dump() disables tracing */
			tracing_on();
		}
		return 0;
	}

	return trace_graph_entry(trace);
}

static struct fgraph_ops fgraph_ops __initdata  = {
	.entryfunc		= &trace_graph_entry_watchdog,
	.retfunc		= &trace_graph_return,
};
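
/*
 * A single fgraph_ops carries both halves of the graph tracer: the
 * entry handler (here wrapped by the hang watchdog above) and the
 * return handler, registered and unregistered as a pair via
 * register_ftrace_graph()/unregister_ftrace_graph() below.
 */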

/*
 * Pretty much the same as the function tracer selftest, from which
 * this one has been borrowed.
 */
__init int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(&tr->array_buffer);
	set_graph_array(tr);
	ret = register_ftrace_graph(&fgraph_ops);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	/* Need to also simulate the tr->reset to remove this fgraph_ops */
	tracing_stop_cmdline_record();
	unregister_ftrace_graph(&fgraph_ops);

	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

 out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tr->max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs/preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tr->max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	tracing_start();
 out_no_start:
	trace->reset(tr);
	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER

struct wakeup_test_data {
	struct completion	is_ready;
	int			go;
};
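
/*
 * The wakeup test handshake: is_ready is completed by the child both
 * after it has switched scheduling policy and again once it is truly
 * awake, while 'go' guards the sleep loop below against spurious
 * wakeups.
 */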

static int trace_wakeup_test_thread(void *data)
{
	/* Make this a -deadline thread */
	static const struct sched_attr attr = {
		.sched_policy = SCHED_DEADLINE,
		.sched_runtime = 100000ULL,
		.sched_deadline = 10000000ULL,
		.sched_period = 10000000ULL
	};
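	/*
	 * Note: sched_attr times are in nanoseconds, so this asks for
	 * 100us of runtime every 10ms, with the deadline equal to the
	 * period.
	 */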
	struct wakeup_test_data *x = data;

	sched_setattr(current, &attr);

	/* Make it know we have a new prio */
	complete(&x->is_ready);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!x->go) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}

	complete(&x->is_ready);

	set_current_state(TASK_INTERRUPTIBLE);

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}

	__set_current_state(TASK_RUNNING);

	return 0;
}
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	struct task_struct *p;
	struct wakeup_test_data data;
	unsigned long count;
	int ret;

	memset(&data, 0, sizeof(data));

	init_completion(&data.is_ready);

	/* create a -deadline thread */
	p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at -deadline policy */
	wait_for_completion(&data.is_ready);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;

	while (p->on_rq) {
		/*
		 * Sleep to make sure the -deadline thread is asleep too.
		 * On virtual machines we can't rely on timings,
		 * but we want to make sure this test still works.
		 */
		msleep(100);
	}

	init_completion(&data.is_ready);

	data.go = 1;
	/* memory barrier is in the wake_up_process() */

	wake_up_process(p);

	/* Wait for the task to wake up */
	wait_for_completion(&data.is_ready);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);

	trace->reset(tr);
	tracing_start();

	tr->max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */