/* Include in trace.c */

#include <linux/kthread.h>
#include <linux/delay.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_SPECIAL:
		return 1;
	}
	return 0;
}

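/*
 * Walk one CPU's trace buffer page by page: every entry that has been
 * written must have a valid type, and the page list must line up with
 * the number of entries the buffer claims to hold.
 */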
static int
trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
{
	struct trace_entry *entries;
	struct page *page;
	int idx = 0;
	int i;

	BUG_ON(list_empty(&data->trace_pages));
	page = list_entry(data->trace_pages.next, struct page, lru);
	entries = page_address(page);

	if (head_page(data) != entries)
		goto failed;

	/*
	 * The starting trace buffer always has valid elements,
	 * if any element exists.
	 */
	entries = head_page(data);

	for (i = 0; i < tr->entries; i++) {

		if (i < data->trace_idx && !trace_valid_entry(&entries[idx])) {
			printk(KERN_CONT ".. invalid entry %d ",
				entries[idx].type);
			goto failed;
		}

		idx++;
		if (idx >= ENTRIES_PER_PAGE) {
			page = virt_to_page(entries);
			if (page->lru.next == &data->trace_pages) {
				if (i != tr->entries - 1) {
					printk(KERN_CONT ".. entries buffer mismatch");
					goto failed;
				}
			} else {
				page = list_entry(page->lru.next, struct page, lru);
				entries = page_address(page);
			}
			idx = 0;
		}
	}

	page = virt_to_page(entries);
	if (page->lru.next != &data->trace_pages) {
		printk(KERN_CONT ".. too many entries");
		goto failed;
	}

	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	raw_local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);
	for_each_possible_cpu(cpu) {
		if (!head_page(tr->data[cpu]))
			continue;

		cnt += tr->data[cpu]->trace_idx;

		ret = trace_test_buffer_cpu(tr, tr->data[cpu]);
		if (ret)
			break;
	}
	__raw_spin_unlock(&ftrace_max_lock);
	raw_local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

#ifdef CONFIG_FTRACE

#ifdef CONFIG_DYNAMIC_FTRACE

#define __STR(x) #x
#define STR(x) __STR(x)
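/* the two-level macro expands DYN_FTRACE_TEST_NAME before stringifying it */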

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	unsigned long count;
	int ret;
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/* update the records */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/* filter only on our function */
	ftrace_set_filter(STR(DYN_FTRACE_TEST_NAME),
			  sizeof(STR(DYN_FTRACE_TEST_NAME)), 1);

	/* enable tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}
 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
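/* when dynamic ftrace is not configured, the test is a harmless no-op */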
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;

	/* make sure msleep has been recorded */
	msleep(1);

	/* force the recorded functions to be traced */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FTRACE */

#ifdef CONFIG_IRQSOFF_TRACER
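/*
 * The irqsoff selftest disables interrupts around a 100us delay and
 * then checks that both the live buffer and the max-latency snapshot
 * picked up entries for the critical section.
 */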
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
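/*
 * Same shape as the irqsoff selftest, but the critical section is a
 * preempt_disable()/preempt_enable() pair.
 */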
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
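/*
 * The combined tracer is exercised twice, with preemption and
 * interrupts disabled together, and both trace buffers are checked
 * after each round.
 */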
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tr->ctrl = 1;
	trace->ctrl_update(tr);
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_SCHED_TRACER
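/*
 * The wakeup selftest creates an RT kthread, lets it go to sleep,
 * wakes it, and then checks that the wakeup latency was recorded.
 */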
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread, doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* let the test know we have our new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes, this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do a
	 * wakeup on a task that is already awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen, something is horribly wrong with the
	 * system.
	 */

	wake_up_process(p);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
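/*
 * Sleeping for 100ms with the context-switch tracer active should
 * schedule at least once, so an empty buffer means failure.
 */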
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */