/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <[email protected]>
 * Copyright (C) 2008 Ingo Molnar <[email protected]>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs);
static struct tracer_flags func_flags;

/* Our option */
enum {
	TRACE_FUNC_OPT_STACK = 0x1,
};

static int allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non-stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE;

	tr->ops = ops;
	ops->private = tr;
	return 0;
}

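/*
 * Called for each new trace instance (the top level instance gets its
 * files at boot instead). Each instance owns a separate ftrace_ops, so
 * every instance can filter functions independently. A usage sketch,
 * assuming debugfs is mounted at the usual location and "foo" is just
 * an example instance name:
 *
 *	# mkdir /sys/kernel/debug/tracing/instances/foo
 *	# echo function > /sys/kernel/debug/tracing/instances/foo/current_tracer
 */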
int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	int ret;

	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ret = allocate_ftrace_ops(tr);
	if (ret)
		return ret;

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	kfree(tr->ops);
	tr->ops = NULL;
}

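/*
 * Tracer ->init() callback: runs when the "function" tracer is
 * selected, e.g. (sketch, assuming the usual debugfs mount point):
 *
 *	# echo function > /sys/kernel/debug/tracing/current_tracer
 *
 * It picks the per-function callback, starts cmdline recording so PIDs
 * can be mapped back to comm names, and registers the callback.
 */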
static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;

	/*
	 * Instance trace_arrays get their ops allocated at instance
	 * creation, unless that allocation failed.
	 */
	if (!tr->ops)
		return -ENOMEM;

	/* Currently only the global instance can do stack tracing */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
	    func_flags.val & TRACE_FUNC_OPT_STACK)
		func = function_stack_trace_call;
	else
		func = function_trace_call;

	ftrace_init_array_ops(tr, func);

	tr->trace_buffer.cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
}

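/*
 * The workhorse callback: invoked from the ftrace trampoline for every
 * traced function. Preemption is disabled and a per-context recursion
 * bit is taken, so a function called from within this path cannot
 * recurse back into the tracer.
 */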
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	int bit;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();

	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
	if (bit < 0)
		goto out;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	if (!atomic_read(&data->disabled)) {
		local_save_flags(flags);
		trace_function(tr, ip, parent_ip, flags, pc);
	}
	trace_clear_recursion(bit);

 out:
	preempt_enable_notrace();
}

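/*
 * Variant used when the func_stack_trace option is set: records the
 * function entry and then a stack trace. It disables interrupts and
 * uses the per-cpu data->disabled counter (rather than the recursion
 * bit) to keep the stack dump from nesting. Enabled via (sketch,
 * assuming the usual debugfs mount point):
 *
 *	# echo 1 > /sys/kernel/debug/tracing/options/func_stack_trace
 */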
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call
		 *    ftrace_list_func
		 *    ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

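/*
 * Tracer options: func_stack_trace is only offered when the kernel is
 * built with CONFIG_STACKTRACE; otherwise only the empty terminating
 * entry remains.
 */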
static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};

static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}

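/*
 * Toggling func_stack_trace swaps the registered callback: tr->ops is
 * unregistered, ->func is changed, and the ops is registered again, so
 * the function pointer is never modified while the ops is live.
 */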
static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	switch (bit) {
	case TRACE_FUNC_OPT_STACK:
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			break;

		unregister_ftrace_function(tr->ops);

		if (set) {
			tr->ops->func = function_stack_trace_call;
			register_ftrace_function(tr->ops);
		} else {
			tr->ops->func = function_trace_call;
			register_ftrace_function(tr->ops);
		}

		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static struct tracer function_trace __tracer_data =
{
	.name = "function",
	.init = function_trace_init,
	.reset = function_trace_reset,
	.start = function_trace_start,
	.flags = &func_flags,
	.set_flag = func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
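/*
 * The probes below back the traceon/traceoff/stacktrace/dump/cpudump
 * commands accepted by set_ftrace_filter. A usage sketch, assuming the
 * usual debugfs mount point ("schedule" is only an example function):
 *
 *	# echo 'schedule:traceoff:3' > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 * disables the ring buffer when schedule() is hit, at most three times.
 */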
static void update_traceon_count(void **data, bool on)
{
	long *count = (long *)data;
	long old_count = *count;

	/*
	 * Tracing gets disabled (or enabled) once per count.
	 * This function can be called at the same time on multiple CPUs.
	 * It is fine if both disable (or enable) tracing, as disabling
	 * (or enabling) the second time doesn't do anything as the
	 * state of the tracer is already disabled (or enabled).
	 * What needs to be synchronized in this case is that the count
	 * only gets decremented once, even if the tracer is disabled
	 * (or enabled) twice, as the second one is really a nop.
	 *
	 * The memory barriers guarantee that we only decrement the
	 * counter once. First the count is read to a local variable
	 * and a read barrier is used to make sure that it is loaded
	 * before checking if the tracer is in the state we want.
	 * If the tracer is not in the state we want, then the count
	 * is guaranteed to be the old count.
	 *
	 * Next the tracer is set to the state we want (disabled or
	 * enabled), then a write memory barrier is used to make sure
	 * that the new state is visible before the counter is updated
	 * to the old count minus one. This guarantees that another CPU
	 * executing this code will see the new state before seeing
	 * the new counter value, and would not do anything if the new
	 * counter is seen.
	 *
	 * Note, there is no synchronization between this and a user
	 * setting the tracing_on file. But we currently don't care
	 * about that.
	 */
	if (!old_count)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	if (on == !!tracing_is_on())
		return;

	if (on)
		tracing_on();
	else
		tracing_off();

	/* unlimited? */
	if (old_count == -1)
		return;

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data)
{
	update_traceon_count(data, 1);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data)
{
	update_traceon_count(data, 0);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (tracing_is_on())
		return;

	tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (!tracing_is_on())
		return;

	tracing_off();
}

/*
 * Skip 4:
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_list_func()
 *   ftrace_call()
 */
#define STACK_SKIP 4

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, void **data)
{
	trace_dump_stack(STACK_SKIP);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;
	long old_count;
	long new_count;

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
	do {
		if (!tracing_is_on())
			return;

		old_count = *count;

		if (!old_count)
			return;

		/* unlimited? */
		if (old_count == -1) {
			trace_dump_stack(STACK_SKIP);
			return;
		}

		new_count = old_count - 1;
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_dump_stack(STACK_SKIP);
	} while (new_count != old_count);
}

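/*
 * Simpler, non-atomic counter used by the dump/cpudump probes. Unlike
 * the cmpxchg loop above, concurrent callers could race here, but those
 * probes are registered with a count of "1" and only fire once anyway.
 */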
static int update_count(void **data)
{
	unsigned long *count = (unsigned long *)data;

	if (!*count)
		return 0;

	if (*count != -1)
		(*count)--;

	return 1;
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (update_count(data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (update_count(data))
		ftrace_dump(DUMP_ORIG);
}

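/*
 * Common helper for the ->print handlers: when set_ftrace_filter is
 * read, each active probe is listed as "<function>:<command>:count=<n>"
 * or "<function>:<command>:unlimited" (see the format strings below).
 */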
static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (count == -1)
		seq_puts(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceon", m, ip, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
		  struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, data);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func = ftrace_traceon_count,
	.print = ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func = ftrace_traceoff_count,
	.print = ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func = ftrace_stacktrace_count,
	.print = ftrace_stacktrace_print,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func = ftrace_dump_probe,
	.print = ftrace_dump_print,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func = ftrace_cpudump_probe,
	.print = ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func = ftrace_traceon,
	.print = ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func = ftrace_traceoff,
	.print = ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func = ftrace_stacktrace,
	.print = ftrace_stacktrace_print,
};

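/*
 * Common parser for all probe commands written to set_ftrace_filter.
 * The user-visible grammar, reconstructed from the code below, is:
 *
 *	<glob>:<command>[:<count>]	register the probe
 *	!<glob>:<command>		remove it again
 *
 * A missing count leaves the default of -1, i.e. the probe fires
 * without limit.
 */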
static int
ftrace_trace_probe_callback(struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	return ret < 0 ? ret : 0;
}

static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_stacktrace_callback(struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_dump_callback(struct ftrace_hash *hash,
		     char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct ftrace_hash *hash,
			char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   "1", enable);
}

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name = "traceon",
	.func = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name = "traceoff",
	.func = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name = "stacktrace",
	.func = ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name = "dump",
	.func = ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name = "cpudump",
	.func = ftrace_cpudump_callback,
};

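/*
 * Register the five probe commands (traceoff, traceon, stacktrace,
 * dump, cpudump) with ftrace, unwinding the earlier registrations if
 * any later one fails.
 */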
static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

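/*
 * core_initcall: the function tracer and its commands are registered
 * early in boot, before most other initcalls run.
 */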
static __init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}
core_initcall(init_function_trace);