]> Git Repo - linux.git/blame - kernel/trace/trace_functions.c
Merge tag 'for-5.10-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave...
[linux.git] / kernel / trace / trace_functions.c
CommitLineData
b2441318 1// SPDX-License-Identifier: GPL-2.0
1b29b018
SR
2/*
3 * ring buffer based function tracer
4 *
5 * Copyright (C) 2007-2008 Steven Rostedt <[email protected]>
6 * Copyright (C) 2008 Ingo Molnar <[email protected]>
7 *
8 * Based on code from the latency_tracer, that is:
9 *
10 * Copyright (C) 2004-2006 Ingo Molnar
6d49e352 11 * Copyright (C) 2004 Nadia Yvette Chambers
1b29b018 12 */
23b4ff3a 13#include <linux/ring_buffer.h>
1b29b018
SR
14#include <linux/debugfs.h>
15#include <linux/uaccess.h>
16#include <linux/ftrace.h>
f20a5806 17#include <linux/slab.h>
2e0f5761 18#include <linux/fs.h>
1b29b018
SR
19
20#include "trace.h"
21
f20a5806
SRRH
/* Defined below; needed by function_trace_init() and friends */
static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
/* Per-function callbacks: plain trace, and trace plus stack capture */
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs);
static struct tracer_flags func_flags;

/* Our option */
enum {
	TRACE_FUNC_OPT_STACK = 0x1,
};
36
/*
 * Allocate the ftrace_ops used by a trace instance.
 * Returns 0 on success (the top-level array needs no allocation as it
 * uses the global ops) or -ENOMEM.
 */
int ftrace_allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	/* The top level array uses the "global_ops" */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_PID;

	/* Link the ops and the trace_array both ways */
	tr->ops = ops;
	ops->private = tr;

	return 0;
}
a225cdd2 58
4114fbfd
MH
59void ftrace_free_ftrace_ops(struct trace_array *tr)
60{
61 kfree(tr->ops);
62 tr->ops = NULL;
63}
591dffda
SRRH
64
/*
 * Create the function tracer's filter control files for an instance
 * under @parent. Returns 0 on success, -EINVAL if the instance has no
 * ops (its allocation failed at instance creation).
 */
int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	if (!tr->ops)
		return -EINVAL;

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}
82
/* Tear down the filter files, then free the instance's ftrace_ops. */
void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	ftrace_free_ftrace_ops(tr);
}
88
/*
 * tracer->init callback: choose the per-function callback (stack
 * tracing or plain), then start cmdline recording and the tracer.
 */
static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;

	/*
	 * Instance trace_arrays get their ops allocated
	 * at instance creation. Unless it failed
	 * the allocation.
	 */
	if (!tr->ops)
		return -ENOMEM;

	/* Currently only the global instance can do stack tracing */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
	    func_flags.val & TRACE_FUNC_OPT_STACK)
		func = function_stack_trace_call;
	else
		func = function_trace_call;

	ftrace_init_array_ops(tr, func);

	/* Record which CPU started the trace; pin it only for the read */
	tr->array_buffer.cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}
117
/* tracer->reset callback: undo function_trace_init() in reverse order. */
static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}
124
9036990d
SR
/* tracer->start callback: discard any previously recorded data. */
static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->array_buffer);
}
129
/*
 * Default per-function callback: record a function-entry event into
 * the trace buffer. Protects against recursion (trace infrastructure
 * that is itself traced) via the recursion bits, and honors the
 * per-cpu "disabled" counter.
 */
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	int bit;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	/* Snapshot the preempt count before we disable preemption below */
	pc = preempt_count();
	preempt_disable_notrace();

	/* Bail out if this CPU is already inside this callback */
	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
	if (bit < 0)
		goto out;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	if (!atomic_read(&data->disabled)) {
		local_save_flags(flags);
		trace_function(tr, ip, parent_ip, flags, pc);
	}
	trace_clear_recursion(bit);

 out:
	preempt_enable_notrace();
}
162
2ee5b92a
SRV
/*
 * Frames to skip so the recorded stack starts at the traced function,
 * not inside the tracer. The ORC unwinder does not see the inlined
 * __trace_stack() frame, hence one fewer.
 */
#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 2:
 *
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 2
#else
/*
 * Skip 3:
 *   __trace_stack()
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 3
#endif
180
/*
 * Like function_trace_call(), but also records a stack trace per
 * function entry. Recursion protection here is irqs-off plus the
 * per-cpu "disabled" counter rather than the recursion bits.
 */
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	/* Only trace at the outermost (non-recursive) entry */
	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		__trace_stack(tr, flags, STACK_SKIP, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
213
53614991
SR
/* Options exposed for the function tracer (options/func_stack_trace) */
static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};
225
static void tracing_start_function_trace(struct trace_array *tr)
{
	/*
	 * Keep function_enabled clear while registering so the callback
	 * ignores hits that arrive before setup completes; only then
	 * flip it on.
	 */
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}
232
static void tracing_stop_function_trace(struct trace_array *tr)
{
	/* Clear the flag first so the callback stops recording at once */
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}
238
d39cdd20
CH
/* Forward declaration: func_set_flag() compares against it below */
static struct tracer function_trace;
8c1a49ae
SRRH
241static int
242func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
53614991 243{
f555f123
AV
244 switch (bit) {
245 case TRACE_FUNC_OPT_STACK:
53614991
SR
246 /* do nothing if already set */
247 if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
f555f123 248 break;
53614991 249
d39cdd20
CH
250 /* We can change this flag when not running. */
251 if (tr->current_trace != &function_trace)
252 break;
253
f20a5806
SRRH
254 unregister_ftrace_function(tr->ops);
255
3eb36aa0 256 if (set) {
4104d326 257 tr->ops->func = function_stack_trace_call;
f20a5806 258 register_ftrace_function(tr->ops);
3eb36aa0 259 } else {
4104d326 260 tr->ops->func = function_trace_call;
f20a5806 261 register_ftrace_function(tr->ops);
3eb36aa0 262 }
53614991 263
f555f123
AV
264 break;
265 default:
266 return -EINVAL;
53614991
SR
267 }
268
f555f123 269 return 0;
53614991
SR
270}
271
/* The function tracer itself, registered in init_function_trace() */
static struct tracer function_trace __tracer_data =
{
	.name = "function",
	.init = function_trace_init,
	.reset = function_trace_reset,
	.start = function_trace_start,
	.flags = &func_flags,
	.set_flag = func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_function,
#endif
};
285
23b4ff3a 286#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Shared implementation of the counted traceon/traceoff probes: flip
 * tracing on or off (@on selects which) and decrement the remaining
 * count stored for this function's @ip.
 */
static void update_traceon_count(struct ftrace_probe_ops *ops,
				 unsigned long ip,
				 struct trace_array *tr, bool on,
				 void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;

	/*
	 * Tracing gets disabled (or enabled) once per count.
	 * This function can be called at the same time on multiple CPUs.
	 * It is fine if both disable (or enable) tracing, as disabling
	 * (or enabling) the second time doesn't do anything as the
	 * state of the tracer is already disabled (or enabled).
	 * What needs to be synchronized in this case is that the count
	 * only gets decremented once, even if the tracer is disabled
	 * (or enabled) twice, as the second one is really a nop.
	 *
	 * The memory barriers guarantee that we only decrement the
	 * counter once. First the count is read to a local variable
	 * and a read barrier is used to make sure that it is loaded
	 * before checking if the tracer is in the state we want.
	 * If the tracer is not in the state we want, then the count
	 * is guaranteed to be the old count.
	 *
	 * Next the tracer is set to the state we want (disabled or enabled)
	 * then a write memory barrier is used to make sure that
	 * the new state is visible before changing the counter by
	 * one minus the old counter. This guarantees that another CPU
	 * executing this code will see the new state before seeing
	 * the new counter value, and would not do anything if the new
	 * counter is seen.
	 *
	 * Note, there is no synchronization between this and a user
	 * setting the tracing_on file. But we currently don't care
	 * about that.
	 */
	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	old_count = *count;

	if (old_count <= 0)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	/* Already in the desired state: nothing to do */
	if (on == !!tracer_tracing_is_on(tr))
		return;

	if (on)
		tracer_tracing_on(tr);
	else
		tracer_tracing_off(tr);

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}
347
/* Probe func for "traceon:<count>" */
static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	update_traceon_count(ops, ip, tr, 1, data);
}
23b4ff3a 355
/* Probe func for "traceoff:<count>" */
static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	update_traceon_count(ops, ip, tr, 0, data);
}
363
/* Probe func for un-counted "traceon": enable tracing if it is off */
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	if (!tracer_tracing_is_on(tr))
		tracer_tracing_on(tr);
}
374
/* Probe func for un-counted "traceoff": disable tracing if it is on */
static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	if (tracer_tracing_is_on(tr))
		tracer_tracing_off(tr);
}
385
2ee5b92a
SRV
/*
 * Frames to skip for the "stacktrace" probe so the captured stack
 * starts at the traced function. The ORC unwinder skips the frames
 * that the frame-pointer unwinder would report for the inlined
 * trace_stack()/__trace_stack() path, hence the different counts.
 */
#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 3:
 *
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 3
#else
/*
 * Skip 5:
 *
 *   __trace_stack()
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 5
#endif
dd42cd3e 407
dcc19d28
SRV
/*
 * Record a stack trace into the buffer, skipping the tracer-internal
 * frames. Always inlined so the skip count stays accurate.
 */
static __always_inline void trace_stack(struct trace_array *tr)
{
	unsigned long flags;
	int pc;

	local_save_flags(flags);
	pc = preempt_count();

	__trace_stack(tr, flags, FTRACE_STACK_SKIP, pc);
}
418
/* Probe func for un-counted "stacktrace" */
static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	trace_stack(tr);
}
426
/*
 * Probe func for "stacktrace:<count>": record a stack trace at most
 * <count> times. Uses cmpxchg() so that concurrent CPUs hitting the
 * same ip decrement the count exactly once per recorded trace.
 */
static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
			struct trace_array *tr, struct ftrace_probe_ops *ops,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;
	long new_count;

	if (!tracing_is_on())
		return;

	/* unlimited? */
	if (!mapper) {
		trace_stack(tr);
		return;
	}

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
	do {
		old_count = *count;

		if (!old_count)
			return;

		new_count = old_count - 1;
		/* Only the CPU that wins the cmpxchg records the trace */
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_stack(tr);

		/* Stop retrying if tracing was turned off meanwhile */
		if (!tracing_is_on())
			return;

	} while (new_count != old_count);
}
468
6e444319
SRV
/*
 * Consume one unit of the probe's remaining count for @ip.
 * Returns 1 if the probe action should run (no count was configured,
 * or the count was still positive and has now been decremented),
 * 0 once the count is exhausted.
 */
static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;

	/* No mapper means no count was requested: unlimited */
	if (!mapper)
		return 1;

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	if (!count)
		return 1;

	if (*count <= 0)
		return 0;

	(*count)--;
	return 1;
}
486
/* Probe func for "dump": dump every trace buffer, limited by count */
static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ALL);
}
495
90e3c03c
SRRH
/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	/* Same count bookkeeping as ftrace_dump_probe() */
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ORIG);
}
505
/*
 * Common seq_file output for probe entries:
 *   "<function>:<name>:count=N"  or  "<function>:<name>:unlimited"
 */
static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, struct ftrace_probe_ops *ops,
		   void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}
526
/* seq_file ->print for the traceon probes */
static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops,
		     void *data)
{
	return ftrace_probe_print("traceon", m, ip, ops, data);
}
534
/* seq_file ->print for the traceoff probes */
static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, ops, data);
}
541
/* seq_file ->print for the stacktrace probes */
static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, ops, data);
}
e110e3d1 548
ad71d889
SRRH
/* seq_file ->print for the dump probe */
static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
		  struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, ops, data);
}
555
90e3c03c
SRRH
/* seq_file ->print for the cpudump probe */
static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, ops, data);
}
562
563
/*
 * probe ->init: lazily allocate the ip->count mapper on first use and
 * record @init_data (the parsed count) for this @ip.
 * Returns 0 on success or a negative errno.
 */
static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}
579
/*
 * probe ->free: drop the count entry for @ip, or free the entire
 * mapper when called with ip == 0 (whole probe being removed).
 */
static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}
593
8380d248
SRRH
594static struct ftrace_probe_ops traceon_count_probe_ops = {
595 .func = ftrace_traceon_count,
dd42cd3e 596 .print = ftrace_traceon_print,
fe014e24
SRV
597 .init = ftrace_count_init,
598 .free = ftrace_count_free,
8380d248
SRRH
599};
600
601static struct ftrace_probe_ops traceoff_count_probe_ops = {
602 .func = ftrace_traceoff_count,
dd42cd3e 603 .print = ftrace_traceoff_print,
fe014e24
SRV
604 .init = ftrace_count_init,
605 .free = ftrace_count_free,
dd42cd3e
SRRH
606};
607
608static struct ftrace_probe_ops stacktrace_count_probe_ops = {
609 .func = ftrace_stacktrace_count,
610 .print = ftrace_stacktrace_print,
fe014e24
SRV
611 .init = ftrace_count_init,
612 .free = ftrace_count_free,
8380d248
SRRH
613};
614
ad71d889
SRRH
615static struct ftrace_probe_ops dump_probe_ops = {
616 .func = ftrace_dump_probe,
617 .print = ftrace_dump_print,
fe014e24
SRV
618 .init = ftrace_count_init,
619 .free = ftrace_count_free,
ad71d889
SRRH
620};
621
90e3c03c
SRRH
622static struct ftrace_probe_ops cpudump_probe_ops = {
623 .func = ftrace_cpudump_probe,
624 .print = ftrace_cpudump_print,
625};
626
b6887d79 627static struct ftrace_probe_ops traceon_probe_ops = {
23b4ff3a 628 .func = ftrace_traceon,
dd42cd3e 629 .print = ftrace_traceon_print,
23b4ff3a
SR
630};
631
b6887d79 632static struct ftrace_probe_ops traceoff_probe_ops = {
23b4ff3a 633 .func = ftrace_traceoff,
dd42cd3e 634 .print = ftrace_traceoff_print,
23b4ff3a
SR
635};
636
dd42cd3e
SRRH
637static struct ftrace_probe_ops stacktrace_probe_ops = {
638 .func = ftrace_stacktrace,
639 .print = ftrace_stacktrace_print,
640};
e110e3d1 641
/*
 * Common handler for the set_ftrace_filter probe commands
 * (traceon/traceoff/stacktrace/dump/cpudump), syntax:
 *   <glob>:<cmd>[:<count>]
 * A leading '!' on the glob unregisters the probe; @param, when
 * present, is parsed as the count.
 */
static int
ftrace_trace_probe_callback(struct trace_array *tr,
			    struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;	/* -1 means "unlimited" */
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	/* An empty count field also means "unlimited" */
	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, tr, ops, count);

	return ret < 0 ? ret : 0;
}
680
dd42cd3e 681static int
04ec7bb6 682ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
dd42cd3e
SRRH
683 char *glob, char *cmd, char *param, int enable)
684{
685 struct ftrace_probe_ops *ops;
686
0f179765
SRV
687 if (!tr)
688 return -ENODEV;
689
dd42cd3e
SRRH
690 /* we register both traceon and traceoff to this callback */
691 if (strcmp(cmd, "traceon") == 0)
692 ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
693 else
694 ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;
695
04ec7bb6 696 return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
dd42cd3e
SRRH
697 param, enable);
698}
699
/* set_ftrace_filter entry point for "stacktrace" */
static int
ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	/* A count parameter selects the counted variant */
	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}
714
/* set_ftrace_filter entry point for "dump" */
static int
ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
		     char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}
730
/* set_ftrace_filter entry point for "cpudump" */
static int
ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}
746
23b4ff3a
SR
/* set_ftrace_filter command table entries, registered at boot below */
static struct ftrace_func_command ftrace_traceon_cmd = {
	.name = "traceon",
	.func = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name = "traceoff",
	.func = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name = "stacktrace",
	.func = ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name = "dump",
	.func = ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name = "cpudump",
	.func = ftrace_cpudump_callback,
};
771
23b4ff3a
SR
/*
 * Register all probe commands. On any failure the goto chain unwinds
 * every command registered so far, in reverse order.
 */
static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
809#else
/* Without CONFIG_DYNAMIC_FTRACE there are no probe commands to register */
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
814#endif /* CONFIG_DYNAMIC_FTRACE */
815
/*
 * Boot-time initialization: register the filter probe commands
 * (best effort — the return value is deliberately ignored) and the
 * function tracer itself.
 */
__init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}
This page took 0.736186 seconds and 4 git commands to generate.