// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs);
static struct tracer_flags func_flags;

/* Our option */
enum {
	TRACE_FUNC_OPT_STACK	= 0x1,
};
int ftrace_allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	/* The top level array uses the "global_ops" */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_PID;

	tr->ops = ops;
	ops->private = tr;

	return 0;
}
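
/*
 * Illustrative note: per-instance ops like the one allocated above come into
 * play when a separate trace instance is created from user space, e.g.:
 *
 *	mkdir /sys/kernel/tracing/instances/foo
 *	echo function > /sys/kernel/tracing/instances/foo/current_tracer
 *
 * The top level instance skips the allocation and keeps using the global
 * ftrace ops.
 */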
void ftrace_free_ftrace_ops(struct trace_array *tr)
{
	kfree(tr->ops);
	tr->ops = NULL;
}

int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	if (!tr->ops)
		return -EINVAL;

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	ftrace_free_ftrace_ops(tr);
}
static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;

	/*
	 * Instance trace_arrays get their ops allocated
	 * at instance creation. Unless it failed
	 * the allocation.
	 */
	if (!tr->ops)
		return -ENOMEM;

	/* Currently only the global instance can do stack tracing */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
	    func_flags.val & TRACE_FUNC_OPT_STACK)
		func = function_stack_trace_call;
	else
		func = function_trace_call;

	ftrace_init_array_ops(tr, func);

	tr->array_buffer.cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}
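
/*
 * Illustrative usage sketch: the function tracer initialized above is
 * selected from user space through tracefs:
 *
 *	echo function > /sys/kernel/tracing/current_tracer
 *	cat /sys/kernel/tracing/trace
 */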
static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->array_buffer);
}
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	int bit;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();

	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
	if (bit < 0)
		goto out;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	if (!atomic_read(&data->disabled)) {
		local_save_flags(flags);
		trace_function(tr, ip, parent_ip, flags, pc);
	}
	trace_clear_recursion(bit);

 out:
	preempt_enable_notrace();
}
#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 2:
 *
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 2
#else
/*
 * Skip 3:
 *   __trace_stack()
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 3
#endif
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		__trace_stack(tr, flags, STACK_SKIP, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};
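
/*
 * Illustrative usage sketch: the func_stack_trace option above is toggled
 * from user space while the function tracer is active:
 *
 *	echo function > /sys/kernel/tracing/current_tracer
 *	echo 1 > /sys/kernel/tracing/options/func_stack_trace
 */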
static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}
static struct tracer function_trace;

static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	switch (bit) {
	case TRACE_FUNC_OPT_STACK:
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			break;

		/* We can change this flag when not running. */
		if (tr->current_trace != &function_trace)
			break;

		unregister_ftrace_function(tr->ops);

		if (set) {
			tr->ops->func = function_stack_trace_call;
			register_ftrace_function(tr->ops);
		} else {
			tr->ops->func = function_trace_call;
			register_ftrace_function(tr->ops);
		}

		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};
#ifdef CONFIG_DYNAMIC_FTRACE
static void update_traceon_count(struct ftrace_probe_ops *ops,
				 unsigned long ip,
				 struct trace_array *tr, bool on,
				 void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;

	/*
	 * Tracing gets disabled (or enabled) once per count.
	 * This function can be called at the same time on multiple CPUs.
	 * It is fine if both disable (or enable) tracing, as disabling
	 * (or enabling) the second time doesn't do anything as the
	 * state of the tracer is already disabled (or enabled).
	 * What needs to be synchronized in this case is that the count
	 * only gets decremented once, even if the tracer is disabled
	 * (or enabled) twice, as the second one is really a nop.
	 *
	 * The memory barriers guarantee that we only decrement the
	 * counter once. First the count is read to a local variable
	 * and a read barrier is used to make sure that it is loaded
	 * before checking if the tracer is in the state we want.
	 * If the tracer is not in the state we want, then the count
	 * is guaranteed to be the old count.
	 *
	 * Next the tracer is set to the state we want (disabled or enabled)
	 * then a write memory barrier is used to make sure that
	 * the new state is visible before changing the counter by
	 * one minus the old counter. This guarantees that another CPU
	 * executing this code will see the new state before seeing
	 * the new counter value, and would not do anything if the new
	 * counter is seen.
	 *
	 * Note, there is no synchronization between this and a user
	 * setting the tracing_on file. But we currently don't care
	 * about that.
	 */
	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	old_count = *count;

	if (old_count <= 0)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	if (on == !!tracer_tracing_is_on(tr))
		return;

	if (on)
		tracer_tracing_on(tr);
	else
		tracer_tracing_off(tr);

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}
static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	update_traceon_count(ops, ip, tr, 1, data);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	update_traceon_count(ops, ip, tr, 0, data);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	if (tracer_tracing_is_on(tr))
		return;

	tracer_tracing_on(tr);
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	if (!tracer_tracing_is_on(tr))
		return;

	tracer_tracing_off(tr);
}
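
/*
 * Illustrative usage sketch (function names here are only examples): the
 * traceon/traceoff probes above are attached to a function through
 * set_ftrace_filter, optionally with a count that limits how many times the
 * probe fires:
 *
 *	echo 'schedule:traceoff' > /sys/kernel/tracing/set_ftrace_filter
 *	echo 'wake_up_process:traceon:3' > /sys/kernel/tracing/set_ftrace_filter
 */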
#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 3:
 *
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 3
#else
/*
 * Skip 5:
 *
 *   __trace_stack()
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 5
#endif
static __always_inline void trace_stack(struct trace_array *tr)
{
	unsigned long flags;
	int pc;

	local_save_flags(flags);
	pc = preempt_count();

	__trace_stack(tr, flags, FTRACE_STACK_SKIP, pc);
}

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	trace_stack(tr);
}
static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
			struct trace_array *tr, struct ftrace_probe_ops *ops,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;
	long new_count;

	if (!tracing_is_on())
		return;

	/* unlimited? */
	if (!mapper) {
		trace_stack(tr);
		return;
	}

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
	do {
		old_count = *count;

		if (!old_count)
			return;

		new_count = old_count - 1;
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_stack(tr);

		if (!tracing_is_on())
			return;

	} while (new_count != old_count);
}
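
/*
 * Illustrative usage sketch (the function name is only an example): the
 * stacktrace probe is attached with or without a count limiting how many
 * stack traces get recorded:
 *
 *	echo 'kfree:stacktrace' > /sys/kernel/tracing/set_ftrace_filter
 *	echo 'kfree:stacktrace:5' > /sys/kernel/tracing/set_ftrace_filter
 */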
static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {
		if (*count <= 0)
			return 0;
		(*count)--;
	}

	return 1;
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ALL);
}
/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ORIG);
}
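
/*
 * Illustrative usage sketch (the function name is only an example): the dump
 * and cpudump probes write the ring buffer contents to the console when the
 * marked function is hit; their callbacks below force a count of 1 so the
 * dump happens only once:
 *
 *	echo 'oops_enter:dump' > /sys/kernel/tracing/set_ftrace_filter
 *	echo 'oops_enter:cpudump' > /sys/kernel/tracing/set_ftrace_filter
 */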
static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, struct ftrace_probe_ops *ops,
		   void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}
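
/*
 * Illustrative example of the output produced above when reading
 * set_ftrace_filter (probe names and counts are only examples):
 *
 *	schedule:traceoff:count=3
 *	kfree:stacktrace:unlimited
 */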
static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops,
		     void *data)
{
	return ftrace_probe_print("traceon", m, ip, ops, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, ops, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, ops, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, ops, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, ops, data);
}
static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}
static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func			= ftrace_traceon_count,
	.print			= ftrace_traceon_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func			= ftrace_traceoff_count,
	.print			= ftrace_traceoff_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func			= ftrace_stacktrace_count,
	.print			= ftrace_stacktrace_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func			= ftrace_dump_probe,
	.print			= ftrace_dump_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func			= ftrace_cpudump_probe,
	.print			= ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func			= ftrace_stacktrace,
	.print			= ftrace_stacktrace_print,
};
static int
ftrace_trace_probe_callback(struct trace_array *tr,
			    struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, tr, ops, count);

	return ret < 0 ? ret : 0;
}
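
/*
 * Illustrative usage sketch (function name is only an example): a leading
 * '!' removes a previously registered probe, and an optional ':<count>'
 * after the command name becomes the counter parsed above:
 *
 *	echo 'schedule:traceoff:5' > /sys/kernel/tracing/set_ftrace_filter
 *	echo '!schedule:traceoff' >> /sys/kernel/tracing/set_ftrace_filter
 */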
static int
ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}
static int
ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}
static int
ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}
static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name			= "stacktrace",
	.func			= ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name			= "dump",
	.func			= ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name			= "cpudump",
	.func			= ftrace_cpudump_callback,
};
static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

__init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}