/*
 * ring buffer based function tracer
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/pstore.h>
#include <linux/fs.h>

#include "trace.h"
/* function tracing enabled */
static int			ftrace_function_enabled;

static struct trace_array	*func_trace;
static void tracing_start_function_trace(void);
static void tracing_stop_function_trace(void);
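
/*
 * The init/reset callbacks below run when this tracer is selected or
 * deselected through the tracing debugfs file "current_tracer".
 */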
static int function_trace_init(struct trace_array *tr)
{
	func_trace = tr;
	tr->cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace();
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace();
	tracing_stop_cmdline_record();
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
}
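
/*
 * Each per-cpu trace_array_cpu carries a "disabled" counter: the
 * callbacks below bump it on entry and only write to the ring buffer
 * when the count is exactly 1.  That keeps nested or re-entrant calls
 * (interrupts, NMIs, the tracing code itself) from recursing into
 * the buffer.
 */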
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();
	local_save_flags(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, ip, parent_ip, flags, pc);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}

/* Our two options */
enum {
	TRACE_FUNC_OPT_STACK	= 0x1,
	TRACE_FUNC_OPT_PSTORE	= 0x2,
};
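
/*
 * func_stack_trace records a stack trace along with each function entry;
 * func_pstore additionally mirrors each call into persistent storage
 * (pstore), so the tail of the trace can survive a crash or hard reset.
 */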
static struct tracer_flags func_flags;

static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		/*
		 * So far tracing doesn't support multiple buffers, so
		 * we make an explicit call for now.
		 */
		if (unlikely(func_flags.val & TRACE_FUNC_OPT_PSTORE))
			pstore_ftrace_call(ip, parent_ip);
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call
		 *    ftrace_list_func
		 *    ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
	.func = function_stack_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};
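
/*
 * FTRACE_OPS_FL_GLOBAL puts these ops on the global list that is
 * filtered through set_ftrace_filter; FTRACE_OPS_FL_RECURSION_SAFE
 * tells ftrace the callbacks do their own recursion protection (the
 * "disabled" counter above), so no extra guard is wrapped around them.
 */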
static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
#ifdef CONFIG_PSTORE_FTRACE
	{ TRACER_OPT(func_pstore, TRACE_FUNC_OPT_PSTORE) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};

static void tracing_start_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (trace_flags & TRACE_ITER_PREEMPTONLY)
		trace_ops.func = function_trace_call_preempt_only;
	else
		trace_ops.func = function_trace_call;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		register_ftrace_function(&trace_stack_ops);
	else
		register_ftrace_function(&trace_ops);

	ftrace_function_enabled = 1;
}

static void tracing_stop_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		unregister_ftrace_function(&trace_stack_ops);
	else
		unregister_ftrace_function(&trace_ops);
}

static int func_set_flag(u32 old_flags, u32 bit, int set)
{
	switch (bit) {
	case TRACE_FUNC_OPT_STACK:
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			break;

		if (set) {
			unregister_ftrace_function(&trace_ops);
			register_ftrace_function(&trace_stack_ops);
		} else {
			unregister_ftrace_function(&trace_stack_ops);
			register_ftrace_function(&trace_ops);
		}

		break;
	case TRACE_FUNC_OPT_PSTORE:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
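
/*
 * The options above are flipped at runtime from the tracing debugfs
 * directory, e.g.:
 *
 *   echo func_stack_trace   > trace_options     (enable)
 *   echo nofunc_stack_trace > trace_options     (disable)
 */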
static struct tracer function_trace __read_mostly =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.wait_pipe	= poll_wait_pipe,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};
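
/*
 * Selected like any other tracer:
 *
 *   echo function > /sys/kernel/debug/tracing/current_tracer
 */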
#ifdef CONFIG_DYNAMIC_FTRACE
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	if (tracing_is_on())
		return;

	if (!*count)
		return;

	/* a count of -1 means unlimited */
	if (*count != -1)
		(*count)--;

	tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	if (!tracing_is_on())
		return;

	if (!*count)
		return;

	/* a count of -1 means unlimited */
	if (*count != -1)
		(*count)--;

	tracing_off();
}

static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data);

static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_trace_onoff_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_trace_onoff_print,
};

static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	if (ops == &traceon_probe_ops)
		seq_printf(m, "traceon");
	else
		seq_printf(m, "traceoff");

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

static int
ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
{
	struct ftrace_probe_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = &traceon_probe_ops;
	else
		ops = &traceoff_probe_ops;

	unregister_ftrace_function_probe_func(glob, ops);

	return 0;
}

static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return ftrace_trace_onoff_unreg(glob+1, cmd, param);

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = &traceon_probe_ops;
	else
		ops = &traceoff_probe_ops;

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = strict_strtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	return ret < 0 ? ret : 0;
}
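
/*
 * Example use from the tracing debugfs directory:
 *
 *   echo 'schedule:traceoff'   > set_ftrace_filter  # stop tracing when schedule() is hit
 *   echo 'schedule:traceoff:5' > set_ftrace_filter  # ...but only the first 5 times
 *   echo '!schedule:traceoff'  > set_ftrace_filter  # remove the probe again
 */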
static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};

static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		unregister_ftrace_command(&ftrace_traceoff_cmd);
	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

static __init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}
device_initcall(init_function_trace);