// SPDX-License-Identifier: GPL-2.0
/*
 * Infrastructure to hook into function calls and returns.
 * Mostly borrowed from function tracer which
 * Highly modified by Steven Rostedt (VMware).
 */
#include <linux/jump_label.h>
#include <linux/suspend.h>
#include <linux/ftrace.h>
#include <linux/slab.h>

#include <trace/events/sched.h>

#include "ftrace_internal.h"
#ifdef CONFIG_DYNAMIC_FTRACE
#define ASSIGN_OPS_HASH(opsname, val) \
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#else
#define ASSIGN_OPS_HASH(opsname, val)
#endif

DEFINE_STATIC_KEY_FALSE(kill_ftrace_graph);
int ftrace_graph_active;

/* Both enabled by default (can be cleared by function_graph tracer flags) */
static bool fgraph_sleep_time = true;
#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * archs can override this function if they must do something
 * to enable hook for graph tracer.
 */
int __weak ftrace_enable_ftrace_graph_caller(void)
{
	return 0;
}

/*
 * archs can override this function if they must do something
 * to disable hook for graph tracer.
 */
int __weak ftrace_disable_ftrace_graph_caller(void)
{
	return 0;
}
#endif

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
	static_branch_enable(&kill_ftrace_graph);
}
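/*
 * Note: the kill_ftrace_graph key above is what ftrace_graph_is_dead()
 * tests. Once ftrace_graph_stop() enables it, ftrace_push_return_trace()
 * below refuses to add new entries, so no further functions are traced.
 */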
/* Add a function return address to the trace stack on thread info. */
static int
ftrace_push_return_trace(unsigned long ret, unsigned long func,
			 unsigned long frame_pointer, unsigned long *retp)
{
	unsigned long long calltime;
	int index;

	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	current->ret_stack[index].fp = frame_pointer;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
	current->ret_stack[index].retp = retp;
#endif
	return 0;
}
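/*
 * Each ftrace_ret_stack entry saved above carries everything the return
 * side needs: the original return address (ret), the traced function
 * (func), the entry timestamp (calltime) and, when the architecture
 * supports the checks, the frame pointer (fp) and the location of the
 * return address on the stack (retp).
 */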
/*
 * Not all archs define MCOUNT_INSN_SIZE which is used to look for direct
 * functions. But those archs currently don't support direct functions
 * anyway, and ftrace_find_rec_direct() is just a stub for them.
 * Define MCOUNT_INSN_SIZE to keep those archs compiling.
 */
#ifndef MCOUNT_INSN_SIZE
/* Make sure this only works without direct calls */
# ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
#  error MCOUNT_INSN_SIZE not defined with direct calls enabled
# endif
# define MCOUNT_INSN_SIZE 0
#endif
int function_graph_enter(unsigned long ret, unsigned long func,
			 unsigned long frame_pointer, unsigned long *retp)
{
	struct ftrace_graph_ent trace;

	trace.func = func;
	trace.depth = ++current->curr_ret_depth;

	if (ftrace_push_return_trace(ret, func, frame_pointer, retp))
		goto out;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace))
		goto out_ret;

	return 0;
 out_ret:
	current->curr_ret_stack--;
 out:
	current->curr_ret_depth--;
	return -EBUSY;
}
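/*
 * function_graph_enter() is called from the architecture's entry code.
 * A simplified sketch of such a caller (illustrative only; real arch code
 * adds recursion protection and other checks):
 *
 *	void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
 *				   unsigned long frame_pointer)
 *	{
 *		unsigned long old = *parent;
 *
 *		if (!function_graph_enter(old, ip, frame_pointer, parent))
 *			*parent = (unsigned long)&return_to_handler;
 *	}
 */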
/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = current->curr_ret_depth--;
	/*
	 * We still want to trace interrupts coming in if
	 * max_depth is set to 1. Make sure the decrement is
	 * seen before ftrace_graph_return.
	 */
	barrier();
}
/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
			     void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block ftrace_suspend_notifier = {
	.notifier_call = ftrace_suspend_notifier_call,
};
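/*
 * pause_graph_tracing() and unpause_graph_tracing() only adjust a per-task
 * pause counter that the graph tracer's entry path checks, which is enough
 * to keep the shadow stacks quiet while the hibernation image is written
 * and restored.
 */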
/* fgraph_ret_regs is not defined without CONFIG_FUNCTION_GRAPH_RETVAL */
struct fgraph_ret_regs;

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
static unsigned long __ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs,
						unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
	trace.retval = fgraph_ret_regs_return_value(ret_regs);
#endif
	trace.rettime = trace_clock_local();
	ftrace_graph_return(&trace);
	/*
	 * The ftrace_graph_return() may still access the current
	 * ret_stack structure, we need to make sure the update of
	 * curr_ret_stack is after that.
	 */
	barrier();
	current->curr_ret_stack--;

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}
/*
 * After all architectures have selected HAVE_FUNCTION_GRAPH_RETVAL, we can
 * leave only ftrace_return_to_handler(ret_regs).
 */
#ifdef CONFIG_HAVE_FUNCTION_GRAPH_RETVAL
unsigned long ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs)
{
	return __ftrace_return_to_handler(ret_regs,
				fgraph_ret_regs_frame_pointer(ret_regs));
}
#else
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	return __ftrace_return_to_handler(NULL, frame_pointer);
}
#endif
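/*
 * The architecture's return_to_handler trampoline is the caller of
 * ftrace_return_to_handler(): it typically saves the return value
 * registers, asks here for the original return address, restores the
 * registers and then jumps to that address as if the traced function had
 * returned normally.
 */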
/**
 * ftrace_graph_get_ret_stack - return the entry of the shadow stack
 * @task: The task to read the shadow stack from
 * @idx: Index down the shadow stack
 *
 * Return the ret_struct on the shadow stack of the @task at the
 * call graph at @idx starting with zero. If @idx is zero, it
 * will return the last saved ret_stack entry. If it is greater than
 * zero, it will return the corresponding ret_stack for the depth
 * of saved return addresses.
 */
struct ftrace_ret_stack *
ftrace_graph_get_ret_stack(struct task_struct *task, int idx)
{
	idx = task->curr_ret_stack - idx;

	if (idx >= 0 && idx <= task->curr_ret_stack)
		return &task->ret_stack[idx];

	return NULL;
}
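/*
 * Callers (for example architecture stack unwinders) can walk the whole
 * shadow stack by calling this with @idx of 0, 1, 2, ... until NULL is
 * returned.
 */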
/**
 * ftrace_graph_ret_addr - convert a potentially modified stack return address
 *			   to its original value
 *
 * This function can be called by stack unwinding code to convert a found stack
 * return address ('ret') to its original value, in case the function graph
 * tracer has modified it to be 'return_to_handler'. If the address hasn't
 * been modified, the unchanged value of 'ret' is returned.
 *
 * 'idx' is a state variable which should be initialized by the caller to zero
 * before the first call.
 *
 * 'retp' is a pointer to the return address on the stack. It's ignored if
 * the arch doesn't have HAVE_FUNCTION_GRAPH_RET_ADDR_PTR defined.
 */
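/*
 * A typical unwinder loop would use it roughly like this (illustrative
 * sketch; the state and field names are made up):
 *
 *	int graph_idx = 0;
 *	unsigned long addr;
 *
 *	while (unwind_next_frame(&state)) {
 *		addr = ftrace_graph_ret_addr(state.task, &graph_idx,
 *					     state.ip, state.retp);
 *	}
 *
 * where addr is then the real caller even when the stack slot still holds
 * return_to_handler.
 */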
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int index = task->curr_ret_stack;
	int i;

	if (ret != (unsigned long)dereference_kernel_function_descriptor(return_to_handler))
		return ret;

	if (index < 0)
		return ret;

	for (i = 0; i <= index; i++)
		if (task->ret_stack[i].retp == retp)
			return task->ret_stack[i].ret;

	return ret;
}
#else /* !HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int task_idx;

	if (ret != (unsigned long)dereference_kernel_function_descriptor(return_to_handler))
		return ret;

	task_idx = task->curr_ret_stack;

	if (!task->ret_stack || task_idx < *idx)
		return ret;

	task_idx -= *idx;
	(*idx)++;

	return task->ret_stack[task_idx].ret;
}
#endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
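/*
 * graph_ops is the ftrace_ops that the graph tracer registers with the
 * core ftrace code. Depending on the architecture, ftrace_graph_func()
 * is either a real per-function entry callback or just a stub, which is
 * what the FTRACE_OPS_GRAPH_STUB flag below advertises.
 */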
static struct ftrace_ops graph_ops = {
	.func			= ftrace_graph_func,
	.flags			= FTRACE_OPS_FL_INITIALIZED |
				  FTRACE_OPS_GRAPH_STUB,
#ifdef FTRACE_GRAPH_TRAMP_ADDR
	.trampoline		= FTRACE_GRAPH_TRAMP_ADDR,
	/* trampoline_size is only needed for dynamically allocated tramps */
#endif
	ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
};
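/*
 * Flipped from the tracer's sleep-time option: when sleep time is not to
 * be counted, the sched_switch probe further down adjusts the pending
 * calltime values so that time spent sleeping does not show up in a
 * function's reported duration.
 */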
void ftrace_graph_sleep_time_control(bool enable)
{
	fgraph_sleep_time = enable;
}

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/*
 * Simply points to ftrace_stub, but with the proper protocol.
 * Defined by the linker script in linux/vmlinux.lds.h
 */
extern void ftrace_stub_graph(struct ftrace_graph_ret *);

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return = ftrace_stub_graph;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] =
			kmalloc_array(FTRACE_RETFUNC_DEPTH,
				      sizeof(struct ftrace_ret_stack),
				      GFP_KERNEL);
		if (!ret_stack_list[i]) {

	for_each_process_thread(g, t) {

		if (t->ret_stack == NULL) {
			atomic_set(&t->trace_overrun, 0);
			t->curr_ret_stack = -1;
			t->curr_ret_depth = -1;
			/* Make sure the tasks see the -1 first: */
			smp_wmb();
			t->ret_stack = ret_stack_list[start++];
		}
	}

	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
static void
ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned int prev_state)
{
	unsigned long long timestamp;
	int index;

	/*
	 * Does the user want to count the time a function was asleep?
	 * If so, do not update the time stamps.
	 */
	if (fgraph_sleep_time)
		return;

	timestamp = trace_clock_local();

	prev->ftrace_timestamp = timestamp;

	/* only process tasks that we timestamped */
	if (!next->ftrace_timestamp)
		return;

	/*
	 * Update all the counters in next to make up for the
	 * time next was sleeping.
	 */
	timestamp -= next->ftrace_timestamp;

	for (index = next->curr_ret_stack; index >= 0; index--)
		next->ret_stack[index].calltime += timestamp;
}
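/*
 * Example: if "next" slept for 2ms with three entries pending on its
 * shadow stack, each of those calltime values is pushed forward by 2ms by
 * the loop above, so the eventual rettime - calltime durations do not
 * include the time spent sleeping.
 */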
static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
{
	if (!ftrace_ops_test(&global_ops, trace->func, NULL))
		return 0;
	return __ftrace_graph_entry(trace);
}

/*
 * The function graph tracer should only trace the functions defined
 * by set_ftrace_filter and set_ftrace_notrace. If another function
 * tracer ops is registered, the graph tracer requires testing the
 * function against the global ops, and not just trace any function
 * that any ftrace_ops has registered.
 */
void update_function_graph_func(void)
{
	struct ftrace_ops *op;
	bool do_test = false;

	/*
	 * The graph and global ops share the same set of functions
	 * to test. If any other ops is on the list, then
	 * the graph tracing needs to test if it's the function
	 * being traced.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op != &global_ops && op != &graph_ops &&
		    op != &ftrace_list_end) {
			do_test = true;
			/* in double loop, break out with goto */
			goto out;
		}
	} while_for_each_ftrace_op(op);
 out:
	if (do_test)
		ftrace_graph_entry = ftrace_graph_entry_test;
	else
		ftrace_graph_entry = __ftrace_graph_entry;
}
static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);

static void
graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
{
	atomic_set(&t->trace_overrun, 0);
	t->ftrace_timestamp = 0;
	/* make curr_ret_stack visible before we add the ret_stack */
	smp_wmb();
	t->ret_stack = ret_stack;
}

/*
 * Allocate a return stack for the idle task. May be the first
 * time through, or it may be done by CPU hotplug online.
 */
void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
{
	t->curr_ret_stack = -1;
	t->curr_ret_depth = -1;
	/*
	 * The idle task has no parent, it either has its own
	 * stack or no stack at all.
	 */
	if (t->ret_stack)
		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = per_cpu(idle_ret_stack, cpu);
		if (!ret_stack) {
			ret_stack =
				kmalloc_array(FTRACE_RETFUNC_DEPTH,
					      sizeof(struct ftrace_ret_stack),
					      GFP_KERNEL);
			if (!ret_stack)
				return;
			per_cpu(idle_ret_stack, cpu) = ret_stack;
		}
		graph_init_task(t, ret_stack);
	}
}
/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	/* Make sure we do not use the parent ret_stack */
	t->ret_stack = NULL;
	t->curr_ret_stack = -1;
	t->curr_ret_depth = -1;

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = kmalloc_array(FTRACE_RETFUNC_DEPTH,
					  sizeof(struct ftrace_ret_stack),
					  GFP_KERNEL);
		if (!ret_stack)
			return;
		graph_init_task(t, ret_stack);
	}
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack	*ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}
/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret, cpu;

	ret_stack_list = kmalloc_array(FTRACE_RETSTACK_ALLOC_SIZE,
				       sizeof(struct ftrace_ret_stack *),
				       GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	/* The cpu_boot init_task->ret_stack will never be freed */
	for_each_online_cpu(cpu) {
		if (!idle_task(cpu)->ret_stack)
			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
	}

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	if (!ret) {
		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
		if (ret)
			pr_info("ftrace_graph: Couldn't activate tracepoint"
				" probe to kernel_sched_switch\n");
	}

	kfree(ret_stack_list);
	return ret;
}
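/*
 * Note: alloc_retstack_tasklist() only prepares FTRACE_RETSTACK_ALLOC_SIZE
 * stacks per pass, so it returns -EAGAIN while some tasks are still missing
 * a ret_stack and the do/while loop above simply runs it again.
 */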
int register_ftrace_graph(struct fgraph_ops *gops)
{
	int ret = 0;

	mutex_lock(&ftrace_lock);

	/* we currently allow only one tracer registered at a time */
	if (ftrace_graph_active) {
		ret = -EBUSY;
		goto out;
	}

	register_pm_notifier(&ftrace_suspend_notifier);

	ftrace_graph_active++;
	ret = start_graph_tracing();
	if (ret) {
		ftrace_graph_active--;
		goto out;
	}

	ftrace_graph_return = gops->retfunc;

	/*
	 * Update the indirect function to the entryfunc, and the
	 * function that gets called to the entry_test first. Then
	 * call the update fgraph entry function to determine if
	 * the entryfunc should be called directly or not.
	 */
	__ftrace_graph_entry = gops->entryfunc;
	ftrace_graph_entry = ftrace_graph_entry_test;
	update_function_graph_func();

	ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
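/*
 * A tracer hooks in with something like the following (illustrative
 * sketch, names are hypothetical):
 *
 *	static struct fgraph_ops my_gops = {
 *		.entryfunc	= my_entry_callback,
 *		.retfunc	= my_return_callback,
 *	};
 *
 *	register_ftrace_graph(&my_gops);
 *	...
 *	unregister_ftrace_graph(&my_gops);
 */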
void unregister_ftrace_graph(struct fgraph_ops *gops)
{
	mutex_lock(&ftrace_lock);

	if (unlikely(!ftrace_graph_active))
		goto out;

	ftrace_graph_active--;
	ftrace_graph_return = ftrace_stub_graph;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	__ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);
	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);

 out:
	mutex_unlock(&ftrace_lock);
}