]>
Commit | Line | Data |
---|---|---|
d864a3ca SRV |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* | |
3 | * Infrastructure to hook into function calls and returns. | |
4 | * Copyright (c) 2008-2009 Frederic Weisbecker <[email protected]> | |
5 | * Mostly borrowed from function tracer which | |
6 | * is Copyright (c) Steven Rostedt <[email protected]> | |
7 | * | |
8 | * Highly modified by Steven Rostedt (VMware). | |
9 | */ | |
e73e679f | 10 | #include <linux/suspend.h> |
d864a3ca | 11 | #include <linux/ftrace.h> |
e73e679f | 12 | #include <linux/slab.h> |
d864a3ca | 13 | |
e73e679f SRV |
14 | #include <trace/events/sched.h> |
15 | ||
16 | #include "ftrace_internal.h" | |
17 | ||
18 | #ifdef CONFIG_DYNAMIC_FTRACE | |
19 | #define ASSIGN_OPS_HASH(opsname, val) \ | |
20 | .func_hash = val, \ | |
21 | .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock), | |
22 | #else | |
23 | #define ASSIGN_OPS_HASH(opsname, val) | |
24 | #endif | |
/* Set once by ftrace_graph_stop() to permanently disable graph tracing */
static bool kill_ftrace_graph;

/* Non-zero while a function graph tracer is registered (see register_ftrace_graph) */
int ftrace_graph_active;

/* Both enabled by default (can be cleared by function_graph tracer flags) */
static bool fgraph_sleep_time = true;
31 | |
/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
	/* Plain read of the kill flag; callers only need a best-effort check */
	return kill_ftrace_graph;
}
43 | ||
/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
	/* One-way switch: nothing ever clears this flag again */
	kill_ftrace_graph = true;
}
56 | ||
/*
 * Add a function return address to the trace stack on thread info.
 * Returns 0 on success, -EBUSY if tracing is dead, the task has no
 * ret_stack, or the shadow stack is full (overrun is then counted).
 */
static int
ftrace_push_return_trace(unsigned long ret, unsigned long func,
			 unsigned long frame_pointer, unsigned long *retp)
{
	unsigned long long calltime;
	int index;

	/* Do not touch the shadow stack once graph tracing was killed */
	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	calltime = trace_clock_local();

	/* Reserve the slot first ... */
	index = ++current->curr_ret_stack;
	/* ... and keep the compiler from reordering the stores into it */
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	/* Saved so the pop side can verify the frame pointer matches */
	current->ret_stack[index].fp = frame_pointer;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
	/* Location of the return address on the real stack (for unwinders) */
	current->ret_stack[index].retp = retp;
#endif
	return 0;
}
98 | ||
/*
 * Not all archs define MCOUNT_INSN_SIZE which is used to look for direct
 * functions. But those archs currently don't support direct functions
 * anyway, and ftrace_find_rec_direct() is just a stub for them.
 * Define MCOUNT_INSN_SIZE to keep those archs compiling.
 */
#ifndef MCOUNT_INSN_SIZE
/* Make sure this only works without direct calls */
# ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
#  error MCOUNT_INSN_SIZE not defined with direct calls enabled
# endif
# define MCOUNT_INSN_SIZE 0
#endif
112 | ||
/*
 * Entry hook called on every traced function call.  Pushes the return
 * address onto the shadow stack and invokes the entry callback.
 * Returns 0 when the function is being traced, -EBUSY otherwise (the
 * architecture code then leaves the return address untouched).
 */
int function_graph_enter(unsigned long ret, unsigned long func,
			 unsigned long frame_pointer, unsigned long *retp)
{
	struct ftrace_graph_ent trace;

	/*
	 * Skip graph tracing if the return location is served by direct trampoline,
	 * since call sequence and return addresses are unpredictable anymore.
	 * Ex: BPF trampoline may call original function and may skip frame
	 * depending on type of BPF programs attached.
	 */
	if (ftrace_direct_func_count &&
	    ftrace_find_rec_direct(ret - MCOUNT_INSN_SIZE))
		return -EBUSY;
	trace.func = func;
	trace.depth = ++current->curr_ret_depth;

	if (ftrace_push_return_trace(ret, func, frame_pointer, retp))
		goto out;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace))
		goto out_ret;

	return 0;
 out_ret:
	/* Undo the push first, then the depth bump, mirroring the order above */
	current->curr_ret_stack--;
 out:
	current->curr_ret_depth--;
	return -EBUSY;
}
144 | ||
/*
 * Retrieve a function return address from the trace stack on thread info.
 * Fills @trace from the top shadow-stack entry and stores the original
 * return address in *@ret.  On corruption, tracing is stopped and *@ret
 * is pointed at panic() so a broken return still goes somewhere defined.
 */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	/* A bad index means the shadow stack was corrupted: bail hard */
	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = current->curr_ret_depth--;
	/*
	 * We still want to trace interrupts coming in if
	 * max_depth is set to 1. Make sure the decrement is
	 * seen before ftrace_graph_return.
	 */
	barrier();
}
202 | ||
e73e679f SRV |
203 | /* |
204 | * Hibernation protection. | |
205 | * The state of the current task is too much unstable during | |
206 | * suspend/restore to disk. We want to protect against that. | |
207 | */ | |
208 | static int | |
209 | ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state, | |
210 | void *unused) | |
211 | { | |
212 | switch (state) { | |
213 | case PM_HIBERNATION_PREPARE: | |
214 | pause_graph_tracing(); | |
215 | break; | |
216 | ||
217 | case PM_POST_HIBERNATION: | |
218 | unpause_graph_tracing(); | |
219 | break; | |
220 | } | |
221 | return NOTIFY_DONE; | |
222 | } | |
223 | ||
/* Registered/unregistered with the PM core around graph tracer lifetime */
static struct notifier_block ftrace_suspend_notifier = {
	.notifier_call = ftrace_suspend_notifier_call,
};
227 | ||
/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	ftrace_graph_return(&trace);
	/*
	 * The ftrace_graph_return() may still access the current
	 * ret_stack structure, we need to make sure the update of
	 * curr_ret_stack is after that.
	 */
	barrier();
	current->curr_ret_stack--;

	/* A zero return address means the shadow stack gave us garbage */
	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}
e73e679f | 257 | |
45fe439b SRV |
258 | /** |
259 | * ftrace_graph_get_ret_stack - return the entry of the shadow stack | |
260 | * @task: The task to read the shadow stack from | |
261 | * @idx: Index down the shadow stack | |
262 | * | |
263 | * Return the ret_struct on the shadow stack of the @task at the | |
264 | * call graph at @idx starting with zero. If @idx is zero, it | |
265 | * will return the last saved ret_stack entry. If it is greater than | |
266 | * zero, it will return the corresponding ret_stack for the depth | |
267 | * of saved return addresses. | |
268 | */ | |
b0e21a61 SRV |
269 | struct ftrace_ret_stack * |
270 | ftrace_graph_get_ret_stack(struct task_struct *task, int idx) | |
271 | { | |
e8d086dd | 272 | idx = task->curr_ret_stack - idx; |
b0e21a61 SRV |
273 | |
274 | if (idx >= 0 && idx <= task->curr_ret_stack) | |
e8d086dd | 275 | return &task->ret_stack[idx]; |
b0e21a61 SRV |
276 | |
277 | return NULL; | |
278 | } | |
279 | ||
/**
 * ftrace_graph_ret_addr - convert a potentially modified stack return address
 *			   to its original value
 *
 * This function can be called by stack unwinding code to convert a found stack
 * return address ('ret') to its original value, in case the function graph
 * tracer has modified it to be 'return_to_handler'. If the address hasn't
 * been modified, the unchanged value of 'ret' is returned.
 *
 * 'idx' is a state variable which should be initialized by the caller to zero
 * before the first call.
 *
 * 'retp' is a pointer to the return address on the stack. It's ignored if
 * the arch doesn't have HAVE_FUNCTION_GRAPH_RET_ADDR_PTR defined.
 */
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int index = task->curr_ret_stack;
	int i;

	/* Compare against the (possibly descriptor-wrapped) trampoline address */
	if (ret != (unsigned long)dereference_kernel_function_descriptor(return_to_handler))
		return ret;

	if (index < 0)
		return ret;

	/* Find the shadow-stack entry recorded for this exact stack slot */
	for (i = 0; i <= index; i++)
		if (task->ret_stack[i].retp == retp)
			return task->ret_stack[i].ret;

	return ret;
}
#else /* !HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int task_idx;

	if (ret != (unsigned long)dereference_kernel_function_descriptor(return_to_handler))
		return ret;

	task_idx = task->curr_ret_stack;

	if (!task->ret_stack || task_idx < *idx)
		return ret;

	/*
	 * Without retp there is no exact match: hand out entries from the
	 * top of the shadow stack, advancing *idx on each call.
	 */
	task_idx -= *idx;
	(*idx)++;

	return task->ret_stack[task_idx].ret;
}
#endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
334 | ||
/*
 * The ftrace_ops registered on behalf of the graph tracer.  Its func is
 * a stub (FTRACE_OPS_FL_STUB); the real work happens via the arch entry
 * trampoline.  It shares global_ops' filter hash (see ASSIGN_OPS_HASH).
 */
static struct ftrace_ops graph_ops = {
	.func			= ftrace_stub,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
				  FTRACE_OPS_FL_INITIALIZED |
				  FTRACE_OPS_FL_PID |
				  FTRACE_OPS_FL_STUB,
#ifdef FTRACE_GRAPH_TRAMP_ADDR
	.trampoline		= FTRACE_GRAPH_TRAMP_ADDR,
	/* trampoline_size is only needed for dynamically allocated tramps */
#endif
	ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
};
347 | ||
/* Toggle whether sleep time is counted in function timings (tracer flag) */
void ftrace_graph_sleep_time_control(bool enable)
{
	fgraph_sleep_time = enable;
}
352 | ||
/* Default entry callback: returning 0 tells the caller not to trace */
int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}
357 | ||
/*
 * Simply points to ftrace_stub, but with the proper protocol.
 * Defined by the linker script in linux/vmlinux.lds.h
 */
extern void ftrace_stub_graph(struct ftrace_graph_ret *);

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return = ftrace_stub_graph;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
/* The registered entryfunc; ftrace_graph_entry may point at a test wrapper */
static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
368 | ||
/*
 * Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks.
 * Returns 0 on success, -ENOMEM on allocation failure, or -EAGAIN when
 * more tasks lacked a ret_stack than this batch could cover (the caller
 * is expected to retry).
 */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	/* Allocate the whole batch up front, outside of tasklist_lock */
	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] =
			kmalloc_array(FTRACE_RETFUNC_DEPTH,
				      sizeof(struct ftrace_ret_stack),
				      GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock(&tasklist_lock);
	do_each_thread(g, t) {
		/* Batch exhausted: report -EAGAIN so the caller retries */
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
			t->curr_ret_stack = -1;
			t->curr_ret_depth = -1;
			/* Make sure the tasks see the -1 first: */
			smp_wmb();
			t->ret_stack = ret_stack_list[start++];
		}
	} while_each_thread(g, t);

unlock:
	read_unlock(&tasklist_lock);
free:
	/* Free only the entries that were allocated but not handed out */
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}
415 | ||
416 | static void | |
417 | ftrace_graph_probe_sched_switch(void *ignore, bool preempt, | |
418 | struct task_struct *prev, struct task_struct *next) | |
419 | { | |
420 | unsigned long long timestamp; | |
421 | int index; | |
422 | ||
423 | /* | |
424 | * Does the user want to count the time a function was asleep. | |
425 | * If so, do not update the time stamps. | |
426 | */ | |
427 | if (fgraph_sleep_time) | |
428 | return; | |
429 | ||
430 | timestamp = trace_clock_local(); | |
431 | ||
432 | prev->ftrace_timestamp = timestamp; | |
433 | ||
434 | /* only process tasks that we timestamped */ | |
435 | if (!next->ftrace_timestamp) | |
436 | return; | |
437 | ||
438 | /* | |
439 | * Update all the counters in next to make up for the | |
440 | * time next was sleeping. | |
441 | */ | |
442 | timestamp -= next->ftrace_timestamp; | |
443 | ||
444 | for (index = next->curr_ret_stack; index >= 0; index--) | |
445 | next->ret_stack[index].calltime += timestamp; | |
446 | } | |
447 | ||
448 | static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace) | |
449 | { | |
450 | if (!ftrace_ops_test(&global_ops, trace->func, NULL)) | |
451 | return 0; | |
452 | return __ftrace_graph_entry(trace); | |
453 | } | |
454 | ||
/*
 * The function graph tracer should only trace the functions defined
 * by set_ftrace_filter and set_ftrace_notrace. If another function
 * tracer ops is registered, the graph tracer requires testing the
 * function against the global ops, and not just trace any function
 * that any ftrace_ops registered.
 */
void update_function_graph_func(void)
{
	struct ftrace_ops *op;
	bool do_test = false;

	/*
	 * The graph and global ops share the same set of functions
	 * to test. If any other ops is on the list, then
	 * the graph tracing needs to test if it's the function
	 * it should call.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op != &global_ops && op != &graph_ops &&
		    op != &ftrace_list_end) {
			do_test = true;
			/* in double loop, break out with goto */
			goto out;
		}
	} while_for_each_ftrace_op(op);
 out:
	/* Pick the filtered wrapper or the direct entryfunc accordingly */
	if (do_test)
		ftrace_graph_entry = ftrace_graph_entry_test;
	else
		ftrace_graph_entry = __ftrace_graph_entry;
}
487 | ||
/* Shadow stack for each CPU's idle task; allocated once, reused on hotplug */
static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
489 | ||
/* Attach @ret_stack to @t and reset its per-task graph tracing state */
static void
graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
{
	atomic_set(&t->tracing_graph_pause, 0);
	atomic_set(&t->trace_overrun, 0);
	t->ftrace_timestamp = 0;
	/* make curr_ret_stack visible before we add the ret_stack */
	smp_wmb();
	t->ret_stack = ret_stack;
}
500 | ||
/*
 * Allocate a return stack for the idle task. May be the first
 * time through, or it may be done by CPU hotplug online.
 */
void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
{
	t->curr_ret_stack = -1;
	t->curr_ret_depth = -1;
	/*
	 * The idle task has no parent, it either has its own
	 * stack or no stack at all.
	 */
	if (t->ret_stack)
		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		/* Reuse the per-CPU stack if a prior online already made one */
		ret_stack = per_cpu(idle_ret_stack, cpu);
		if (!ret_stack) {
			ret_stack =
				kmalloc_array(FTRACE_RETFUNC_DEPTH,
					      sizeof(struct ftrace_ret_stack),
					      GFP_KERNEL);
			if (!ret_stack)
				return;
			per_cpu(idle_ret_stack, cpu) = ret_stack;
		}
		graph_init_task(t, ret_stack);
	}
}
532 | ||
533 | /* Allocate a return stack for newly created task */ | |
534 | void ftrace_graph_init_task(struct task_struct *t) | |
535 | { | |
536 | /* Make sure we do not use the parent ret_stack */ | |
537 | t->ret_stack = NULL; | |
538 | t->curr_ret_stack = -1; | |
539 | t->curr_ret_depth = -1; | |
540 | ||
541 | if (ftrace_graph_active) { | |
542 | struct ftrace_ret_stack *ret_stack; | |
543 | ||
544 | ret_stack = kmalloc_array(FTRACE_RETFUNC_DEPTH, | |
545 | sizeof(struct ftrace_ret_stack), | |
546 | GFP_KERNEL); | |
547 | if (!ret_stack) | |
548 | return; | |
549 | graph_init_task(t, ret_stack); | |
550 | } | |
551 | } | |
552 | ||
/* Release a task's shadow stack on exit */
void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack *ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}
563 | ||
/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret, cpu;

	ret_stack_list = kmalloc_array(FTRACE_RETSTACK_ALLOC_SIZE,
				       sizeof(struct ftrace_ret_stack *),
				       GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	/* The cpu_boot init_task->ret_stack will never be freed */
	for_each_online_cpu(cpu) {
		if (!idle_task(cpu)->ret_stack)
			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
	}

	/* -EAGAIN means more tasks than one batch covered; keep going */
	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	if (!ret) {
		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
		if (ret)
			pr_info("ftrace_graph: Couldn't activate tracepoint"
				" probe to kernel_sched_switch\n");
	}

	kfree(ret_stack_list);
	return ret;
}
597 | ||
/**
 * register_ftrace_graph - register a function graph tracer
 * @gops: the entry/return callbacks to install
 *
 * Only one graph tracer may be registered at a time.  Returns 0 on
 * success, -EBUSY if one is already active, or the error from setting
 * up the per-task shadow stacks / enabling ftrace.
 */
int register_ftrace_graph(struct fgraph_ops *gops)
{
	int ret = 0;

	mutex_lock(&ftrace_lock);

	/* we currently allow only one tracer registered at a time */
	if (ftrace_graph_active) {
		ret = -EBUSY;
		goto out;
	}

	register_pm_notifier(&ftrace_suspend_notifier);

	/* Mark active before allocating so new tasks get ret_stacks too */
	ftrace_graph_active++;
	ret = start_graph_tracing();
	if (ret) {
		ftrace_graph_active--;
		goto out;
	}

	ftrace_graph_return = gops->retfunc;

	/*
	 * Update the indirect function to the entryfunc, and the
	 * function that gets called to the entry_test first. Then
	 * call the update fgraph entry function to determine if
	 * the entryfunc should be called directly or not.
	 */
	__ftrace_graph_entry = gops->entryfunc;
	ftrace_graph_entry = ftrace_graph_entry_test;
	update_function_graph_func();

	ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
636 | ||
/**
 * unregister_ftrace_graph - unregister the current function graph tracer
 * @gops: the fgraph_ops passed at registration (currently unused here)
 *
 * Restores the stub callbacks, shuts the graph ops down and removes the
 * PM and sched-switch hooks.  A no-op if no tracer is active.
 */
void unregister_ftrace_graph(struct fgraph_ops *gops)
{
	mutex_lock(&ftrace_lock);

	if (unlikely(!ftrace_graph_active))
		goto out;

	/* Point callbacks back at the stubs before tearing anything down */
	ftrace_graph_active--;
	ftrace_graph_return = ftrace_stub_graph;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	__ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);
	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);

 out:
	mutex_unlock(&ftrace_lock);
}