// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <[email protected]>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <[email protected]>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

static bool kill_ftrace_graph;

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
	return kill_ftrace_graph;
}

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
	kill_ftrace_graph = true;
}

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

unsigned int fgraph_max_depth;

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purposes) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
	/* Include sleep time (scheduled out) between entry and return */
	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
	/* Include time within nested functions */
	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
	       TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
	.opts = trace_opts
};

static struct trace_array *graph_array;

/*
 * The DURATION column is also used to display IRQ signs; the following
 * values are used by print_graph_irq and others to fill in space
 * in the DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags);

/* Add a function return address to the trace stack on thread info. */
static int
ftrace_push_return_trace(unsigned long ret, unsigned long func,
			 unsigned long frame_pointer, unsigned long *retp)
{
	unsigned long long calltime;
	int index;

	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	/*
	 * The curr_ret_stack is an index into the return stack of the
	 * current task. Its value should be in [0, FTRACE_RETFUNC_DEPTH)
	 * when the function graph tracer is used. To support filtering out
	 * specific functions, the index is made negative by subtracting a
	 * huge value (FTRACE_NOTRACE_DEPTH), so that ftrace ignores the
	 * record when it sees a negative index. The index is recovered when
	 * returning from the filtered function by adding FTRACE_NOTRACE_DEPTH
	 * back, and recording then continues normally.
	 *
	 * The curr_ret_stack is initialized to -1 and gets increased
	 * in this function. So it can be less than -1 only if it was
	 * filtered out via ftrace_graph_notrace_addr(), which can be
	 * set from the set_graph_notrace file in tracefs by the user.
	 */
	if (current->curr_ret_stack < -1)
		return -EBUSY;

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	if (ftrace_graph_notrace_addr(func))
		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	current->ret_stack[index].fp = frame_pointer;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
	current->ret_stack[index].retp = retp;
#endif
	return 0;
}

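/*
 * Entry point called from the architecture's function entry hook:
 * account the new depth, push the real return address onto the
 * ret_stack and hand the event to the entry callback. A non-zero
 * return tells the arch code not to hijack this return address.
 */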
int function_graph_enter(unsigned long ret, unsigned long func,
			 unsigned long frame_pointer, unsigned long *retp)
{
	struct ftrace_graph_ent trace;

	trace.func = func;
	trace.depth = ++current->curr_ret_depth;

	if (ftrace_push_return_trace(ret, func,
				     frame_pointer, retp))
		goto out;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace))
		goto out_ret;

	return 0;
 out_ret:
	current->curr_ret_stack--;
 out:
	current->curr_ret_depth--;
	return -EBUSY;
}

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	/*
	 * A negative index here means that it's just returned from a
	 * notrace'd function. Recover the index to get the original
	 * return address. See ftrace_push_return_trace().
	 *
	 * TODO: Need to check whether the stack gets corrupted.
	 */
	if (index < 0)
		index += FTRACE_NOTRACE_DEPTH;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = current->curr_ret_depth--;
	/*
	 * We still want to trace interrupts coming in if
	 * max_depth is set to 1. Make sure the decrement is
	 * seen before ftrace_graph_return.
	 */
	barrier();
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	ftrace_graph_return(&trace);
	/*
	 * The ftrace_graph_return() may still access the current
	 * ret_stack structure; we need to make sure the update of
	 * curr_ret_stack is done after that.
	 */
	barrier();
	current->curr_ret_stack--;
	/*
	 * The curr_ret_stack can be less than -1 only if it was
	 * filtered out and it's about to return from the function.
	 * Recover the index and continue to trace normal functions.
	 */
	if (current->curr_ret_stack < -1) {
		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
		return ret;
	}

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}

/**
 * ftrace_graph_ret_addr - convert a potentially modified stack return address
 *			   to its original value
 *
 * This function can be called by stack unwinding code to convert a found stack
 * return address ('ret') to its original value, in case the function graph
 * tracer has modified it to be 'return_to_handler'. If the address hasn't
 * been modified, the unchanged value of 'ret' is returned.
 *
 * 'idx' is a state variable which should be initialized by the caller to zero
 * before the first call.
 *
 * 'retp' is a pointer to the return address on the stack. It's ignored if
 * the arch doesn't have HAVE_FUNCTION_GRAPH_RET_ADDR_PTR defined.
 */
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int index = task->curr_ret_stack;
	int i;

	if (ret != (unsigned long)return_to_handler)
		return ret;

	if (index < -1)
		index += FTRACE_NOTRACE_DEPTH;

	if (index < 0)
		return ret;

	for (i = 0; i <= index; i++)
		if (task->ret_stack[i].retp == retp)
			return task->ret_stack[i].ret;

	return ret;
}
#else /* !HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int task_idx;

	if (ret != (unsigned long)return_to_handler)
		return ret;

	task_idx = task->curr_ret_stack;

	if (!task->ret_stack || task_idx < *idx)
		return ret;

	task_idx -= *idx;
	(*idx)++;

	return task->ret_stack[task_idx].ret;
}
#endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */

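/*
 * Reserve a TRACE_GRAPH_ENT event in the ring buffer and fill it with
 * the entry record. Returns 1 on success, 0 if no event could be
 * reserved.
 */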
int __trace_graph_entry(struct trace_array *tr,
			struct ftrace_graph_ent *trace,
			unsigned long flags,
			int pc)
{
	struct trace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry = ring_buffer_event_data(event);
	entry->graph_ent = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);

	return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_irq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(tr))
		return 0;

	if (ftrace_graph_ignore_func(trace))
		return 0;

	if (ftrace_graph_ignore_irqs())
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions. But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	/*
	 * Stop here if tracing_threshold is set. We only write function return
	 * events to the ring buffer.
	 */
	if (tracing_thresh)
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}

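/*
 * Emit a paired entry/return event with identical timestamps and zero
 * depth, so that a single function call (e.g. from the latency tracers
 * that use the graph output format) can be rendered as a leaf.
 */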
static void
__trace_graph_function(struct trace_array *tr,
		       unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
		     unsigned long ip, unsigned long parent_ip,
		     unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
			  struct ftrace_graph_ret *trace,
			  unsigned long flags,
			  int pc)
{
	struct trace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ret = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	ftrace_graph_addr_finish(trace);

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */

	smp_mb();
}

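/*
 * Return handler used when tracing_thresh is set: drop returns whose
 * duration is below the threshold and record only the ones that are
 * slow enough to matter.
 */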
static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	ftrace_graph_addr_finish(trace);

	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
	graph_trace_reset(tr);
	return graph_trace_init(tr);
}

static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

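/*
 * Print "comm-pid", centered in a TRACE_GRAPH_PROCINFO_LENGTH wide
 * column. The comm is truncated to 7 characters so the field keeps a
 * fixed width.
 */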
static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%s-%s", comm, pid_str);

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++)
		trace_seq_putc(s, ' ');
}

static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	trace_seq_putc(s, ' ');
	trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;

	if (!data)
		return;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return;
	/*
	 * Context-switch trace line:
	 *
	 * ------------------------------------------
	 * | 1)  migration/0--1  =>  sshd-1755
	 * ------------------------------------------
	 *
	 */
	trace_seq_puts(s, " ------------------------------------------\n");
	print_graph_cpu(s, cpu);
	print_graph_proc(s, prev_pid);
	trace_seq_puts(s, " => ");
	print_graph_proc(s, pid);
	trace_seq_puts(s, "\n ------------------------------------------\n\n");
}

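/*
 * Look ahead in the ring buffer: if the very next event is the return
 * of @curr's function (same pid, same func), then @curr is a leaf call
 * and the matching return entry is handed back so both can be printed
 * on one line. Returns NULL when the entry is not a leaf.
 */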
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		    struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {

		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
	    curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}

static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	trace_seq_printf(s, "%5lu.%06lu |  ",
			 (unsigned long)t, usecs_rem);
}

static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;

	if (addr < (unsigned long)__irqentry_text_start ||
	    addr >= (unsigned long)__irqentry_text_end)
		return;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
			print_graph_abs_time(iter->ts, s);

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU)
			print_graph_cpu(s, cpu);

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			print_graph_proc(s, pid);
			trace_seq_puts(s, " | ");
		}

		/* Latency format */
		if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			print_graph_lat_fmt(s, ent);
	}

	/* No overhead */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

	if (type == TRACE_GRAPH_ENT)
		trace_seq_puts(s, "==========>");
	else
		trace_seq_puts(s, "<==========");

	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
	trace_seq_putc(s, '\n');
}

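/*
 * Format a nanosecond duration as microseconds with up to three
 * decimal places, padded to the fixed width of the DURATION column:
 * 1837 ns, for example, comes out as "1.837 us".
 */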
void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char usecs_str[21];
	char nsecs_str[5];
	int len;
	int i;

	sprintf(usecs_str, "%lu", (unsigned long) duration);

	/* Print usecs */
	trace_seq_printf(s, "%s", usecs_str);

	len = strlen(usecs_str);

	/* Print nsecs (we don't want to exceed 7 numbers) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		trace_seq_printf(s, ".%s", nsecs_str);
		len += strlen(nsecs_str) + 1;
	}

	trace_seq_puts(s, " us ");

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 8; i++)
		trace_seq_putc(s, ' ');
}

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags)
{
	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		trace_seq_puts(s, "              |  ");
		return;
	case FLAGS_FILL_START:
		trace_seq_puts(s, "  ");
		return;
	case FLAGS_FILL_END:
		trace_seq_puts(s, " |");
		return;
	}

	/* Signal an overhead of time execution to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
		trace_seq_printf(s, "%c ", trace_find_mark(duration));
	else
		trace_seq_puts(s, "  ");

	trace_print_graph_duration(duration, s);
	trace_seq_puts(s, "|  ");
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		       struct ftrace_graph_ent_entry *entry,
		       struct ftrace_graph_ret_entry *ret_entry,
		       struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int cpu = iter->cpu;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/* If a graph tracer ignored set_graph_notrace */
		if (call->depth < -1)
			call->depth += FTRACE_NOTRACE_DEPTH;

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps();\n", (void *)call->func);

	print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
			cpu, iter->ent->pid, flags);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		/* If a graph tracer ignored set_graph_notrace */
		if (call->depth < -1)
			call->depth += FTRACE_NOTRACE_DEPTH;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps() {\n", (void *)call->func);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	struct trace_array *tr = iter->tr;
	int cpu = iter->cpu;

	/* Pid */
	verif_pid(s, ent->pid, cpu, data);

	if (type)
		/* Interrupt */
		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		print_graph_abs_time(iter->ts, s);

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU)
		print_graph_cpu(s, cpu);

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		print_graph_proc(s, ent->pid);
		trace_seq_puts(s, " | ");
	}

	/* Latency format */
	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		print_graph_lat_fmt(s, ent);
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is returning entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
		  struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it. Because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(trace->depth < 0)) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	print_graph_prologue(iter, s, 0, 0, flags);

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name. Always do
	 * that if the funcgraph-tail option is enabled.
	 */
	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
		trace_seq_puts(s, "}\n");
	else
		trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
		trace_seq_printf(s, " (Overruns: %lu)\n",
				 trace->overrun);

	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			cpu, pid, flags);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	struct trace_array *tr = iter->tr;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	print_graph_prologue(iter, s, 0, 0, flags);

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
			trace_seq_putc(s, ' ');

	/* The comment */
	trace_seq_puts(s, "/* ");

	switch (iter->ent->type) {
	case TRACE_BPUTS:
		ret = trace_print_bputs_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	if (trace_seq_has_overflowed(s))
		goto out;

	/* Strip ending newline */
	if (s->buffer[s->seq.len - 1] == '\n') {
		s->buffer[s->seq.len - 1] = '\0';
		s->seq.len--;
	}

	trace_seq_puts(s, " */\n");
 out:
	return trace_handle_return(s);
}

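/*
 * Main output routine: dispatch on the event type and render entries,
 * returns and comments; replay a saved entry first if the previous
 * write to the seq buffer failed.
 */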
enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * it can be safely saved at the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* dont trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
					struct seq_file *s, u32 flags)
{
	int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "  TASK/PID       ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "  DURATION   ");
	seq_puts(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "   |    |        ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "   |   |      ");
	seq_puts(s, "               |   |   |   |\n");
}

static void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(tr, s, flags);
}

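/*
 * With the default flags, the headers printed above come out roughly
 * as:
 *
 *  # CPU  DURATION                  FUNCTION CALLS
 *  # |     |   |                     |   |   |   |
 */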
void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	gfp_t gfpflags;
	int cpu;

	iter->private = NULL;

	/* We can be called in atomic context via ftrace_dump() */
	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

	data = kzalloc(sizeof(*data), gfpflags);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warn("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	if (bit == TRACE_GRAPH_SLEEP_TIME)
		ftrace_graph_sleep_time_control(set);

	if (bit == TRACE_GRAPH_GRAPH_TIME)
		ftrace_graph_graph_time_control(set);

	return 0;
}

static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};

static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.update_thresh	= graph_trace_update_thresh,
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};

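/*
 * Handlers for the max_graph_depth tracefs file: writing a depth limits
 * how deep the graph tracer follows nested calls (0 means no limit),
 * and reading reports the current value of fgraph_max_depth.
 */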
static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	fgraph_max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
	int n;

	n = sprintf(buf, "%d\n", fgraph_max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};

static __init int init_graph_tracefs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("max_graph_depth", 0644, d_tracer,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_tracefs);

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);

	if (!register_trace_event(&graph_trace_entry_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_trace_event(&graph_trace_ret_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);