]>
Commit | Line | Data |
---|---|---|
fb52607a FW |
1 | /* |
2 | * | |
3 | * Function graph tracer. | |
9005f3eb | 4 | * Copyright (c) 2008-2009 Frederic Weisbecker <[email protected]> |
fb52607a FW |
5 | * Mostly borrowed from function tracer which |
6 | * is Copyright (c) Steven Rostedt <[email protected]> | |
7 | * | |
8 | */ | |
9 | #include <linux/debugfs.h> | |
10 | #include <linux/uaccess.h> | |
11 | #include <linux/ftrace.h> | |
5a0e3ad6 | 12 | #include <linux/slab.h> |
fb52607a FW |
13 | #include <linux/fs.h> |
14 | ||
15 | #include "trace.h" | |
f0868d1e | 16 | #include "trace_output.h" |
fb52607a | 17 | |
b304d044 SR |
/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

/* Per-cpu rendering state kept while formatting the graph output. */
struct fgraph_cpu_data {
	pid_t		last_pid;	/* pid of the last entry printed on this cpu */
	int		depth;		/* current call depth, used to align comments */
	int		depth_irq;	/* depth bookkeeping across interrupts — used by the irq-skip logic (not fully visible here) */
	int		ignore;		/* when set, skip printing the next entry */
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH]; /* entry func recorded per depth, matched on exit */
};
28 | ||
/* Per-iterator private data for the function graph tracer output. */
struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;	/* saved entry event, replayed if seq output failed */
	struct ftrace_graph_ret_entry	ret;	/* saved return event paired with 'ent' */
	int				failed;	/* non-zero if the previous output attempt failed */
	int				cpu;	/* cpu the saved entries came from */
};
38 | ||
287b6e68 | 39 | #define TRACE_GRAPH_INDENT 2 |
fb52607a | 40 | |
1a056155 | 41 | /* Flag options */ |
fb52607a | 42 | #define TRACE_GRAPH_PRINT_OVERRUN 0x1 |
1a056155 FW |
43 | #define TRACE_GRAPH_PRINT_CPU 0x2 |
44 | #define TRACE_GRAPH_PRINT_OVERHEAD 0x4 | |
11e84acc | 45 | #define TRACE_GRAPH_PRINT_PROC 0x8 |
9005f3eb | 46 | #define TRACE_GRAPH_PRINT_DURATION 0x10 |
9106b693 | 47 | #define TRACE_GRAPH_PRINT_ABS_TIME 0x20 |
2bd16212 | 48 | #define TRACE_GRAPH_PRINT_IRQS 0x40 |
1a056155 | 49 | |
8741db53 SR |
50 | static unsigned int max_depth; |
51 | ||
fb52607a | 52 | static struct tracer_opt trace_opts[] = { |
9005f3eb | 53 | /* Display overruns? (for self-debug purpose) */ |
1a056155 FW |
54 | { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) }, |
55 | /* Display CPU ? */ | |
56 | { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) }, | |
57 | /* Display Overhead ? */ | |
58 | { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) }, | |
11e84acc FW |
59 | /* Display proc name/pid */ |
60 | { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) }, | |
9005f3eb FW |
61 | /* Display duration of execution */ |
62 | { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) }, | |
63 | /* Display absolute time of an entry */ | |
64 | { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) }, | |
2bd16212 JO |
65 | /* Display interrupts */ |
66 | { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) }, | |
fb52607a FW |
67 | { } /* Empty entry */ |
68 | }; | |
69 | ||
70 | static struct tracer_flags tracer_flags = { | |
11e84acc | 71 | /* Don't display overruns and proc by default */ |
9005f3eb | 72 | .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD | |
2bd16212 | 73 | TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS, |
fb52607a FW |
74 | .opts = trace_opts |
75 | }; | |
76 | ||
1a0799a8 | 77 | static struct trace_array *graph_array; |
9005f3eb | 78 | |
ffeb80fc JO |
79 | /* |
80 | * DURATION column is being also used to display IRQ signs, | |
81 | * following values are used by print_graph_irq and others | |
82 | * to fill in space into DURATION column. | |
83 | */ | |
84 | enum { | |
85 | DURATION_FILL_FULL = -1, | |
86 | DURATION_FILL_START = -2, | |
87 | DURATION_FILL_END = -3, | |
88 | }; | |
89 | ||
90 | static enum print_line_t | |
91 | print_graph_duration(unsigned long long duration, struct trace_seq *s, | |
92 | u32 flags); | |
fb52607a | 93 | |
712406a6 SR |
/* Add a function return address to the trace stack on thread info.*/
/*
 * Returns 0 on success, -EBUSY if the task has no ret_stack or the
 * stack is full (in which case trace_overrun is bumped).  *depth is
 * set to the new stack index on success.
 */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer)
{
	unsigned long long calltime;
	int index;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	calltime = trace_clock_local();

	/* Reserve the slot before writing it; barrier() keeps the
	 * compiler from reordering the stores above the index bump. */
	index = ++current->curr_ret_stack;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
	current->ret_stack[index].subtime = 0;
	current->ret_stack[index].fp = frame_pointer;
	*depth = index;

	return 0;
}
130 | ||
/* Retrieve a function return address to the trace stack on thread info.*/
/*
 * Fills *trace from the top ret_stack entry and stores the original
 * return address in *ret.  On corruption (negative index or frame
 * pointer mismatch) tracing is stopped and *ret is pointed at panic().
 */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	if (unlikely(index < 0)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have no where to go */
		*ret = (unsigned long)panic;
		return;
	}

#if defined(CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST) && !defined(CC_USING_FENTRY)
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}
182 | ||
/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	/* barrier() keeps the pop visible before the stack index drops */
	barrier();
	current->curr_ret_stack--;

	/*
	 * The trace should run after decrementing the ret counter
	 * in case an interrupt were to come in. We don't want to
	 * lose the interrupt if max_depth is set.
	 */
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}
213 | ||
62b915f1 | 214 | int __trace_graph_entry(struct trace_array *tr, |
1a0799a8 FW |
215 | struct ftrace_graph_ent *trace, |
216 | unsigned long flags, | |
217 | int pc) | |
218 | { | |
219 | struct ftrace_event_call *call = &event_funcgraph_entry; | |
220 | struct ring_buffer_event *event; | |
12883efb | 221 | struct ring_buffer *buffer = tr->trace_buffer.buffer; |
1a0799a8 FW |
222 | struct ftrace_graph_ent_entry *entry; |
223 | ||
dd17c8f7 | 224 | if (unlikely(__this_cpu_read(ftrace_cpu_disabled))) |
1a0799a8 FW |
225 | return 0; |
226 | ||
e77405ad | 227 | event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT, |
1a0799a8 FW |
228 | sizeof(*entry), flags, pc); |
229 | if (!event) | |
230 | return 0; | |
231 | entry = ring_buffer_event_data(event); | |
232 | entry->graph_ent = *trace; | |
e77405ad | 233 | if (!filter_current_check_discard(buffer, call, entry, event)) |
7ffbd48d | 234 | __buffer_unlock_commit(buffer, event); |
1a0799a8 FW |
235 | |
236 | return 1; | |
237 | } | |
238 | ||
b304d044 SR |
239 | static inline int ftrace_graph_ignore_irqs(void) |
240 | { | |
e4a3f541 | 241 | if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT)) |
b304d044 SR |
242 | return 0; |
243 | ||
244 | return in_irq(); | |
245 | } | |
246 | ||
1a0799a8 FW |
/*
 * Function-entry hook registered with the graph tracer.
 * Returns 1 if the entry was recorded, 0 if it was filtered out
 * (task not traced, depth/irq filters, recursion on this cpu).
 */
int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(current))
		return 0;

	/* trace it when it is-nested-in or is a function enabled. */
	if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
	     ftrace_graph_ignore_irqs()) ||
	    (max_depth && trace->depth >= max_depth))
		return 0;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	/* disabled != 1 means we re-entered on this cpu: drop the event */
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}
282 | ||
0e950173 TB |
283 | int trace_graph_thresh_entry(struct ftrace_graph_ent *trace) |
284 | { | |
285 | if (tracing_thresh) | |
286 | return 1; | |
287 | else | |
288 | return trace_graph_entry(trace); | |
289 | } | |
290 | ||
0a772620 JO |
/*
 * Emit a zero-duration entry/return pair for a single function hit,
 * used when the graph tracer is driven from the plain function hook.
 */
static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	/* calltime == rettime: the pair represents an instantaneous call */
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}
310 | ||
/*
 * Public wrapper around __trace_graph_function; parent_ip is accepted
 * for interface compatibility but not used.
 */
void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}
318 | ||
62b915f1 | 319 | void __trace_graph_return(struct trace_array *tr, |
1a0799a8 FW |
320 | struct ftrace_graph_ret *trace, |
321 | unsigned long flags, | |
322 | int pc) | |
323 | { | |
324 | struct ftrace_event_call *call = &event_funcgraph_exit; | |
325 | struct ring_buffer_event *event; | |
12883efb | 326 | struct ring_buffer *buffer = tr->trace_buffer.buffer; |
1a0799a8 FW |
327 | struct ftrace_graph_ret_entry *entry; |
328 | ||
dd17c8f7 | 329 | if (unlikely(__this_cpu_read(ftrace_cpu_disabled))) |
1a0799a8 FW |
330 | return; |
331 | ||
e77405ad | 332 | event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET, |
1a0799a8 FW |
333 | sizeof(*entry), flags, pc); |
334 | if (!event) | |
335 | return; | |
336 | entry = ring_buffer_event_data(event); | |
337 | entry->ret = *trace; | |
e77405ad | 338 | if (!filter_current_check_discard(buffer, call, entry, event)) |
7ffbd48d | 339 | __buffer_unlock_commit(buffer, event); |
1a0799a8 FW |
340 | } |
341 | ||
/*
 * Function-return hook registered with the graph tracer.  Mirrors
 * trace_graph_entry(): per-cpu recursion protection around the
 * ring-buffer write, with irqs disabled.
 */
void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	/* disabled != 1 means we re-entered on this cpu: drop the event */
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
362 | ||
24a53652 FW |
/* Set the trace_array the graph hooks record into. */
void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */

	smp_mb();
}
371 | ||
0e950173 TB |
372 | void trace_graph_thresh_return(struct ftrace_graph_ret *trace) |
373 | { | |
374 | if (tracing_thresh && | |
375 | (trace->rettime - trace->calltime < tracing_thresh)) | |
376 | return; | |
377 | else | |
378 | trace_graph_return(trace); | |
379 | } | |
380 | ||
fb52607a FW |
/*
 * Tracer init callback: register the appropriate entry/return hooks
 * (threshold variants when tracing_thresh is set) and start cmdline
 * recording.  Returns 0 on success or the registration error.
 */
static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_thresh_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}
398 | ||
/* Tracer reset callback: undo graph_trace_init() in reverse order. */
static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}
404 | ||
0c9e6f63 | 405 | static int max_bytes_for_cpu; |
1a056155 FW |
406 | |
407 | static enum print_line_t | |
408 | print_graph_cpu(struct trace_seq *s, int cpu) | |
409 | { | |
1a056155 | 410 | int ret; |
1a056155 | 411 | |
d51090b3 IM |
412 | /* |
413 | * Start with a space character - to make it stand out | |
414 | * to the right a bit when trace output is pasted into | |
415 | * email: | |
416 | */ | |
0c9e6f63 | 417 | ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu); |
1a056155 | 418 | if (!ret) |
d51090b3 IM |
419 | return TRACE_TYPE_PARTIAL_LINE; |
420 | ||
1a056155 FW |
421 | return TRACE_TYPE_HANDLED; |
422 | } | |
423 | ||
11e84acc FW |
/* Total column width for the "comm-pid" field */
#define TRACE_GRAPH_PROCINFO_LENGTH	14

/*
 * Print "comm-pid" centered in a TRACE_GRAPH_PROCINFO_LENGTH column.
 * comm is truncated to 7 characters to keep the column compact.
 */
static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int ret;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}
466 | ||
1a056155 | 467 | |
49ff5903 SR |
/* Print a leading space plus the latency-format field for this entry. */
static enum print_line_t
print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	if (!trace_seq_putc(s, ' '))
		return 0;

	return trace_print_lat_fmt(s, entry);
}
476 | ||
287b6e68 | 477 | /* If the pid changed since the last trace, output this event */ |
11e84acc | 478 | static enum print_line_t |
2fbcdb35 | 479 | verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data) |
287b6e68 | 480 | { |
d51090b3 | 481 | pid_t prev_pid; |
9005f3eb | 482 | pid_t *last_pid; |
d51090b3 | 483 | int ret; |
660c7f9b | 484 | |
2fbcdb35 | 485 | if (!data) |
9005f3eb FW |
486 | return TRACE_TYPE_HANDLED; |
487 | ||
be1eca39 | 488 | last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid); |
9005f3eb FW |
489 | |
490 | if (*last_pid == pid) | |
11e84acc | 491 | return TRACE_TYPE_HANDLED; |
fb52607a | 492 | |
9005f3eb FW |
493 | prev_pid = *last_pid; |
494 | *last_pid = pid; | |
d51090b3 | 495 | |
9005f3eb FW |
496 | if (prev_pid == -1) |
497 | return TRACE_TYPE_HANDLED; | |
d51090b3 IM |
498 | /* |
499 | * Context-switch trace line: | |
500 | ||
501 | ------------------------------------------ | |
502 | | 1) migration/0--1 => sshd-1755 | |
503 | ------------------------------------------ | |
504 | ||
505 | */ | |
146c3442 | 506 | ret = trace_seq_puts(s, |
1fd8f2a3 | 507 | " ------------------------------------------\n"); |
11e84acc | 508 | if (!ret) |
810dc732 | 509 | return TRACE_TYPE_PARTIAL_LINE; |
11e84acc FW |
510 | |
511 | ret = print_graph_cpu(s, cpu); | |
512 | if (ret == TRACE_TYPE_PARTIAL_LINE) | |
810dc732 | 513 | return TRACE_TYPE_PARTIAL_LINE; |
11e84acc FW |
514 | |
515 | ret = print_graph_proc(s, prev_pid); | |
516 | if (ret == TRACE_TYPE_PARTIAL_LINE) | |
810dc732 | 517 | return TRACE_TYPE_PARTIAL_LINE; |
11e84acc | 518 | |
146c3442 | 519 | ret = trace_seq_puts(s, " => "); |
11e84acc | 520 | if (!ret) |
810dc732 | 521 | return TRACE_TYPE_PARTIAL_LINE; |
11e84acc FW |
522 | |
523 | ret = print_graph_proc(s, pid); | |
524 | if (ret == TRACE_TYPE_PARTIAL_LINE) | |
810dc732 | 525 | return TRACE_TYPE_PARTIAL_LINE; |
11e84acc | 526 | |
146c3442 | 527 | ret = trace_seq_puts(s, |
11e84acc FW |
528 | "\n ------------------------------------------\n\n"); |
529 | if (!ret) | |
810dc732 | 530 | return TRACE_TYPE_PARTIAL_LINE; |
11e84acc | 531 | |
810dc732 | 532 | return TRACE_TYPE_HANDLED; |
287b6e68 FW |
533 | } |
534 | ||
b91facc3 FW |
/*
 * Peek at the next event to see whether the current entry is a leaf
 * (its matching return immediately follows).  Returns the return entry
 * if so — consuming it from the iterator — or NULL otherwise.
 */
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {

		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	/* A leaf pairs the same pid and function on entry and return */
	if (curr->ent.pid != next->ent.pid ||
	    curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}
605 | ||
d1f9cbd7 FW |
/* Print the absolute timestamp column as "seconds.microseconds | ". */
static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	/* do_div() truncates t to seconds and returns the ns remainder */
	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	return trace_seq_printf(s, "%5lu.%06lu |  ",
			(unsigned long)t, usecs_rem);
}
616 | ||
f8b755ac | 617 | static enum print_line_t |
d1f9cbd7 | 618 | print_graph_irq(struct trace_iterator *iter, unsigned long addr, |
d7a8d9e9 | 619 | enum trace_type type, int cpu, pid_t pid, u32 flags) |
f8b755ac FW |
620 | { |
621 | int ret; | |
d1f9cbd7 | 622 | struct trace_seq *s = &iter->seq; |
f8b755ac FW |
623 | |
624 | if (addr < (unsigned long)__irqentry_text_start || | |
625 | addr >= (unsigned long)__irqentry_text_end) | |
626 | return TRACE_TYPE_UNHANDLED; | |
627 | ||
749230b0 JO |
628 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { |
629 | /* Absolute time */ | |
630 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) { | |
631 | ret = print_graph_abs_time(iter->ts, s); | |
632 | if (!ret) | |
633 | return TRACE_TYPE_PARTIAL_LINE; | |
634 | } | |
d1f9cbd7 | 635 | |
749230b0 JO |
636 | /* Cpu */ |
637 | if (flags & TRACE_GRAPH_PRINT_CPU) { | |
638 | ret = print_graph_cpu(s, cpu); | |
639 | if (ret == TRACE_TYPE_PARTIAL_LINE) | |
640 | return TRACE_TYPE_PARTIAL_LINE; | |
641 | } | |
49ff5903 | 642 | |
749230b0 JO |
643 | /* Proc */ |
644 | if (flags & TRACE_GRAPH_PRINT_PROC) { | |
645 | ret = print_graph_proc(s, pid); | |
646 | if (ret == TRACE_TYPE_PARTIAL_LINE) | |
647 | return TRACE_TYPE_PARTIAL_LINE; | |
146c3442 | 648 | ret = trace_seq_puts(s, " | "); |
749230b0 JO |
649 | if (!ret) |
650 | return TRACE_TYPE_PARTIAL_LINE; | |
651 | } | |
9005f3eb | 652 | } |
f8b755ac | 653 | |
9005f3eb | 654 | /* No overhead */ |
ffeb80fc JO |
655 | ret = print_graph_duration(DURATION_FILL_START, s, flags); |
656 | if (ret != TRACE_TYPE_HANDLED) | |
657 | return ret; | |
f8b755ac | 658 | |
9005f3eb | 659 | if (type == TRACE_GRAPH_ENT) |
146c3442 | 660 | ret = trace_seq_puts(s, "==========>"); |
9005f3eb | 661 | else |
146c3442 | 662 | ret = trace_seq_puts(s, "<=========="); |
9005f3eb FW |
663 | |
664 | if (!ret) | |
665 | return TRACE_TYPE_PARTIAL_LINE; | |
666 | ||
ffeb80fc JO |
667 | ret = print_graph_duration(DURATION_FILL_END, s, flags); |
668 | if (ret != TRACE_TYPE_HANDLED) | |
669 | return ret; | |
670 | ||
146c3442 | 671 | ret = trace_seq_putc(s, '\n'); |
f8b755ac | 672 | |
f8b755ac FW |
673 | if (!ret) |
674 | return TRACE_TYPE_PARTIAL_LINE; | |
675 | return TRACE_TYPE_HANDLED; | |
676 | } | |
83a8df61 | 677 | |
0706f1c4 SR |
/*
 * Print a duration (nanoseconds) as "<msecs>.<nsecs> us" padded to a
 * fixed 7-digit field.  Note: on entry 'duration' is in ns; do_div()
 * converts it to us and yields the ns remainder.
 */
enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char msecs_str[21];
	char nsecs_str[5];
	int ret, len;
	int i;

	sprintf(msecs_str, "%lu", (unsigned long) duration);

	/* Print msecs */
	ret = trace_seq_printf(s, "%s", msecs_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	len = strlen(msecs_str);

	/* Print nsecs (we don't want to exceed 7 numbers) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		ret = trace_seq_printf(s, ".%s", nsecs_str);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		len += strlen(nsecs_str);
	}

	ret = trace_seq_puts(s, " us ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 7; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}
720 | ||
/*
 * Print the DURATION column, or fill it with spaces when 'duration'
 * is one of the DURATION_FILL_* sentinels.
 *
 * NOTE(review): the switch compares an unsigned long long against
 * negative enum constants; this relies on the usual arithmetic
 * conversions matching the sentinel bit patterns — confirm against
 * the callers that pass DURATION_FILL_*.
 */
static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags)
{
	int ret = -1;

	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return TRACE_TYPE_HANDLED;

	/* No real data, just filling the column with spaces */
	switch (duration) {
	case DURATION_FILL_FULL:
		ret = trace_seq_puts(s, "              |  ");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	case DURATION_FILL_START:
		ret = trace_seq_puts(s, "  ");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	case DURATION_FILL_END:
		ret = trace_seq_puts(s, " |");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	}

	/* Signal a overhead of time execution to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
		/* Duration exceeded 100 msecs */
		if (duration > 100000ULL)
			ret = trace_seq_puts(s, "! ");
		/* Duration exceeded 10 msecs */
		else if (duration > 10000ULL)
			ret = trace_seq_puts(s, "+ ");
	}

	/*
	 * The -1 means we either did not exceed the duration thresholds
	 * or we dont want to print out the overhead. Either way we need
	 * to fill out the space.
	 */
	if (ret == -1)
		ret = trace_seq_puts(s, "  ");

	/* Catching here any failure happened above */
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_print_graph_duration(duration, s);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_puts(s, "|  ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
776 | ||
83a8df61 | 777 | /* Case of a leaf function on its call entry */ |
287b6e68 | 778 | static enum print_line_t |
83a8df61 | 779 | print_graph_entry_leaf(struct trace_iterator *iter, |
b91facc3 | 780 | struct ftrace_graph_ent_entry *entry, |
d7a8d9e9 JO |
781 | struct ftrace_graph_ret_entry *ret_entry, |
782 | struct trace_seq *s, u32 flags) | |
fb52607a | 783 | { |
2fbcdb35 | 784 | struct fgraph_data *data = iter->private; |
83a8df61 | 785 | struct ftrace_graph_ret *graph_ret; |
83a8df61 FW |
786 | struct ftrace_graph_ent *call; |
787 | unsigned long long duration; | |
fb52607a | 788 | int ret; |
1a056155 | 789 | int i; |
fb52607a | 790 | |
83a8df61 FW |
791 | graph_ret = &ret_entry->ret; |
792 | call = &entry->graph_ent; | |
793 | duration = graph_ret->rettime - graph_ret->calltime; | |
794 | ||
2fbcdb35 | 795 | if (data) { |
f1c7f517 | 796 | struct fgraph_cpu_data *cpu_data; |
2fbcdb35 | 797 | int cpu = iter->cpu; |
f1c7f517 SR |
798 | |
799 | cpu_data = per_cpu_ptr(data->cpu_data, cpu); | |
2fbcdb35 SR |
800 | |
801 | /* | |
802 | * Comments display at + 1 to depth. Since | |
803 | * this is a leaf function, keep the comments | |
804 | * equal to this depth. | |
805 | */ | |
f1c7f517 SR |
806 | cpu_data->depth = call->depth - 1; |
807 | ||
808 | /* No need to keep this function around for this depth */ | |
809 | if (call->depth < FTRACE_RETFUNC_DEPTH) | |
810 | cpu_data->enter_funcs[call->depth] = 0; | |
2fbcdb35 SR |
811 | } |
812 | ||
ffeb80fc JO |
813 | /* Overhead and duration */ |
814 | ret = print_graph_duration(duration, s, flags); | |
815 | if (ret == TRACE_TYPE_PARTIAL_LINE) | |
9005f3eb | 816 | return TRACE_TYPE_PARTIAL_LINE; |
1a056155 | 817 | |
83a8df61 FW |
818 | /* Function */ |
819 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { | |
146c3442 | 820 | ret = trace_seq_putc(s, ' '); |
83a8df61 FW |
821 | if (!ret) |
822 | return TRACE_TYPE_PARTIAL_LINE; | |
823 | } | |
824 | ||
b375a11a | 825 | ret = trace_seq_printf(s, "%ps();\n", (void *)call->func); |
83a8df61 FW |
826 | if (!ret) |
827 | return TRACE_TYPE_PARTIAL_LINE; | |
828 | ||
829 | return TRACE_TYPE_HANDLED; | |
830 | } | |
831 | ||
/*
 * Print a non-leaf entry as "func() {" — the duration column is filled
 * with spaces because the duration is only known at the return entry.
 */
static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	int ret;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		/* NOTE(review): shadows the 'cpu' parameter; both appear
		 * to come from iter->cpu at the call site — confirm. */
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	ret = print_graph_duration(DURATION_FILL_FULL, s, flags);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}
876 | ||
83a8df61 | 877 | static enum print_line_t |
ac5f6c96 | 878 | print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s, |
d7a8d9e9 | 879 | int type, unsigned long addr, u32 flags) |
83a8df61 | 880 | { |
2fbcdb35 | 881 | struct fgraph_data *data = iter->private; |
83a8df61 | 882 | struct trace_entry *ent = iter->ent; |
ac5f6c96 SR |
883 | int cpu = iter->cpu; |
884 | int ret; | |
83a8df61 | 885 | |
1a056155 | 886 | /* Pid */ |
2fbcdb35 | 887 | if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE) |
9005f3eb FW |
888 | return TRACE_TYPE_PARTIAL_LINE; |
889 | ||
ac5f6c96 SR |
890 | if (type) { |
891 | /* Interrupt */ | |
d7a8d9e9 | 892 | ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags); |
ac5f6c96 SR |
893 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
894 | return TRACE_TYPE_PARTIAL_LINE; | |
895 | } | |
83a8df61 | 896 | |
749230b0 JO |
897 | if (!(trace_flags & TRACE_ITER_CONTEXT_INFO)) |
898 | return 0; | |
899 | ||
9005f3eb | 900 | /* Absolute time */ |
d7a8d9e9 | 901 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) { |
9005f3eb FW |
902 | ret = print_graph_abs_time(iter->ts, s); |
903 | if (!ret) | |
904 | return TRACE_TYPE_PARTIAL_LINE; | |
905 | } | |
906 | ||
1a056155 | 907 | /* Cpu */ |
d7a8d9e9 | 908 | if (flags & TRACE_GRAPH_PRINT_CPU) { |
1a056155 | 909 | ret = print_graph_cpu(s, cpu); |
11e84acc FW |
910 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
911 | return TRACE_TYPE_PARTIAL_LINE; | |
912 | } | |
913 | ||
914 | /* Proc */ | |
d7a8d9e9 | 915 | if (flags & TRACE_GRAPH_PRINT_PROC) { |
00a8bf85 | 916 | ret = print_graph_proc(s, ent->pid); |
11e84acc FW |
917 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
918 | return TRACE_TYPE_PARTIAL_LINE; | |
919 | ||
146c3442 | 920 | ret = trace_seq_puts(s, " | "); |
1a056155 FW |
921 | if (!ret) |
922 | return TRACE_TYPE_PARTIAL_LINE; | |
923 | } | |
83a8df61 | 924 | |
49ff5903 SR |
925 | /* Latency format */ |
926 | if (trace_flags & TRACE_ITER_LATENCY_FMT) { | |
927 | ret = print_graph_lat_fmt(s, ent); | |
928 | if (ret == TRACE_TYPE_PARTIAL_LINE) | |
929 | return TRACE_TYPE_PARTIAL_LINE; | |
930 | } | |
931 | ||
ac5f6c96 SR |
932 | return 0; |
933 | } | |
934 | ||
2bd16212 JO |
935 | /* |
936 | * Entry check for irq code | |
937 | * | |
938 | * returns 1 if | |
939 | * - we are inside irq code | |
25985edc | 940 | * - we just entered irq code |
2bd16212 JO |
941 | * |
942 | * retunns 0 if | |
943 | * - funcgraph-interrupts option is set | |
944 | * - we are not inside irq code | |
945 | */ | |
946 | static int | |
947 | check_irq_entry(struct trace_iterator *iter, u32 flags, | |
948 | unsigned long addr, int depth) | |
949 | { | |
950 | int cpu = iter->cpu; | |
a9d61173 | 951 | int *depth_irq; |
2bd16212 | 952 | struct fgraph_data *data = iter->private; |
2bd16212 | 953 | |
a9d61173 JO |
954 | /* |
955 | * If we are either displaying irqs, or we got called as | |
956 | * a graph event and private data does not exist, | |
957 | * then we bypass the irq check. | |
958 | */ | |
959 | if ((flags & TRACE_GRAPH_PRINT_IRQS) || | |
960 | (!data)) | |
2bd16212 JO |
961 | return 0; |
962 | ||
a9d61173 JO |
963 | depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq); |
964 | ||
2bd16212 JO |
965 | /* |
966 | * We are inside the irq code | |
967 | */ | |
968 | if (*depth_irq >= 0) | |
969 | return 1; | |
970 | ||
971 | if ((addr < (unsigned long)__irqentry_text_start) || | |
972 | (addr >= (unsigned long)__irqentry_text_end)) | |
973 | return 0; | |
974 | ||
975 | /* | |
976 | * We are entering irq code. | |
977 | */ | |
978 | *depth_irq = depth; | |
979 | return 1; | |
980 | } | |
981 | ||
982 | /* | |
983 | * Return check for irq code | |
984 | * | |
985 | * returns 1 if | |
986 | * - we are inside irq code | |
987 | * - we just left irq code | |
988 | * | |
989 | * returns 0 if | |
990 | * - funcgraph-interrupts option is set | |
991 | * - we are not inside irq code | |
992 | */ | |
993 | static int | |
994 | check_irq_return(struct trace_iterator *iter, u32 flags, int depth) | |
995 | { | |
996 | int cpu = iter->cpu; | |
a9d61173 | 997 | int *depth_irq; |
2bd16212 | 998 | struct fgraph_data *data = iter->private; |
2bd16212 | 999 | |
a9d61173 JO |
1000 | /* |
1001 | * If we are either displaying irqs, or we got called as | |
1002 | * a graph event and private data does not exist, | |
1003 | * then we bypass the irq check. | |
1004 | */ | |
1005 | if ((flags & TRACE_GRAPH_PRINT_IRQS) || | |
1006 | (!data)) | |
2bd16212 JO |
1007 | return 0; |
1008 | ||
a9d61173 JO |
1009 | depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq); |
1010 | ||
2bd16212 JO |
1011 | /* |
1012 | * We are not inside the irq code. | |
1013 | */ | |
1014 | if (*depth_irq == -1) | |
1015 | return 0; | |
1016 | ||
1017 | /* | |
1018 | * We are inside the irq code, and this is returning entry. | |
1019 | * Let's not trace it and clear the entry depth, since | |
1020 | * we are out of irq code. | |
1021 | * | |
1022 | * This condition ensures that we 'leave the irq code' once | |
1023 | * we are out of the entry depth. Thus protecting us from | |
1024 | * the RETURN entry loss. | |
1025 | */ | |
1026 | if (*depth_irq >= depth) { | |
1027 | *depth_irq = -1; | |
1028 | return 1; | |
1029 | } | |
1030 | ||
1031 | /* | |
1032 | * We are inside the irq code, and this is not the entry. | |
1033 | */ | |
1034 | return 1; | |
1035 | } | |
1036 | ||
ac5f6c96 SR |
1037 | static enum print_line_t |
1038 | print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, | |
d7a8d9e9 | 1039 | struct trace_iterator *iter, u32 flags) |
ac5f6c96 | 1040 | { |
be1eca39 | 1041 | struct fgraph_data *data = iter->private; |
ac5f6c96 SR |
1042 | struct ftrace_graph_ent *call = &field->graph_ent; |
1043 | struct ftrace_graph_ret_entry *leaf_ret; | |
be1eca39 JO |
1044 | static enum print_line_t ret; |
1045 | int cpu = iter->cpu; | |
ac5f6c96 | 1046 | |
2bd16212 JO |
1047 | if (check_irq_entry(iter, flags, call->func, call->depth)) |
1048 | return TRACE_TYPE_HANDLED; | |
1049 | ||
d7a8d9e9 | 1050 | if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags)) |
ac5f6c96 SR |
1051 | return TRACE_TYPE_PARTIAL_LINE; |
1052 | ||
b91facc3 FW |
1053 | leaf_ret = get_return_for_leaf(iter, field); |
1054 | if (leaf_ret) | |
d7a8d9e9 | 1055 | ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags); |
83a8df61 | 1056 | else |
d7a8d9e9 | 1057 | ret = print_graph_entry_nested(iter, field, s, cpu, flags); |
83a8df61 | 1058 | |
be1eca39 JO |
1059 | if (data) { |
1060 | /* | |
1061 | * If we failed to write our output, then we need to make | |
1062 | * note of it. Because we already consumed our entry. | |
1063 | */ | |
1064 | if (s->full) { | |
1065 | data->failed = 1; | |
1066 | data->cpu = cpu; | |
1067 | } else | |
1068 | data->failed = 0; | |
1069 | } | |
1070 | ||
1071 | return ret; | |
83a8df61 FW |
1072 | } |
1073 | ||
287b6e68 FW |
1074 | static enum print_line_t |
1075 | print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, | |
d7a8d9e9 JO |
1076 | struct trace_entry *ent, struct trace_iterator *iter, |
1077 | u32 flags) | |
287b6e68 | 1078 | { |
83a8df61 | 1079 | unsigned long long duration = trace->rettime - trace->calltime; |
2fbcdb35 SR |
1080 | struct fgraph_data *data = iter->private; |
1081 | pid_t pid = ent->pid; | |
1082 | int cpu = iter->cpu; | |
f1c7f517 | 1083 | int func_match = 1; |
2fbcdb35 SR |
1084 | int ret; |
1085 | int i; | |
1086 | ||
2bd16212 JO |
1087 | if (check_irq_return(iter, flags, trace->depth)) |
1088 | return TRACE_TYPE_HANDLED; | |
1089 | ||
2fbcdb35 | 1090 | if (data) { |
f1c7f517 SR |
1091 | struct fgraph_cpu_data *cpu_data; |
1092 | int cpu = iter->cpu; | |
1093 | ||
1094 | cpu_data = per_cpu_ptr(data->cpu_data, cpu); | |
2fbcdb35 SR |
1095 | |
1096 | /* | |
1097 | * Comments display at + 1 to depth. This is the | |
1098 | * return from a function, we now want the comments | |
1099 | * to display at the same level of the bracket. | |
1100 | */ | |
f1c7f517 SR |
1101 | cpu_data->depth = trace->depth - 1; |
1102 | ||
1103 | if (trace->depth < FTRACE_RETFUNC_DEPTH) { | |
1104 | if (cpu_data->enter_funcs[trace->depth] != trace->func) | |
1105 | func_match = 0; | |
1106 | cpu_data->enter_funcs[trace->depth] = 0; | |
1107 | } | |
2fbcdb35 | 1108 | } |
287b6e68 | 1109 | |
d7a8d9e9 | 1110 | if (print_graph_prologue(iter, s, 0, 0, flags)) |
437f24fb SR |
1111 | return TRACE_TYPE_PARTIAL_LINE; |
1112 | ||
ffeb80fc JO |
1113 | /* Overhead and duration */ |
1114 | ret = print_graph_duration(duration, s, flags); | |
1115 | if (ret == TRACE_TYPE_PARTIAL_LINE) | |
9005f3eb | 1116 | return TRACE_TYPE_PARTIAL_LINE; |
1a056155 | 1117 | |
83a8df61 | 1118 | /* Closing brace */ |
287b6e68 | 1119 | for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) { |
146c3442 | 1120 | ret = trace_seq_putc(s, ' '); |
fb52607a FW |
1121 | if (!ret) |
1122 | return TRACE_TYPE_PARTIAL_LINE; | |
287b6e68 FW |
1123 | } |
1124 | ||
f1c7f517 SR |
1125 | /* |
1126 | * If the return function does not have a matching entry, | |
1127 | * then the entry was lost. Instead of just printing | |
1128 | * the '}' and letting the user guess what function this | |
1129 | * belongs to, write out the function name. | |
1130 | */ | |
1131 | if (func_match) { | |
146c3442 | 1132 | ret = trace_seq_puts(s, "}\n"); |
f1c7f517 SR |
1133 | if (!ret) |
1134 | return TRACE_TYPE_PARTIAL_LINE; | |
1135 | } else { | |
a094fe04 | 1136 | ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func); |
f1c7f517 SR |
1137 | if (!ret) |
1138 | return TRACE_TYPE_PARTIAL_LINE; | |
1139 | } | |
fb52607a | 1140 | |
83a8df61 | 1141 | /* Overrun */ |
d7a8d9e9 | 1142 | if (flags & TRACE_GRAPH_PRINT_OVERRUN) { |
287b6e68 FW |
1143 | ret = trace_seq_printf(s, " (Overruns: %lu)\n", |
1144 | trace->overrun); | |
fb52607a FW |
1145 | if (!ret) |
1146 | return TRACE_TYPE_PARTIAL_LINE; | |
287b6e68 | 1147 | } |
f8b755ac | 1148 | |
d7a8d9e9 JO |
1149 | ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, |
1150 | cpu, pid, flags); | |
f8b755ac FW |
1151 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
1152 | return TRACE_TYPE_PARTIAL_LINE; | |
1153 | ||
287b6e68 FW |
1154 | return TRACE_TYPE_HANDLED; |
1155 | } | |
1156 | ||
1fd8f2a3 | 1157 | static enum print_line_t |
d7a8d9e9 JO |
1158 | print_graph_comment(struct trace_seq *s, struct trace_entry *ent, |
1159 | struct trace_iterator *iter, u32 flags) | |
1fd8f2a3 | 1160 | { |
5087f8d2 | 1161 | unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); |
2fbcdb35 | 1162 | struct fgraph_data *data = iter->private; |
5087f8d2 | 1163 | struct trace_event *event; |
2fbcdb35 | 1164 | int depth = 0; |
1fd8f2a3 | 1165 | int ret; |
2fbcdb35 SR |
1166 | int i; |
1167 | ||
1168 | if (data) | |
be1eca39 | 1169 | depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth; |
9005f3eb | 1170 | |
d7a8d9e9 | 1171 | if (print_graph_prologue(iter, s, 0, 0, flags)) |
d1f9cbd7 FW |
1172 | return TRACE_TYPE_PARTIAL_LINE; |
1173 | ||
9005f3eb | 1174 | /* No time */ |
ffeb80fc JO |
1175 | ret = print_graph_duration(DURATION_FILL_FULL, s, flags); |
1176 | if (ret != TRACE_TYPE_HANDLED) | |
1177 | return ret; | |
1fd8f2a3 | 1178 | |
1fd8f2a3 | 1179 | /* Indentation */ |
2fbcdb35 SR |
1180 | if (depth > 0) |
1181 | for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) { | |
146c3442 | 1182 | ret = trace_seq_putc(s, ' '); |
1fd8f2a3 FW |
1183 | if (!ret) |
1184 | return TRACE_TYPE_PARTIAL_LINE; | |
1185 | } | |
1186 | ||
1187 | /* The comment */ | |
146c3442 | 1188 | ret = trace_seq_puts(s, "/* "); |
769b0441 FW |
1189 | if (!ret) |
1190 | return TRACE_TYPE_PARTIAL_LINE; | |
1191 | ||
5087f8d2 SR |
1192 | switch (iter->ent->type) { |
1193 | case TRACE_BPRINT: | |
1194 | ret = trace_print_bprintk_msg_only(iter); | |
1195 | if (ret != TRACE_TYPE_HANDLED) | |
1196 | return ret; | |
1197 | break; | |
1198 | case TRACE_PRINT: | |
1199 | ret = trace_print_printk_msg_only(iter); | |
1200 | if (ret != TRACE_TYPE_HANDLED) | |
1201 | return ret; | |
1202 | break; | |
1203 | default: | |
1204 | event = ftrace_find_event(ent->type); | |
1205 | if (!event) | |
1206 | return TRACE_TYPE_UNHANDLED; | |
1207 | ||
a9a57763 | 1208 | ret = event->funcs->trace(iter, sym_flags, event); |
5087f8d2 SR |
1209 | if (ret != TRACE_TYPE_HANDLED) |
1210 | return ret; | |
1211 | } | |
1fd8f2a3 | 1212 | |
412d0bb5 FW |
1213 | /* Strip ending newline */ |
1214 | if (s->buffer[s->len - 1] == '\n') { | |
1215 | s->buffer[s->len - 1] = '\0'; | |
1216 | s->len--; | |
1217 | } | |
1218 | ||
146c3442 | 1219 | ret = trace_seq_puts(s, " */\n"); |
1fd8f2a3 FW |
1220 | if (!ret) |
1221 | return TRACE_TYPE_PARTIAL_LINE; | |
1222 | ||
1223 | return TRACE_TYPE_HANDLED; | |
1224 | } | |
1225 | ||
1226 | ||
287b6e68 | 1227 | enum print_line_t |
321e68b0 | 1228 | print_graph_function_flags(struct trace_iterator *iter, u32 flags) |
287b6e68 | 1229 | { |
be1eca39 JO |
1230 | struct ftrace_graph_ent_entry *field; |
1231 | struct fgraph_data *data = iter->private; | |
287b6e68 | 1232 | struct trace_entry *entry = iter->ent; |
5087f8d2 | 1233 | struct trace_seq *s = &iter->seq; |
be1eca39 JO |
1234 | int cpu = iter->cpu; |
1235 | int ret; | |
1236 | ||
1237 | if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) { | |
1238 | per_cpu_ptr(data->cpu_data, cpu)->ignore = 0; | |
1239 | return TRACE_TYPE_HANDLED; | |
1240 | } | |
1241 | ||
1242 | /* | |
1243 | * If the last output failed, there's a possibility we need | |
1244 | * to print out the missing entry which would never go out. | |
1245 | */ | |
1246 | if (data && data->failed) { | |
1247 | field = &data->ent; | |
1248 | iter->cpu = data->cpu; | |
d7a8d9e9 | 1249 | ret = print_graph_entry(field, s, iter, flags); |
be1eca39 JO |
1250 | if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) { |
1251 | per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1; | |
1252 | ret = TRACE_TYPE_NO_CONSUME; | |
1253 | } | |
1254 | iter->cpu = cpu; | |
1255 | return ret; | |
1256 | } | |
fb52607a | 1257 | |
287b6e68 FW |
1258 | switch (entry->type) { |
1259 | case TRACE_GRAPH_ENT: { | |
38ceb592 LJ |
1260 | /* |
1261 | * print_graph_entry() may consume the current event, | |
1262 | * thus @field may become invalid, so we need to save it. | |
1263 | * sizeof(struct ftrace_graph_ent_entry) is very small, | |
1264 | * it can be safely saved at the stack. | |
1265 | */ | |
be1eca39 | 1266 | struct ftrace_graph_ent_entry saved; |
287b6e68 | 1267 | trace_assign_type(field, entry); |
38ceb592 | 1268 | saved = *field; |
d7a8d9e9 | 1269 | return print_graph_entry(&saved, s, iter, flags); |
287b6e68 FW |
1270 | } |
1271 | case TRACE_GRAPH_RET: { | |
1272 | struct ftrace_graph_ret_entry *field; | |
1273 | trace_assign_type(field, entry); | |
d7a8d9e9 | 1274 | return print_graph_return(&field->ret, s, entry, iter, flags); |
287b6e68 | 1275 | } |
62b915f1 JO |
1276 | case TRACE_STACK: |
1277 | case TRACE_FN: | |
1278 | /* dont trace stack and functions as comments */ | |
1279 | return TRACE_TYPE_UNHANDLED; | |
1280 | ||
287b6e68 | 1281 | default: |
d7a8d9e9 | 1282 | return print_graph_comment(s, entry, iter, flags); |
fb52607a | 1283 | } |
5087f8d2 SR |
1284 | |
1285 | return TRACE_TYPE_HANDLED; | |
fb52607a FW |
1286 | } |
1287 | ||
d7a8d9e9 JO |
1288 | static enum print_line_t |
1289 | print_graph_function(struct trace_iterator *iter) | |
1290 | { | |
321e68b0 | 1291 | return print_graph_function_flags(iter, tracer_flags.val); |
d7a8d9e9 JO |
1292 | } |
1293 | ||
9106b693 | 1294 | static enum print_line_t |
a9a57763 SR |
1295 | print_graph_function_event(struct trace_iterator *iter, int flags, |
1296 | struct trace_event *event) | |
9106b693 JO |
1297 | { |
1298 | return print_graph_function(iter); | |
1299 | } | |
1300 | ||
d7a8d9e9 | 1301 | static void print_lat_header(struct seq_file *s, u32 flags) |
49ff5903 SR |
1302 | { |
1303 | static const char spaces[] = " " /* 16 spaces */ | |
1304 | " " /* 4 spaces */ | |
1305 | " "; /* 17 spaces */ | |
1306 | int size = 0; | |
1307 | ||
d7a8d9e9 | 1308 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) |
49ff5903 | 1309 | size += 16; |
d7a8d9e9 | 1310 | if (flags & TRACE_GRAPH_PRINT_CPU) |
49ff5903 | 1311 | size += 4; |
d7a8d9e9 | 1312 | if (flags & TRACE_GRAPH_PRINT_PROC) |
49ff5903 SR |
1313 | size += 17; |
1314 | ||
1315 | seq_printf(s, "#%.*s _-----=> irqs-off \n", size, spaces); | |
1316 | seq_printf(s, "#%.*s / _----=> need-resched \n", size, spaces); | |
1317 | seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces); | |
1318 | seq_printf(s, "#%.*s|| / _--=> preempt-depth \n", size, spaces); | |
199abfab | 1319 | seq_printf(s, "#%.*s||| / \n", size, spaces); |
49ff5903 SR |
1320 | } |
1321 | ||
0a772620 | 1322 | static void __print_graph_headers_flags(struct seq_file *s, u32 flags) |
decbec38 | 1323 | { |
49ff5903 SR |
1324 | int lat = trace_flags & TRACE_ITER_LATENCY_FMT; |
1325 | ||
1326 | if (lat) | |
d7a8d9e9 | 1327 | print_lat_header(s, flags); |
49ff5903 | 1328 | |
decbec38 | 1329 | /* 1st line */ |
49ff5903 | 1330 | seq_printf(s, "#"); |
d7a8d9e9 | 1331 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) |
9005f3eb | 1332 | seq_printf(s, " TIME "); |
d7a8d9e9 | 1333 | if (flags & TRACE_GRAPH_PRINT_CPU) |
49ff5903 | 1334 | seq_printf(s, " CPU"); |
d7a8d9e9 | 1335 | if (flags & TRACE_GRAPH_PRINT_PROC) |
49ff5903 SR |
1336 | seq_printf(s, " TASK/PID "); |
1337 | if (lat) | |
199abfab | 1338 | seq_printf(s, "||||"); |
d7a8d9e9 | 1339 | if (flags & TRACE_GRAPH_PRINT_DURATION) |
9005f3eb FW |
1340 | seq_printf(s, " DURATION "); |
1341 | seq_printf(s, " FUNCTION CALLS\n"); | |
decbec38 FW |
1342 | |
1343 | /* 2nd line */ | |
49ff5903 | 1344 | seq_printf(s, "#"); |
d7a8d9e9 | 1345 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) |
9005f3eb | 1346 | seq_printf(s, " | "); |
d7a8d9e9 | 1347 | if (flags & TRACE_GRAPH_PRINT_CPU) |
49ff5903 | 1348 | seq_printf(s, " | "); |
d7a8d9e9 | 1349 | if (flags & TRACE_GRAPH_PRINT_PROC) |
49ff5903 SR |
1350 | seq_printf(s, " | | "); |
1351 | if (lat) | |
199abfab | 1352 | seq_printf(s, "||||"); |
d7a8d9e9 | 1353 | if (flags & TRACE_GRAPH_PRINT_DURATION) |
9005f3eb FW |
1354 | seq_printf(s, " | | "); |
1355 | seq_printf(s, " | | | |\n"); | |
decbec38 | 1356 | } |
9005f3eb | 1357 | |
62b915f1 | 1358 | void print_graph_headers(struct seq_file *s) |
d7a8d9e9 JO |
1359 | { |
1360 | print_graph_headers_flags(s, tracer_flags.val); | |
1361 | } | |
1362 | ||
0a772620 JO |
1363 | void print_graph_headers_flags(struct seq_file *s, u32 flags) |
1364 | { | |
1365 | struct trace_iterator *iter = s->private; | |
1366 | ||
749230b0 JO |
1367 | if (!(trace_flags & TRACE_ITER_CONTEXT_INFO)) |
1368 | return; | |
1369 | ||
0a772620 JO |
1370 | if (trace_flags & TRACE_ITER_LATENCY_FMT) { |
1371 | /* print nothing if the buffers are empty */ | |
1372 | if (trace_empty(iter)) | |
1373 | return; | |
1374 | ||
1375 | print_trace_header(s, iter); | |
321e68b0 | 1376 | } |
0a772620 JO |
1377 | |
1378 | __print_graph_headers_flags(s, flags); | |
1379 | } | |
1380 | ||
62b915f1 | 1381 | void graph_trace_open(struct trace_iterator *iter) |
9005f3eb | 1382 | { |
2fbcdb35 | 1383 | /* pid and depth on the last trace processed */ |
be1eca39 | 1384 | struct fgraph_data *data; |
9005f3eb FW |
1385 | int cpu; |
1386 | ||
be1eca39 JO |
1387 | iter->private = NULL; |
1388 | ||
1389 | data = kzalloc(sizeof(*data), GFP_KERNEL); | |
2fbcdb35 | 1390 | if (!data) |
be1eca39 JO |
1391 | goto out_err; |
1392 | ||
1393 | data->cpu_data = alloc_percpu(struct fgraph_cpu_data); | |
1394 | if (!data->cpu_data) | |
1395 | goto out_err_free; | |
1396 | ||
1397 | for_each_possible_cpu(cpu) { | |
1398 | pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid); | |
1399 | int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth); | |
1400 | int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore); | |
2bd16212 JO |
1401 | int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq); |
1402 | ||
be1eca39 JO |
1403 | *pid = -1; |
1404 | *depth = 0; | |
1405 | *ignore = 0; | |
2bd16212 | 1406 | *depth_irq = -1; |
be1eca39 | 1407 | } |
9005f3eb | 1408 | |
2fbcdb35 | 1409 | iter->private = data; |
be1eca39 JO |
1410 | |
1411 | return; | |
1412 | ||
1413 | out_err_free: | |
1414 | kfree(data); | |
1415 | out_err: | |
1416 | pr_warning("function graph tracer: not enough memory\n"); | |
9005f3eb FW |
1417 | } |
1418 | ||
62b915f1 | 1419 | void graph_trace_close(struct trace_iterator *iter) |
9005f3eb | 1420 | { |
be1eca39 JO |
1421 | struct fgraph_data *data = iter->private; |
1422 | ||
1423 | if (data) { | |
1424 | free_percpu(data->cpu_data); | |
1425 | kfree(data); | |
1426 | } | |
9005f3eb FW |
1427 | } |
1428 | ||
b304d044 SR |
1429 | static int func_graph_set_flag(u32 old_flags, u32 bit, int set) |
1430 | { | |
1431 | if (bit == TRACE_GRAPH_PRINT_IRQS) | |
1432 | ftrace_graph_skip_irqs = !set; | |
1433 | ||
1434 | return 0; | |
1435 | } | |
1436 | ||
a9a57763 SR |
1437 | static struct trace_event_functions graph_functions = { |
1438 | .trace = print_graph_function_event, | |
1439 | }; | |
1440 | ||
9106b693 JO |
1441 | static struct trace_event graph_trace_entry_event = { |
1442 | .type = TRACE_GRAPH_ENT, | |
a9a57763 | 1443 | .funcs = &graph_functions, |
9106b693 JO |
1444 | }; |
1445 | ||
1446 | static struct trace_event graph_trace_ret_event = { | |
1447 | .type = TRACE_GRAPH_RET, | |
a9a57763 | 1448 | .funcs = &graph_functions |
9106b693 JO |
1449 | }; |
1450 | ||
8f768993 | 1451 | static struct tracer graph_trace __tracer_data = { |
ef18012b | 1452 | .name = "function_graph", |
9005f3eb | 1453 | .open = graph_trace_open, |
be1eca39 | 1454 | .pipe_open = graph_trace_open, |
9005f3eb | 1455 | .close = graph_trace_close, |
be1eca39 | 1456 | .pipe_close = graph_trace_close, |
6eaaa5d5 | 1457 | .wait_pipe = poll_wait_pipe, |
ef18012b SR |
1458 | .init = graph_trace_init, |
1459 | .reset = graph_trace_reset, | |
decbec38 FW |
1460 | .print_line = print_graph_function, |
1461 | .print_header = print_graph_headers, | |
fb52607a | 1462 | .flags = &tracer_flags, |
b304d044 | 1463 | .set_flag = func_graph_set_flag, |
7447dce9 FW |
1464 | #ifdef CONFIG_FTRACE_SELFTEST |
1465 | .selftest = trace_selftest_startup_function_graph, | |
1466 | #endif | |
fb52607a FW |
1467 | }; |
1468 | ||
8741db53 SR |
1469 | |
1470 | static ssize_t | |
1471 | graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt, | |
1472 | loff_t *ppos) | |
1473 | { | |
1474 | unsigned long val; | |
1475 | int ret; | |
1476 | ||
1477 | ret = kstrtoul_from_user(ubuf, cnt, 10, &val); | |
1478 | if (ret) | |
1479 | return ret; | |
1480 | ||
1481 | max_depth = val; | |
1482 | ||
1483 | *ppos += cnt; | |
1484 | ||
1485 | return cnt; | |
1486 | } | |
1487 | ||
1488 | static ssize_t | |
1489 | graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt, | |
1490 | loff_t *ppos) | |
1491 | { | |
1492 | char buf[15]; /* More than enough to hold UINT_MAX + "\n"*/ | |
1493 | int n; | |
1494 | ||
1495 | n = sprintf(buf, "%d\n", max_depth); | |
1496 | ||
1497 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, n); | |
1498 | } | |
1499 | ||
1500 | static const struct file_operations graph_depth_fops = { | |
1501 | .open = tracing_open_generic, | |
1502 | .write = graph_depth_write, | |
1503 | .read = graph_depth_read, | |
1504 | .llseek = generic_file_llseek, | |
1505 | }; | |
1506 | ||
1507 | static __init int init_graph_debugfs(void) | |
1508 | { | |
1509 | struct dentry *d_tracer; | |
1510 | ||
1511 | d_tracer = tracing_init_dentry(); | |
1512 | if (!d_tracer) | |
1513 | return 0; | |
1514 | ||
1515 | trace_create_file("max_graph_depth", 0644, d_tracer, | |
1516 | NULL, &graph_depth_fops); | |
1517 | ||
1518 | return 0; | |
1519 | } | |
1520 | fs_initcall(init_graph_debugfs); | |
1521 | ||
fb52607a FW |
1522 | static __init int init_graph_trace(void) |
1523 | { | |
0c9e6f63 LJ |
1524 | max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1); |
1525 | ||
9106b693 JO |
1526 | if (!register_ftrace_event(&graph_trace_entry_event)) { |
1527 | pr_warning("Warning: could not register graph trace events\n"); | |
1528 | return 1; | |
1529 | } | |
1530 | ||
1531 | if (!register_ftrace_event(&graph_trace_ret_event)) { | |
1532 | pr_warning("Warning: could not register graph trace events\n"); | |
1533 | return 1; | |
1534 | } | |
1535 | ||
fb52607a FW |
1536 | return register_tracer(&graph_trace); |
1537 | } | |
1538 | ||
6f415672 | 1539 | core_initcall(init_graph_trace); |