/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <[email protected]>
 * Copyright (C) 2008 Ingo Molnar <[email protected]>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>

#include "trace.h"

static struct trace_array *irqsoff_trace __read_mostly;
static int tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_RAW_SPINLOCK(max_trace_lock);

enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};

static int trace_type __read_mostly;

static int save_flags;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int irqsoff_display_graph(struct trace_array *tr, int set)
{
	return -EINVAL;
}
# define is_graph(tr) false
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *            incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *            is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
			   struct trace_array_cpu **data,
			   unsigned long *flags)
{
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return 0;

	local_save_flags(*flags);
	/* slight chance to get a false positive on tracing_cpu */
	if (!irqs_disabled_flags(*flags))
		return 0;

	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);

	if (likely(disabled == 1))
		return 1;

	atomic_dec(&(*data)->disabled);

	return 0;
}

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	trace_function(tr, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set)
{
	int cpu;

	if (!(is_graph(tr) ^ set))
		return 0;

	stop_irqsoff_tracer(irqsoff_trace, !set);

	for_each_possible_cpu(cpu)
		per_cpu(tracing_cpu, cpu) = 0;

	tr->max_latency = 0;
	tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);

	return start_irqsoff_tracer(irqsoff_trace, set);
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int ret;
	int pc;

	if (!func_prolog_dec(tr, &data, &flags))
		return 0;

	pc = preempt_count();
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	pc = preempt_count();
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
}

static void irqsoff_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
			    TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_ABS_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
	struct trace_array *tr = irqsoff_trace;

	if (is_graph(tr))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function

#ifdef CONFIG_FUNCTION_TRACER
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}
#endif

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= tr->max_latency)
			return false;
	}
	return true;
}

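/*
 * Usage sketch (illustrative, assuming tracefs is mounted at
 * /sys/kernel/tracing): report_latency() gives the tracer two modes.
 * With a non-zero threshold, every critical section longer than the
 * threshold is recorded; otherwise only a new maximum is.
 *
 *   echo 400 > /sys/kernel/tracing/tracing_thresh     # report all > 400 us
 *   echo 0 > /sys/kernel/tracing/tracing_thresh       # max-latency mode
 *   cat /sys/kernel/tracing/tracing_max_latency       # current max, in us
 */
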
static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	cycle_t T0, T1, delta;
	unsigned long flags;
	int pc;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	local_save_flags(flags);

	pc = preempt_count();

	if (!report_latency(tr, delta))
		goto out;

	raw_spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(tr, delta))
		goto out_unlock;

	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
	/* Skip 5 functions to get to the irq/preempt enable function */
	__trace_stack(tr, flags, 5, pc);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tr->max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}

static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	local_save_flags(flags);

	__trace_function(tr, ip, parent_ip, flags, preempt_count());

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}

static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	__trace_function(tr, ip, parent_ip, flags, preempt_count());
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}

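/*
 * Summary of the per-CPU protocol above: start_critical_timing() stamps
 * data->preempt_timestamp and sets this CPU's tracing_cpu flag, so nested
 * irq/preempt disables are ignored; stop_critical_timing() clears
 * tracing_cpu first and only then asks check_critical_timing() whether the
 * just-closed section is a new maximum.  data->disabled guards against
 * recursion while the trace entries themselves are written.
 */
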
/* start and stop critical timings, used to stop the measurement (in idle) */
void start_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);

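/*
 * The exported pair above lets code that legitimately runs with interrupts
 * disabled for long stretches exclude itself from the measurement; the
 * idle loop, for example, is expected to bracket the actual idle wait with
 * stop_critical_timings()/start_critical_timings() so that time spent
 * sleeping is not reported as an irqs-off latency.
 */
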
#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	if (preempt_trace() && !irq_trace())
		stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	if (preempt_trace() && !irq_trace())
		start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_FUNCTION_TRACER
static bool function_enabled;

static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&irqsoff_graph_return,
					    &irqsoff_graph_entry);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph();
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}

static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_irqsoff_function(tr, is_graph(tr), 1);
	else
		unregister_irqsoff_function(tr, is_graph(tr));
	return 1;
}
#else
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */

static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (irqsoff_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return irqsoff_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}

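/*
 * Sketch of how the flag plumbing above is exercised (assuming the usual
 * tracefs option names): toggling a run-time option from userspace lands
 * in irqsoff_flag_changed(), e.g.
 *
 *   echo 1 > /sys/kernel/tracing/options/function-trace   # function entries
 *   echo 1 > /sys/kernel/tracing/options/display-graph    # graph output
 *
 * The first goes through irqsoff_function_set(), the second through
 * irqsoff_display_graph(), which restarts the tracer with the graph
 * callbacks registered.
 */
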
static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_irqsoff_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_irqsoff_function(tr, graph);
}

static bool irqsoff_busy;

static int __irqsoff_tracer_init(struct trace_array *tr)
{
	if (irqsoff_busy)
		return -EBUSY;

	save_flags = tr->trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tr->max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();
	tracing_reset_online_cpus(&tr->trace_buffer);

	ftrace_init_array_ops(tr, irqsoff_tracer_call);

	/* Only toplevel instance supports graph tracing */
	if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
				      is_graph(tr))))
		printk(KERN_ERR "failed to start irqsoff tracer\n");

	irqsoff_busy = true;
	return 0;
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_irqsoff_tracer(tr, is_graph(tr));

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	ftrace_reset_array_ops(tr);

	irqsoff_busy = false;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	return __irqsoff_tracer_init(tr);
}
static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_irqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
	defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptirqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

__init static int init_irqsoff_tracer(void)
{
	register_irqsoff(irqsoff_tracer);
	register_preemptoff(preemptoff_tracer);
	register_preemptirqsoff(preemptirqsoff_tracer);

	return 0;
}
core_initcall(init_irqsoff_tracer);
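
/*
 * Typical use of the tracers registered above (illustrative; assumes
 * tracefs is mounted at /sys/kernel/tracing and the matching CONFIG_*
 * options are enabled):
 *
 *   cd /sys/kernel/tracing
 *   echo 0 > options/function-trace       # optional: lower overhead
 *   echo preemptirqsoff > current_tracer  # or "irqsoff" / "preemptoff"
 *   echo 1 > tracing_on
 *   ... run the workload ...
 *   echo 0 > tracing_on
 *   cat tracing_max_latency               # worst-case latency, in us
 *   cat trace                             # snapshot of that worst section
 */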