// SPDX-License-Identifier: GPL-2.0
/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>

#include "trace.h"

#include <trace/events/preemptirq.h>

#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
static struct trace_array *irqsoff_trace __read_mostly;
static int tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_RAW_SPINLOCK(max_trace_lock);

enum {
        TRACER_IRQS_OFF         = (1 << 1),
        TRACER_PREEMPT_OFF      = (1 << 2),
};

static int trace_type __read_mostly;

static int save_flags;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(int pc)
{
        return ((trace_type & TRACER_PREEMPT_OFF) && pc);
}
#else
# define preempt_trace(pc) (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
        return ((trace_type & TRACER_IRQS_OFF) &&
                irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

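/*
 * Note: preempt_trace() and irq_trace() gate every timing hook below on
 * the tracer type selected at init time, so the same start/stop
 * machinery serves the irqsoff, preemptoff, and preemptirqsoff tracers.
 */
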
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int irqsoff_display_graph(struct trace_array *tr, int set)
{
        return -EINVAL;
}
# define is_graph(tr) false
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *           incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *           is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 * inside the #ifdef of the function graph tracer below.
 * This is OK, since the function graph tracer is
 * dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
                           struct trace_array_cpu **data,
                           unsigned long *flags)
{
        long disabled;
        int cpu;

        /*
         * Does not matter if we preempt. We test the flags
         * afterward, to see if irqs are disabled or not.
         * If we preempt and get a false positive, the flags
         * test will fail.
         */
        cpu = raw_smp_processor_id();
        if (likely(!per_cpu(tracing_cpu, cpu)))
                return 0;

        local_save_flags(*flags);
        /*
         * Slight chance to get a false positive on tracing_cpu,
         * although I'm starting to think there isn't a chance.
         * Leave this for now just to be paranoid.
         */
        if (!irqs_disabled_flags(*flags) && !preempt_count())
                return 0;

        *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&(*data)->disabled);

        if (likely(disabled == 1))
                return 1;

        atomic_dec(&(*data)->disabled);

        return 0;
}

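/*
 * On a nonzero return from func_prolog_dec(), the caller holds a
 * reference on (*data)->disabled and must drop it with
 * atomic_dec(&data->disabled) once its event has been recorded, as the
 * callbacks below do.
 */
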
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (!func_prolog_dec(tr, &data, &flags))
                return;

        trace_function(tr, ip, parent_ip, flags, preempt_count());

        atomic_dec(&data->disabled);
}
#endif /* CONFIG_FUNCTION_TRACER */

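/*
 * Toggling the display-graph flag restarts the tracer: the per-cpu
 * tracing_cpu flags, the saved max latency, and the ring buffer are all
 * reset so output from the two modes is never mixed in one trace.
 */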
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set)
{
        int cpu;

        if (!(is_graph(tr) ^ set))
                return 0;

        stop_irqsoff_tracer(irqsoff_trace, !set);

        for_each_possible_cpu(cpu)
                per_cpu(tracing_cpu, cpu) = 0;

        tr->max_latency = 0;
        tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);

        return start_irqsoff_tracer(irqsoff_trace, set);
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int ret;
        int pc;

        if (ftrace_graph_ignore_func(trace))
                return 0;
        /*
         * Do not trace a function if it's filtered by set_graph_notrace.
         * Make the index of ret stack negative to indicate that it should
         * ignore further functions. But it needs its own ret stack entry
         * to recover the original index in order to continue tracing after
         * returning from the function.
         */
        if (ftrace_graph_notrace_addr(trace->func))
                return 1;

        if (!func_prolog_dec(tr, &data, &flags))
                return 0;

        pc = preempt_count();
        ret = __trace_graph_entry(tr, trace, flags, pc);
        atomic_dec(&data->disabled);

        return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int pc;

        ftrace_graph_addr_finish(trace);

        if (!func_prolog_dec(tr, &data, &flags))
                return;

        pc = preempt_count();
        __trace_graph_return(tr, trace, flags, pc);
        atomic_dec(&data->disabled);
}

static void irqsoff_trace_open(struct trace_iterator *iter)
{
        if (is_graph(iter->tr))
                graph_trace_open(iter);
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
        if (iter->private)
                graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
                            TRACE_GRAPH_PRINT_PROC | \
                            TRACE_GRAPH_PRINT_ABS_TIME | \
                            TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
        /*
         * In graph mode call the graph tracer output function,
         * otherwise go with the TRACE_FN event handler
         */
        if (is_graph(iter->tr))
                return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

        return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
        struct trace_array *tr = irqsoff_trace;

        if (is_graph(tr))
                print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
        else
                trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
                 unsigned long ip, unsigned long parent_ip,
                 unsigned long flags, int pc)
{
        if (is_graph(tr))
                trace_graph_function(tr, ip, parent_ip, flags, pc);
        else
                trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function

#ifdef CONFIG_FUNCTION_TRACER
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
        return -1;
}
#endif

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
        return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_print_header(struct seq_file *s)
{
        trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
        trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
        if (tracing_thresh) {
                if (delta < tracing_thresh)
                        return false;
        } else {
                if (delta <= tr->max_latency)
                        return false;
        }
        return true;
}

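/*
 * That is: with a nonzero tracing_thresh, every section at least that
 * long is recorded; otherwise only sections that exceed the current
 * tr->max_latency are.
 */
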
static void
check_critical_timing(struct trace_array *tr,
                      struct trace_array_cpu *data,
                      unsigned long parent_ip,
                      int cpu)
{
        u64 T0, T1, delta;
        unsigned long flags;
        int pc;

        T0 = data->preempt_timestamp;
        T1 = ftrace_now(cpu);
        delta = T1 - T0;

        local_save_flags(flags);

        pc = preempt_count();

        if (!report_latency(tr, delta))
                goto out;

        raw_spin_lock_irqsave(&max_trace_lock, flags);

        /* check if we are still the max latency */
        if (!report_latency(tr, delta))
                goto out_unlock;

        __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
        /* Skip 5 functions to get to the irq/preempt enable function */
        __trace_stack(tr, flags, 5, pc);

        if (data->critical_sequence != max_sequence)
                goto out_unlock;

        data->critical_end = parent_ip;

        if (likely(!is_tracing_stopped())) {
                tr->max_latency = delta;
                update_max_tr_single(tr, current, cpu);
        }

        max_sequence++;

out_unlock:
        raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}

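/*
 * start_critical_timing() stamps the beginning of an irqs/preempt-off
 * section; stop_critical_timing() traces the end point and hands the
 * measured window to check_critical_timing() above.
 */
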
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (!tracer_enabled || !tracing_is_enabled())
                return;

        cpu = raw_smp_processor_id();

        if (per_cpu(tracing_cpu, cpu))
                return;

        data = per_cpu_ptr(tr->trace_buffer.data, cpu);

        if (unlikely(!data) || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        data->critical_start = parent_ip ? : ip;

        local_save_flags(flags);

        __trace_function(tr, ip, parent_ip, flags, pc);

        per_cpu(tracing_cpu, cpu) = 1;

        atomic_dec(&data->disabled);
}

static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        cpu = raw_smp_processor_id();
        /* Always clear the tracing cpu on stopping the trace */
        if (unlikely(per_cpu(tracing_cpu, cpu)))
                per_cpu(tracing_cpu, cpu) = 0;
        else
                return;

        if (!tracer_enabled || !tracing_is_enabled())
                return;

        data = per_cpu_ptr(tr->trace_buffer.data, cpu);

        if (unlikely(!data) ||
            !data->critical_start || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        local_save_flags(flags);
        __trace_function(tr, ip, parent_ip, flags, pc);
        check_critical_timing(tr, data, parent_ip ? : ip, cpu);
        data->critical_start = 0;
        atomic_dec(&data->disabled);
}

/* start and stop critical timings, used to suppress measurement across sections such as idle */
void start_critical_timings(void)
{
        int pc = preempt_count();

        if (preempt_trace(pc) || irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
        int pc = preempt_count();

        if (preempt_trace(pc) || irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);

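/*
 * Typical use from user space, as a sketch (paths assume tracefs is
 * mounted at /sys/kernel/tracing):
 *
 *      echo irqsoff > /sys/kernel/tracing/current_tracer
 *      cat /sys/kernel/tracing/tracing_max_latency
 *      cat /sys/kernel/tracing/trace
 *
 * Writing 0 to tracing_max_latency resets the recorded maximum.
 */
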
#ifdef CONFIG_FUNCTION_TRACER
static bool function_enabled;

static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
        int ret;

        /* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
        if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
                return 0;

        if (graph)
                ret = register_ftrace_graph(&irqsoff_graph_return,
                                            &irqsoff_graph_entry);
        else
                ret = register_ftrace_function(tr->ops);

        if (!ret)
                function_enabled = true;

        return ret;
}

static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
        if (!function_enabled)
                return;

        if (graph)
                unregister_ftrace_graph();
        else
                unregister_ftrace_function(tr->ops);

        function_enabled = false;
}

static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
        if (!(mask & TRACE_ITER_FUNCTION))
                return 0;

        if (set)
                register_irqsoff_function(tr, is_graph(tr), 1);
        else
                unregister_irqsoff_function(tr, is_graph(tr));
        return 1;
}
#else
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
        return 0;
}
static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
        return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */

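/*
 * In the CONFIG_FUNCTION_TRACER build above, function_enabled tracks
 * whether the function (or function graph) callbacks are registered, so
 * flag flips and tracer start/stop do not double-register or
 * double-unregister them.
 */
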
static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
        struct tracer *tracer = tr->current_trace;

        if (irqsoff_function_set(tr, mask, set))
                return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        if (mask & TRACE_ITER_DISPLAY_GRAPH)
                return irqsoff_display_graph(tr, set);
#endif

        return trace_keep_overwrite(tracer, mask, set);
}

static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
        int ret;

        ret = register_irqsoff_function(tr, graph, 0);

        if (!ret && tracing_is_enabled())
                tracer_enabled = 1;
        else
                tracer_enabled = 0;

        return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
        tracer_enabled = 0;

        unregister_irqsoff_function(tr, graph);
}

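/*
 * Only one tracer of this family can be active at a time; irqsoff_busy
 * enforces that between __irqsoff_tracer_init() and
 * __irqsoff_tracer_reset().
 */
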
static bool irqsoff_busy;

static int __irqsoff_tracer_init(struct trace_array *tr)
{
        if (irqsoff_busy)
                return -EBUSY;

        save_flags = tr->trace_flags;

        /* non overwrite screws up the latency tracers */
        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
        set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

        tr->max_latency = 0;
        irqsoff_trace = tr;
        /* make sure that the tracer is visible */
        smp_wmb();

        ftrace_init_array_ops(tr, irqsoff_tracer_call);

        /* Only toplevel instance supports graph tracing */
        if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
                                      is_graph(tr))))
                printk(KERN_ERR "failed to start irqsoff tracer\n");

        irqsoff_busy = true;
        return 0;
}

static void __irqsoff_tracer_reset(struct trace_array *tr)
{
        int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
        int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

        stop_irqsoff_tracer(tr, is_graph(tr));

        set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
        ftrace_reset_array_ops(tr);

        irqsoff_busy = false;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
        tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
        tracer_enabled = 0;
}

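/*
 * The tracers below share all of the machinery above; they differ only
 * in trace_type and in which disable/enable events mark the boundaries
 * of a critical section.
 */
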
#ifdef CONFIG_IRQSOFF_TRACER
/*
 * We are only interested in hardirq on/off events:
 */
void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
{
        unsigned int pc = preempt_count();

        if (!preempt_trace(pc) && irq_trace())
                stop_critical_timing(a0, a1, pc);
}

void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
{
        unsigned int pc = preempt_count();

        if (!preempt_trace(pc) && irq_trace())
                start_critical_timing(a0, a1, pc);
}

static int irqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF;

        return __irqsoff_tracer_init(tr);
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
        __irqsoff_tracer_reset(tr);
}

static struct tracer irqsoff_tracer __read_mostly =
{
        .name           = "irqsoff",
        .init           = irqsoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flag_changed   = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_irqsoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
void tracer_preempt_on(unsigned long a0, unsigned long a1)
{
        int pc = preempt_count();

        if (preempt_trace(pc) && !irq_trace())
                stop_critical_timing(a0, a1, pc);
}

void tracer_preempt_off(unsigned long a0, unsigned long a1)
{
        int pc = preempt_count();

        if (preempt_trace(pc) && !irq_trace())
                start_critical_timing(a0, a1, pc);
}

static int preemptoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_PREEMPT_OFF;

        return __irqsoff_tracer_init(tr);
}

static void preemptoff_tracer_reset(struct trace_array *tr)
{
        __irqsoff_tracer_reset(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
        .name           = "preemptoff",
        .init           = preemptoff_tracer_init,
        .reset          = preemptoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flag_changed   = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_preemptoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

        return __irqsoff_tracer_init(tr);
}

static void preemptirqsoff_tracer_reset(struct trace_array *tr)
{
        __irqsoff_tracer_reset(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
        .name           = "preemptirqsoff",
        .init           = preemptirqsoff_tracer_init,
        .reset          = preemptirqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flag_changed   = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_preemptirqsoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};
#endif

__init static int init_irqsoff_tracer(void)
{
#ifdef CONFIG_IRQSOFF_TRACER
        register_tracer(&irqsoff_tracer);
#endif
#ifdef CONFIG_PREEMPT_TRACER
        register_tracer(&preemptoff_tracer);
#endif
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
        register_tracer(&preemptirqsoff_tracer);
#endif

        return 0;
}
core_initcall(init_irqsoff_tracer);
#endif /* CONFIG_IRQSOFF_TRACER || CONFIG_PREEMPT_TRACER */