// SPDX-License-Identifier: GPL-2.0
/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <[email protected]>
 * Copyright (C) 2008 Ingo Molnar <[email protected]>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
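
/*
 * These tracers are normally driven from user space through tracefs,
 * for example (assuming tracefs is mounted at /sys/kernel/tracing; on
 * some systems it appears under /sys/kernel/debug/tracing instead):
 *
 *	echo wakeup_rt > /sys/kernel/tracing/current_tracer
 *	cat /sys/kernel/tracing/tracing_max_latency
 */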
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <trace/events/sched.h>

#include "trace.h"

static struct trace_array *wakeup_trace;
static int __read_mostly tracer_enabled;

static struct task_struct *wakeup_task;
static int wakeup_cpu;
static int wakeup_current_cpu;
static unsigned wakeup_prio = -1;
static int wakeup_rt;
static int wakeup_dl;
static int tracing_dl = 0;

static arch_spinlock_t wakeup_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void wakeup_reset(struct trace_array *tr);
static void __wakeup_reset(struct trace_array *tr);

static int save_flags;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int wakeup_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int wakeup_display_graph(struct trace_array *tr, int set)
{
	return 0;
}
# define is_graph(tr) false
#endif

#ifdef CONFIG_FUNCTION_TRACER

static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
static void wakeup_graph_return(struct ftrace_graph_ret *trace);

static bool function_enabled;

/*
 * Prologue for the wakeup function tracers.
 *
 * Returns 1 if it is OK to continue, and preemption
 * is disabled and data->disabled is incremented.
 * 0 if the trace is to be ignored, and preemption
 * is not disabled and data->disabled is
 * kept the same.
 *
 * Note, this function is also used outside this ifdef but
 * inside the #ifdef of the function graph tracer below.
 * This is OK, since the function graph tracer is
 * dependent on the function tracer.
 */
static int
func_prolog_preempt_disable(struct trace_array *tr,
			    struct trace_array_cpu **data,
			    int *pc)
{
	long disabled;
	int cpu;

	if (likely(!wakeup_task))
		return 0;

	*pc = preempt_count();
	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	if (cpu != wakeup_current_cpu)
		goto out_enable;

	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	return 1;

out:
	atomic_dec(&(*data)->disabled);

out_enable:
	preempt_enable_notrace();
	return 0;
}

/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
		   struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	local_irq_save(flags);
	trace_function(tr, ip, parent_ip, flags, pc);
	local_irq_restore(flags);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}

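/*
 * Hook up either the function graph callbacks or the plain function
 * tracer callback for this trace array, depending on 'graph'.
 */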
static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&wakeup_graph_return,
					    &wakeup_graph_entry);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_wakeup_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph();
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}

static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_wakeup_function(tr, is_graph(tr), 1);
	else
		unregister_wakeup_function(tr, is_graph(tr));
	return 1;
}
#else
static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_wakeup_function(struct trace_array *tr, int graph) { }
static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */

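/*
 * Called when a trace option flag is toggled while one of the wakeup
 * tracers is the current tracer.
 */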
static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (wakeup_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return wakeup_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}

static int start_func_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_wakeup_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_func_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_wakeup_function(tr, graph);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
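/*
 * Switch between function and function graph output when the
 * display-graph option is toggled: stop the current callbacks, reset
 * the trace and the recorded max latency, then restart in the new mode.
 */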
static int wakeup_display_graph(struct trace_array *tr, int set)
{
	if (!(is_graph(tr) ^ set))
		return 0;

	stop_func_tracer(tr, !set);

	wakeup_reset(wakeup_trace);
	tr->max_latency = 0;

	return start_func_tracer(tr, set);
}

static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc, ret = 0;

	if (ftrace_graph_ignore_func(trace))
		return 0;
	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions. But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return 0;

	local_save_flags(flags);
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
	preempt_enable_notrace();

	return ret;
}

static void wakeup_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	ftrace_graph_addr_finish(trace);

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	local_save_flags(flags);
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	preempt_enable_notrace();
	return;
}

static void wakeup_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);
}

static void wakeup_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_ABS_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_print_header(struct seq_file *s)
{
	if (is_graph(wakeup_trace))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}
#else
#define __trace_function trace_function

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_trace_open(struct trace_iterator *iter) { }
static void wakeup_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}
static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
static void wakeup_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void wakeup_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= tr->max_latency)
			return false;
	}
	return true;
}

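/* Keep wakeup_current_cpu up to date when the task being traced migrates. */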
static void
probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
{
	if (task != wakeup_task)
		return;

	wakeup_current_cpu = cpu;
}

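/*
 * Record a TRACE_CTX entry (the prev -> next context switch) directly
 * into this tracer's ring buffer.
 */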
static void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned long flags, int pc)
{
	struct trace_event_call *call = &event_context_switch;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->prev_pid = prev->pid;
	entry->prev_prio = prev->prio;
	entry->prev_state = task_state_index(prev);
	entry->next_pid = next->pid;
	entry->next_prio = next->prio;
	entry->next_state = task_state_index(next);
	entry->next_cpu = task_cpu(next);

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
}

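/* Record a TRACE_WAKE entry noting that 'curr' woke up 'wakee'. */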
static void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned long flags, int pc)
{
	struct trace_event_call *call = &event_wakeup;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;

	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->prev_pid = curr->pid;
	entry->prev_prio = curr->prio;
	entry->prev_state = task_state_index(curr);
	entry->next_pid = wakee->pid;
	entry->next_prio = wakee->prio;
	entry->next_state = task_state_index(wakee);
	entry->next_cpu = task_cpu(wakee);

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
}

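/*
 * sched_switch tracepoint probe: when the task we are waiting on is
 * finally scheduled in, compute the wakeup latency and record it as the
 * new maximum if it qualifies.
 */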
static void notrace
probe_wakeup_sched_switch(void *ignore, bool preempt,
			  struct task_struct *prev, struct task_struct *next)
{
	struct trace_array_cpu *data;
	u64 T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	tracing_record_cmdline(prev);

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see the tracer_enabled = 1
	 * and the wakeup_task with an older task, that might
	 * actually be the same as next.
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;

	pc = preempt_count();

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
	if (likely(disabled != 1))
		goto out;

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	/* The task we are waiting for is waking up */
	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);

	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	if (!report_latency(wakeup_trace, delta))
		goto out_unlock;

	if (likely(!is_tracing_stopped())) {
		wakeup_trace->max_latency = delta;
		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
	}

out_unlock:
	__wakeup_reset(wakeup_trace);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}

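/*
 * Drop the task currently being traced. The caller must hold wakeup_lock
 * with interrupts disabled (wakeup_reset() below takes care of that).
 */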
static void __wakeup_reset(struct trace_array *tr)
{
	wakeup_cpu = -1;
	wakeup_prio = -1;
	tracing_dl = 0;

	if (wakeup_task)
		put_task_struct(wakeup_task);

	wakeup_task = NULL;
}

static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	tracing_reset_online_cpus(&tr->trace_buffer);

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
}

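/*
 * sched_wakeup/sched_wakeup_new tracepoint probe: possibly start a new
 * latency measurement for the task that is being woken up.
 */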
static void
probe_wakeup(void *ignore, struct task_struct *p)
{
	struct trace_array_cpu *data;
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;
	int pc;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	/*
	 * Semantic is like this:
	 *  - wakeup tracer handles all tasks in the system, independently
	 *    from their scheduling class;
	 *  - wakeup_rt tracer handles tasks belonging to sched_dl and
	 *    sched_rt class;
	 *  - wakeup_dl handles tasks belonging to sched_dl class only.
	 */
	if (tracing_dl || (wakeup_dl && !dl_task(p)) ||
	    (wakeup_rt && !dl_task(p) && !rt_task(p)) ||
	    (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
		return;

	pc = preempt_count();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	arch_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || tracing_dl ||
	    (!dl_task(p) && p->prio >= wakeup_prio))
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_current_cpu = wakeup_cpu;
	wakeup_prio = p->prio;

	/*
	 * Once you start tracing a -deadline task, don't bother tracing
	 * another task until the first one wakes up.
	 */
	if (dl_task(p))
		tracing_dl = 1;
	else
		tracing_dl = 0;

	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);

	/*
	 * We must be careful in using CALLER_ADDR2. But since wake_up
	 * is not called by an assembly function (whereas schedule is)
	 * it should be safe to use it here.
	 */
	__trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
	arch_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}

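/*
 * Attach the scheduler tracepoint probes and (re)start the function or
 * function graph tracer that records the trace between wakeup and switch.
 */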
static void start_wakeup_tracer(struct trace_array *tr)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return;
	}

	ret = register_trace_sched_wakeup_new(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_migrate_task\n");
		return;
	}

	wakeup_reset(tr);

	/*
	 * Don't let the tracer_enabled = 1 show up before
	 * the wakeup_task is reset. This may be overkill since
	 * wakeup_reset does a spin_unlock after setting the
	 * wakeup_task to NULL, but I want to be safe.
	 * This is a slow path anyway.
	 */
	smp_wmb();

	if (start_func_tracer(tr, is_graph(tr)))
		printk(KERN_ERR "failed to start wakeup tracer\n");

	return;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
}

static void stop_wakeup_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	stop_func_tracer(tr, is_graph(tr));
	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
	unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
}

static bool wakeup_busy;

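/* Common initialization shared by the wakeup, wakeup_rt and wakeup_dl tracers. */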
static int __wakeup_tracer_init(struct trace_array *tr)
{
	save_flags = tr->trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tr->max_latency = 0;
	wakeup_trace = tr;
	ftrace_init_array_ops(tr, wakeup_tracer_call);
	start_wakeup_tracer(tr);

	wakeup_busy = true;
	return 0;
}

static int wakeup_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = 0;
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}

static int wakeup_rt_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = 0;
	wakeup_rt = 1;
	return __wakeup_tracer_init(tr);
}

static int wakeup_dl_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = 1;
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}

static void wakeup_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_wakeup_tracer(tr);
	/* make sure we put back any tasks we are tracing */
	wakeup_reset(tr);

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	ftrace_reset_array_ops(tr);
	wakeup_busy = false;
}

static void wakeup_tracer_start(struct trace_array *tr)
{
	wakeup_reset(tr);
	tracer_enabled = 1;
}

static void wakeup_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

static struct tracer wakeup_tracer __read_mostly =
{
	.name = "wakeup",
	.init = wakeup_tracer_init,
	.reset = wakeup_tracer_reset,
	.start = wakeup_tracer_start,
	.stop = wakeup_tracer_stop,
	.print_max = true,
	.print_header = wakeup_print_header,
	.print_line = wakeup_print_line,
	.flag_changed = wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_wakeup,
#endif
	.open = wakeup_trace_open,
	.close = wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr = true,
};

static struct tracer wakeup_rt_tracer __read_mostly =
{
	.name = "wakeup_rt",
	.init = wakeup_rt_tracer_init,
	.reset = wakeup_tracer_reset,
	.start = wakeup_tracer_start,
	.stop = wakeup_tracer_stop,
	.print_max = true,
	.print_header = wakeup_print_header,
	.print_line = wakeup_print_line,
	.flag_changed = wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_wakeup,
#endif
	.open = wakeup_trace_open,
	.close = wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr = true,
};

static struct tracer wakeup_dl_tracer __read_mostly =
{
	.name = "wakeup_dl",
	.init = wakeup_dl_tracer_init,
	.reset = wakeup_tracer_reset,
	.start = wakeup_tracer_start,
	.stop = wakeup_tracer_stop,
	.print_max = true,
	.print_header = wakeup_print_header,
	.print_line = wakeup_print_line,
	.flag_changed = wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_wakeup,
#endif
	.open = wakeup_trace_open,
	.close = wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr = true,
};

__init static int init_wakeup_tracer(void)
{
	int ret;

	ret = register_tracer(&wakeup_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_rt_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_dl_tracer);
	if (ret)
		return ret;

	return 0;
}
core_initcall(init_wakeup_tracer);