/*
 * trace task wakeup timings
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <trace/events/sched.h>

#include "trace.h"

static struct trace_array	*wakeup_trace;
static int __read_mostly	tracer_enabled;

static struct task_struct	*wakeup_task;
static int			wakeup_cpu;
static int			wakeup_current_cpu;
static unsigned			wakeup_prio = -1;
static int			wakeup_rt;
static int			wakeup_dl;
static int			tracing_dl = 0;

static arch_spinlock_t wakeup_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void wakeup_reset(struct trace_array *tr);
static void __wakeup_reset(struct trace_array *tr);

static int save_flags;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int wakeup_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int wakeup_display_graph(struct trace_array *tr, int set)
{
	return 0;
}
# define is_graph(tr) false
#endif

#ifdef CONFIG_FUNCTION_TRACER

static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
static void wakeup_graph_return(struct ftrace_graph_ret *trace);

static bool function_enabled;

/*
 * Prologue for the wakeup function tracers.
 *
 * Returns 1 if it is OK to continue; preemption has been disabled
 * and data->disabled has been incremented.
 * Returns 0 if the trace is to be ignored; preemption is not
 * disabled and data->disabled is left unchanged.
 *
 * Note, this function is also used outside this ifdef but
 * inside the #ifdef of the function graph tracer below.
 * This is OK, since the function graph tracer is
 * dependent on the function tracer.
 */
static int
func_prolog_preempt_disable(struct trace_array *tr,
			    struct trace_array_cpu **data,
			    int *pc)
{
	long disabled;
	int cpu;

	if (likely(!wakeup_task))
		return 0;

	*pc = preempt_count();
	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	if (cpu != wakeup_current_cpu)
		goto out_enable;

	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	return 1;

out:
	atomic_dec(&(*data)->disabled);

out_enable:
	preempt_enable_notrace();
	return 0;
}

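/*
 * Illustrative call pattern for the prologue above (a sketch, not part
 * of the original file; wakeup_tracer_call() below is the real thing).
 * Every caller that gets a nonzero return must undo both the disabled
 * count and the preemption disable on its way out:
 *
 *	if (!func_prolog_preempt_disable(tr, &data, &pc))
 *		return;
 *	...record the event...
 *	atomic_dec(&data->disabled);
 *	preempt_enable_notrace();
 */
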
/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
		   struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	local_irq_save(flags);
	trace_function(tr, ip, parent_ip, flags, pc);
	local_irq_restore(flags);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}

static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&wakeup_graph_return,
					    &wakeup_graph_entry);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_wakeup_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph();
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}

static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_wakeup_function(tr, is_graph(tr), 1);
	else
		unregister_wakeup_function(tr, is_graph(tr));

	return 1;
}
#else
static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_wakeup_function(struct trace_array *tr, int graph) { }
static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */

static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (wakeup_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return wakeup_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}

static int start_func_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_wakeup_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_func_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_wakeup_function(tr, graph);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int wakeup_display_graph(struct trace_array *tr, int set)
{
	if (!(is_graph(tr) ^ set))
		return 0;

	stop_func_tracer(tr, !set);

	wakeup_reset(wakeup_trace);
	tr->max_latency = 0;

	return start_func_tracer(tr, set);
}

static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc, ret = 0;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return 0;

	local_save_flags(flags);
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
	preempt_enable_notrace();

	return ret;
}

static void wakeup_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	local_save_flags(flags);
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	preempt_enable_notrace();
}

static void wakeup_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);
}

static void wakeup_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_ABS_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler.
	 */
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_print_header(struct seq_file *s)
{
	if (is_graph(wakeup_trace))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_trace_open(struct trace_iterator *iter) { }
static void wakeup_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}
static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
static void wakeup_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void wakeup_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= tr->max_latency)
			return false;
	}
	return true;
}

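/*
 * Illustrative note (not from the original file): both inputs to
 * report_latency() are exposed through tracefs. A non-zero
 * tracing_thresh records every latency at or above the threshold;
 * otherwise only a latency exceeding the stored maximum is kept:
 *
 *	# echo 100 > /sys/kernel/tracing/tracing_thresh
 *	# cat /sys/kernel/tracing/tracing_max_latency
 */
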
static void
probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
{
	if (task != wakeup_task)
		return;

	wakeup_current_cpu = cpu;
}

static void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned long flags, int pc)
{
	struct trace_event_call *call = &event_context_switch;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->prev_pid = prev->pid;
	entry->prev_prio = prev->prio;
	entry->prev_state = prev->state;
	entry->next_pid = next->pid;
	entry->next_prio = next->prio;
	entry->next_state = next->state;
	entry->next_cpu = task_cpu(next);

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
}

static void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned long flags, int pc)
{
	struct trace_event_call *call = &event_wakeup;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;

	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->prev_pid = curr->pid;
	entry->prev_prio = curr->prio;
	entry->prev_state = curr->state;
	entry->next_pid = wakee->pid;
	entry->next_prio = wakee->prio;
	entry->next_state = wakee->state;
	entry->next_cpu = task_cpu(wakee);

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
}

static void notrace
probe_wakeup_sched_switch(void *ignore, bool preempt,
			  struct task_struct *prev, struct task_struct *next)
{
	struct trace_array_cpu *data;
	cycle_t T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	tracing_record_cmdline(prev);

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see the tracer_enabled = 1
	 * and the wakeup_task with an older task, that might
	 * actually be the same as next.
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;

	pc = preempt_count();

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
	if (likely(disabled != 1))
		goto out;

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	/* The task we are waiting for is waking up */
	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);

	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	if (!report_latency(wakeup_trace, delta))
		goto out_unlock;

	if (likely(!is_tracing_stopped())) {
		wakeup_trace->max_latency = delta;
		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
	}

out_unlock:
	__wakeup_reset(wakeup_trace);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}

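/*
 * Timeline sketch (illustrative, not from the original source): the
 * reported latency spans from the wakeup probe stamping the wakeup
 * CPU's preempt_timestamp to the switch probe above observing
 * wakeup_task actually being scheduled in:
 *
 *	probe_wakeup():              T0 = data->preempt_timestamp
 *	    ...task waits on the runqueue...
 *	probe_wakeup_sched_switch(): T1 = ftrace_now(cpu), delta = T1 - T0
 */
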
static void __wakeup_reset(struct trace_array *tr)
{
	wakeup_cpu = -1;
	wakeup_prio = -1;
	tracing_dl = 0;

	if (wakeup_task)
		put_task_struct(wakeup_task);

	wakeup_task = NULL;
}

static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	tracing_reset_online_cpus(&tr->trace_buffer);

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
}

static void
probe_wakeup(void *ignore, struct task_struct *p)
{
	struct trace_array_cpu *data;
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;
	int pc;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	/*
	 * The semantics are as follows:
	 *  - the wakeup tracer handles all tasks in the system,
	 *    independently of their scheduling class;
	 *  - the wakeup_rt tracer handles tasks belonging to the sched_dl
	 *    and sched_rt classes;
	 *  - wakeup_dl handles tasks belonging to the sched_dl class only.
	 */
	if (tracing_dl || (wakeup_dl && !dl_task(p)) ||
	    (wakeup_rt && !dl_task(p) && !rt_task(p)) ||
	    (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
		return;

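	/*
	 * Worked example (illustrative, not from the original source):
	 * with the wakeup_rt tracer a plain CFS task fails the
	 * (!dl_task(p) && !rt_task(p)) clause above and is ignored, and
	 * any task whose prio value is numerically >= wakeup_prio loses
	 * to the currently traced task, since lower prio values mean
	 * higher priority.
	 */
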
	pc = preempt_count();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	arch_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || tracing_dl ||
	    (!dl_task(p) && p->prio >= wakeup_prio))
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_current_cpu = wakeup_cpu;
	wakeup_prio = p->prio;

	/*
	 * Once you start tracing a -deadline task, don't bother tracing
	 * another task until the first one wakes up.
	 */
	if (dl_task(p))
		tracing_dl = 1;
	else
		tracing_dl = 0;

	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);

	/*
	 * We must be careful in using CALLER_ADDR2. But since wake_up
	 * is not called by an assembly function (whereas schedule is)
	 * it should be safe to use it here.
	 */
	__trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
	arch_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}

static void start_wakeup_tracer(struct trace_array *tr)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return;
	}

	ret = register_trace_sched_wakeup_new(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_migrate_task\n");
		return;
	}

	wakeup_reset(tr);

	/*
	 * Don't let the tracer_enabled = 1 show up before
	 * the wakeup_task is reset. This may be overkill since
	 * wakeup_reset does a spin_unlock after setting the
	 * wakeup_task to NULL, but I want to be safe.
	 * This is a slow path anyway.
	 */
	smp_wmb();

	if (start_func_tracer(tr, is_graph(tr)))
		printk(KERN_ERR "failed to start wakeup tracer\n");

	return;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
}

static void stop_wakeup_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	stop_func_tracer(tr, is_graph(tr));
	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
	unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
}

static bool wakeup_busy;

static int __wakeup_tracer_init(struct trace_array *tr)
{
	save_flags = tr->trace_flags;

	/* non-overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tr->max_latency = 0;
	wakeup_trace = tr;
	ftrace_init_array_ops(tr, wakeup_tracer_call);
	start_wakeup_tracer(tr);

	wakeup_busy = true;
	return 0;
}

static int wakeup_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = 0;
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}

static int wakeup_rt_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = 0;
	wakeup_rt = 1;
	return __wakeup_tracer_init(tr);
}

static int wakeup_dl_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = 1;
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}

static void wakeup_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_wakeup_tracer(tr);
	/* make sure we put back any tasks we are tracing */
	wakeup_reset(tr);

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	ftrace_reset_array_ops(tr);
	wakeup_busy = false;
}

static void wakeup_tracer_start(struct trace_array *tr)
{
	wakeup_reset(tr);
	tracer_enabled = 1;
}

static void wakeup_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

static struct tracer wakeup_tracer __read_mostly =
{
	.name		= "wakeup",
	.init		= wakeup_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};

static struct tracer wakeup_rt_tracer __read_mostly =
{
	.name		= "wakeup_rt",
	.init		= wakeup_rt_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};

static struct tracer wakeup_dl_tracer __read_mostly =
{
	.name		= "wakeup_dl",
	.init		= wakeup_dl_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.use_max_tr	= true,
};

__init static int init_wakeup_tracer(void)
{
	int ret;

	ret = register_tracer(&wakeup_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_rt_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_dl_tracer);
	if (ret)
		return ret;

	return 0;
}
core_initcall(init_wakeup_tracer);
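
/*
 * Usage sketch (illustrative, not part of the original file; paths
 * assume a standard tracefs mount): the tracers registered above are
 * driven entirely from userspace, e.g.:
 *
 *	# echo wakeup_rt > /sys/kernel/tracing/current_tracer
 *	# echo 0 > /sys/kernel/tracing/tracing_max_latency
 *	...let the workload run...
 *	# cat /sys/kernel/tracing/tracing_max_latency
 *	# cat /sys/kernel/tracing/trace
 */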