]>
Commit | Line | Data |
---|---|---|
35e8e302 SR |
1 | /* |
2 | * trace context switch | |
3 | * | |
4 | * Copyright (C) 2007 Steven Rostedt <[email protected]> | |
5 | * | |
6 | */ | |
7 | #include <linux/module.h> | |
8 | #include <linux/fs.h> | |
9 | #include <linux/debugfs.h> | |
10 | #include <linux/kallsyms.h> | |
11 | #include <linux/uaccess.h> | |
12 | #include <linux/marker.h> | |
13 | #include <linux/ftrace.h> | |
14 | ||
15 | #include "trace.h" | |
16 | ||
/* The trace array the context-switch tracer records into; set at init. */
static struct trace_array *ctx_trace;
/* Nonzero while the sched_switch tracer is actively recording. */
static int __read_mostly tracer_enabled;
/*
 * ctx_switch_func - record a single context switch in the trace buffer.
 * @prev: task being switched out
 * @next: task being switched in
 *
 * Runs in the context-switch path.  Disables local interrupts and uses
 * the per-cpu ->disabled counter as a recursion guard: the event is only
 * written when this is the sole active user (counter incremented to 1).
 */
static void
ctx_switch_func(struct task_struct *prev, struct task_struct *next)
{
	struct trace_array *tr = ctx_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (!tracer_enabled)
		return;

	/* irqs must be off before reading the cpu id, so we cannot migrate */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	/* Only trace if we are not recursing into the tracer on this cpu. */
	if (likely(disabled == 1))
		tracing_sched_switch_trace(tr, data, prev, next, flags);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
43 | ||
/*
 * ftrace_ctx_switch - hook called from the scheduler on every switch.
 * @prev: task leaving the cpu
 * @next: task taking the cpu
 *
 * Saves the outgoing task's comm for pid->name resolution, records the
 * switch event, then hands the pair on to the wakeup-latency tracer.
 */
void ftrace_ctx_switch(struct task_struct *prev, struct task_struct *next)
{
	/* Remember prev's comm so its pid can be resolved in the output. */
	tracing_record_cmdline(prev);

	/*
	 * Record the switch locally; ctx_switch_func still needs both
	 * task pointers even when it is the only consumer.
	 */
	ctx_switch_func(prev, next);

	/* Forward to the wakeup tracer; a NOP when that tracer is off. */
	wakeup_sched_switch(prev, next);
}
59 | ||
e309b41d | 60 | static void sched_switch_reset(struct trace_array *tr) |
35e8e302 SR |
61 | { |
62 | int cpu; | |
63 | ||
750ed1a4 | 64 | tr->time_start = ftrace_now(tr->cpu); |
35e8e302 SR |
65 | |
66 | for_each_online_cpu(cpu) | |
67 | tracing_reset(tr->data[cpu]); | |
68 | } | |
69 | ||
e309b41d | 70 | static void start_sched_trace(struct trace_array *tr) |
35e8e302 SR |
71 | { |
72 | sched_switch_reset(tr); | |
73 | tracer_enabled = 1; | |
74 | } | |
75 | ||
e309b41d | 76 | static void stop_sched_trace(struct trace_array *tr) |
35e8e302 SR |
77 | { |
78 | tracer_enabled = 0; | |
79 | } | |
80 | ||
e309b41d | 81 | static void sched_switch_trace_init(struct trace_array *tr) |
35e8e302 SR |
82 | { |
83 | ctx_trace = tr; | |
84 | ||
85 | if (tr->ctrl) | |
86 | start_sched_trace(tr); | |
87 | } | |
88 | ||
e309b41d | 89 | static void sched_switch_trace_reset(struct trace_array *tr) |
35e8e302 SR |
90 | { |
91 | if (tr->ctrl) | |
92 | stop_sched_trace(tr); | |
93 | } | |
94 | ||
95 | static void sched_switch_trace_ctrl_update(struct trace_array *tr) | |
96 | { | |
97 | /* When starting a new trace, reset the buffers */ | |
98 | if (tr->ctrl) | |
99 | start_sched_trace(tr); | |
100 | else | |
101 | stop_sched_trace(tr); | |
102 | } | |
103 | ||
/* Tracer registration record for the "sched_switch" plugin. */
static struct tracer sched_switch_trace __read_mostly =
{
	.name		= "sched_switch",
	.init		= sched_switch_trace_init,
	.reset		= sched_switch_trace_reset,
	.ctrl_update	= sched_switch_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
	/* Optional boot-time self test, compiled in on demand. */
	.selftest    = trace_selftest_startup_sched_switch,
#endif
};
114 | ||
115 | __init static int init_sched_switch_trace(void) | |
116 | { | |
117 | return register_tracer(&sched_switch_trace); | |
118 | } | |
119 | device_initcall(init_sched_switch_trace); |