/*
 * sched_clock for unstable cpu clocks
 *
 * Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <[email protected]>
 *
 * Updates and enhancements:
 * Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <[email protected]>
 *
 * Based on code by:
 * Ingo Molnar <[email protected]>
 * Guillaume Chazarain <[email protected]>
 *
 * Create a semi-stable clock from a mixture of other events, including:
 *  - gtod
 *  - sched_clock()
 *  - explicit idle events
 *
 * We use gtod as the base and apply the unstable clock deltas on top.
 * The deltas are filtered to keep the clock monotonic and within an
 * expected window.
 *
 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 * that is otherwise invisible (the TSC gets stopped).
 *
 * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat
 * consistent between cpus (never more than 2 jiffies difference).
 */
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/ktime.h>
#include <linux/sched.h>

/*
 * Scheduler clock - returns current time in nanosec units.
 * This is the default implementation; architectures and
 * sub-architectures can override it.
 */
unsigned long long __attribute__((weak)) sched_clock(void)
{
        return (unsigned long long)(jiffies - INITIAL_JIFFIES)
                                        * (NSEC_PER_SEC / HZ);
}
EXPORT_SYMBOL_GPL(sched_clock);
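
/*
 * Note: the weak default above only advances in NSEC_PER_SEC / HZ steps,
 * i.e. once per jiffy (1,000,000 ns with HZ=1000); subtracting
 * INITIAL_JIFFIES makes the clock start near zero at boot.
 */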

static __read_mostly int sched_clock_running;

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__read_mostly int sched_clock_stable;

struct sched_clock_data {
        u64 tick_raw;   /* sched_clock() value at the last tick */
        u64 tick_gtod;  /* gtod (ktime, in ns) at the last tick */
        u64 clock;      /* the filtered, monotonic clock */
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

static inline struct sched_clock_data *this_scd(void)
{
        return &__get_cpu_var(sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
        return &per_cpu(sched_clock_data, cpu);
}

void sched_clock_init(void)
{
        u64 ktime_now = ktime_to_ns(ktime_get());
        int cpu;

        for_each_possible_cpu(cpu) {
                struct sched_clock_data *scd = cpu_sdc(cpu);

                scd->tick_raw = 0;
                scd->tick_gtod = ktime_now;
                scd->clock = ktime_now;
        }

        sched_clock_running = 1;
}
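
/*
 * Until sched_clock_init() sets sched_clock_running, sched_clock_cpu()
 * returns 0. All cpus start with clock == tick_gtod == the same ktime
 * base, so the per-cpu clocks begin in agreement.
 */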

/*
 * min, max except they take wrapping into account
 */

static inline u64 wrap_min(u64 x, u64 y)
{
        return (s64)(x - y) < 0 ? x : y;
}

static inline u64 wrap_max(u64 x, u64 y)
{
        return (s64)(x - y) > 0 ? x : y;
}
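
/*
 * Worked example of the wrap-aware compare: near a u64 wrap,
 * wrap_max(1, ULLONG_MAX) == 1 because (s64)(1 - ULLONG_MAX) == 2 > 0,
 * so 1 is treated as coming "after" ULLONG_MAX; a plain max() would
 * pick ULLONG_MAX and make the clock appear to jump backwards.
 */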

/*
 * update the percpu scd from the raw @now value
 *
 *  - filter out backward motion
 *  - use the GTOD tick value to create a window to filter crazy TSC values
 */
static u64 sched_clock_local(struct sched_clock_data *scd)
{
        u64 now, clock, old_clock, min_clock, max_clock;
        s64 delta;

again:
        now = sched_clock();
        delta = now - scd->tick_raw;
        if (unlikely(delta < 0))
                delta = 0;

        old_clock = scd->clock;

        /*
         * scd->clock = clamp(scd->tick_gtod + delta,
         *                    max(scd->tick_gtod, scd->clock),
         *                    scd->tick_gtod + TICK_NSEC);
         */

        clock = scd->tick_gtod + delta;
        min_clock = wrap_max(scd->tick_gtod, old_clock);
        max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC);

        clock = wrap_max(clock, min_clock);
        clock = wrap_min(clock, max_clock);

        /* retry if scd->clock changed underneath us */
        if (cmpxchg64(&scd->clock, old_clock, clock) != old_clock)
                goto again;

        return clock;
}
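
/*
 * Worked example of the window above, assuming HZ=1000 so TICK_NSEC is
 * roughly 1,000,000: with tick_gtod = 5,000,000 and old_clock no larger
 * than 6,000,000, the clock is clamped to
 * [max(5,000,000, old_clock), 6,000,000]; a crazy TSC delta of a full
 * second still yields at most 6,000,000, and a backwards TSC can never
 * pull the clock below old_clock.
 */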

static u64 sched_clock_remote(struct sched_clock_data *scd)
{
        struct sched_clock_data *my_scd = this_scd();
        u64 this_clock, remote_clock;
        u64 *ptr, old_val, val;

        sched_clock_local(my_scd);
again:
        this_clock = my_scd->clock;
        remote_clock = scd->clock;

        /*
         * Use the opportunity that we have both locks
         * taken to couple the two clocks: we take the
         * larger time as the latest time for both
         * runqueues. (this creates monotonic movement)
         */
        if (likely((s64)(remote_clock - this_clock) < 0)) {
                ptr = &scd->clock;
                old_val = remote_clock;
                val = this_clock;
        } else {
                /*
                 * Should be rare, but possible:
                 */
                ptr = &my_scd->clock;
                old_val = this_clock;
                val = remote_clock;
        }

        if (cmpxchg64(ptr, old_val, val) != old_val)
                goto again;

        return val;
}
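
/*
 * For example, if this cpu's clock reads 1000 ns while the remote cpu's
 * reads 900 ns, the remote clock is pulled forward to 1000 ns, so both
 * clocks agree on the later of the two times.
 */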

u64 sched_clock_cpu(int cpu)
{
        struct sched_clock_data *scd;
        u64 clock;

        WARN_ON_ONCE(!irqs_disabled());

        if (sched_clock_stable)
                return sched_clock();

        if (unlikely(!sched_clock_running))
                return 0ull;

        scd = cpu_sdc(cpu);

        if (cpu != smp_processor_id())
                clock = sched_clock_remote(scd);
        else
                clock = sched_clock_local(scd);

        return clock;
}
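
/*
 * Note: sched_clock_cpu() must be called with IRQs disabled, hence the
 * WARN_ON_ONCE() above; cpu_clock() below wraps it in
 * local_irq_save()/local_irq_restore() for use from any context.
 */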

void sched_clock_tick(void)
{
        struct sched_clock_data *scd;
        u64 now, now_gtod;

        if (sched_clock_stable)
                return;

        if (unlikely(!sched_clock_running))
                return;

        WARN_ON_ONCE(!irqs_disabled());

        scd = this_scd();
        now_gtod = ktime_to_ns(ktime_get());
        now = sched_clock();

        scd->tick_raw = now;
        scd->tick_gtod = now_gtod;
        sched_clock_local(scd);
}

/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
        sched_clock_cpu(smp_processor_id());
}
EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);

/*
 * We just idled delta nanoseconds (called with irqs disabled):
 */
void sched_clock_idle_wakeup_event(u64 delta_ns)
{
        if (timekeeping_suspended)
                return;

        sched_clock_tick();
        touch_softlockup_watchdog();
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);

unsigned long long cpu_clock(int cpu)
{
        unsigned long long clock;
        unsigned long flags;

        local_irq_save(flags);
        clock = sched_clock_cpu(cpu);
        local_irq_restore(flags);

        return clock;
}
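
/*
 * A minimal usage sketch (hypothetical caller; do_some_work() is just a
 * stand-in). The two reads may land on different cpus, but per the
 * header comment they should agree to within 2 jiffies:
 *
 *      u64 t0 = cpu_clock(raw_smp_processor_id());
 *      do_some_work();
 *      u64 t1 = cpu_clock(raw_smp_processor_id());
 *      pr_info("section took %llu ns\n", t1 - t0);
 */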

#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

void sched_clock_init(void)
{
        sched_clock_running = 1;
}

u64 sched_clock_cpu(int cpu)
{
        if (unlikely(!sched_clock_running))
                return 0;

        return sched_clock();
}

unsigned long long cpu_clock(int cpu)
{
        return sched_clock_cpu(cpu);
}

#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

EXPORT_SYMBOL_GPL(cpu_clock);