Commit | Line | Data |
---|---|---|
734efb46 JS |
1 | /* |
2 | * linux/kernel/time/clocksource.c | |
3 | * | |
4 | * This file contains the functions which manage clocksource drivers. | |
5 | * | |
6 | * Copyright (C) 2004, 2005 IBM, John Stultz ([email protected]) | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or modify | |
9 | * it under the terms of the GNU General Public License as published by | |
10 | * the Free Software Foundation; either version 2 of the License, or | |
11 | * (at your option) any later version. | |
12 | * | |
13 | * This program is distributed in the hope that it will be useful, | |
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
16 | * GNU General Public License for more details. | |
17 | * | |
18 | * You should have received a copy of the GNU General Public License | |
19 | * along with this program; if not, write to the Free Software | |
20 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | |
21 | * | |
22 | * TODO WishList: | |
23 | * o Allow clocksource drivers to be unregistered | |
734efb46 JS |
24 | */ |
25 | ||
45bbfe64 JP |
26 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
27 | ||
d369a5d8 | 28 | #include <linux/device.h> |
734efb46 | 29 | #include <linux/clocksource.h> |
734efb46 JS |
30 | #include <linux/init.h> |
31 | #include <linux/module.h> | |
dc29a365 | 32 | #include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */ |
79bf2bb3 | 33 | #include <linux/tick.h> |
01548f4d | 34 | #include <linux/kthread.h> |
734efb46 | 35 | |
c1797baf | 36 | #include "tick-internal.h" |
3a978377 | 37 | #include "timekeeping_internal.h" |
03e13cf5 | 38 | |
7d2f944a TG |
39 | /** |
40 | * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks | |
41 | * @mult: pointer to mult variable | |
42 | * @shift: pointer to shift variable | |
43 | * @from: frequency to convert from | |
44 | * @to: frequency to convert to | |
5fdade95 | 45 | * @maxsec: guaranteed runtime conversion range in seconds |
7d2f944a TG |
46 | * |
47 | * The function evaluates the shift/mult pair for the scaled math | |
48 | * operations of clocksources and clockevents. | |
49 | * | |
50 | * @to and @from are frequency values in HZ. For clock sources @to is | |
51 | * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock | |
52 | * events @to is the counter frequency and @from is NSEC_PER_SEC. | |
53 | * | |
5fdade95 | 54 | * The @maxsec conversion range argument controls the time frame in |
7d2f944a TG |
55 | * seconds which must be covered by the runtime conversion with the |
56 | * calculated mult and shift factors. This guarantees that no 64bit | |
57 | * overflow happens when the input value of the conversion is | |
58 | * multiplied with the calculated mult factor. Larger ranges may | |
59 | * reduce the conversion accuracy by choosing smaller mult and shift | |
60 | * factors. | |
61 | */ | |
62 | void | |
5fdade95 | 63 | clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec) |
7d2f944a TG |
64 | { |
65 | u64 tmp; | |
66 | u32 sft, sftacc= 32; | |
67 | ||
68 | /* | |
69 | * Calculate the shift factor which is limiting the conversion | |
70 | * range: | |
71 | */ | |
5fdade95 | 72 | tmp = ((u64)maxsec * from) >> 32; |
7d2f944a TG |
73 | while (tmp) { |
74 | tmp >>=1; | |
75 | sftacc--; | |
76 | } | |
77 | ||
78 | /* | |
79 | * Find the conversion shift/mult pair which has the best | |
80 | * accuracy and fits the maxsec conversion range: | |
81 | */ | |
82 | for (sft = 32; sft > 0; sft--) { | |
83 | tmp = (u64) to << sft; | |
b5776c4a | 84 | tmp += from / 2; |
7d2f944a TG |
85 | do_div(tmp, from); |
86 | if ((tmp >> sftacc) == 0) | |
87 | break; | |
88 | } | |
89 | *mult = tmp; | |
90 | *shift = sft; | |
91 | } | |
5304121a | 92 | EXPORT_SYMBOL_GPL(clocks_calc_mult_shift); |
7d2f944a | 93 | |
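
To make the fixed-point math above concrete, here is a user-space sketch of the same algorithm (a rough transcription, not the kernel code itself), run for a hypothetical 19.2 MHz counter converted to nanoseconds over a 600-second range. The resulting pair is what clocksource_cyc2ns() later applies as `ns = (cycles * mult) >> shift`, so converting one second's worth of cycles should come out to roughly NSEC_PER_SEC.

```c
#include <stdint.h>
#include <stdio.h>

/* Same algorithm as clocks_calc_mult_shift(), rewritten for user space. */
static void calc_mult_shift(uint32_t *mult, uint32_t *shift,
			    uint32_t from, uint32_t to, uint32_t maxsec)
{
	uint64_t tmp;
	uint32_t sft, sftacc = 32;

	/* Shrink the accuracy limit so (maxsec * from) * mult fits in 64 bits. */
	tmp = ((uint64_t)maxsec * from) >> 32;
	while (tmp) {
		tmp >>= 1;
		sftacc--;
	}

	/* Pick the largest shift whose rounded mult still fits the range. */
	for (sft = 32; sft > 0; sft--) {
		tmp = (uint64_t)to << sft;
		tmp += from / 2;
		tmp /= from;
		if ((tmp >> sftacc) == 0)
			break;
	}
	*mult = tmp;
	*shift = sft;
}

int main(void)
{
	/* Hypothetical 19.2 MHz counter converted to nanoseconds (1 GHz). */
	uint32_t mult, shift;

	calc_mult_shift(&mult, &shift, 19200000, 1000000000, 600);
	/* ns = (cycles * mult) >> shift */
	printf("mult=%u shift=%u -> 19200000 cycles = %llu ns\n",
	       mult, shift,
	       (unsigned long long)(((uint64_t)19200000 * mult) >> shift));
	return 0;
}
```

With these inputs the sketch yields mult = 873813333 and shift = 24, and 19,200,000 cycles convert to 999,999,999 ns, i.e. one second to within a nanosecond.
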
734efb46 JS |
94 | /*[Clocksource internal variables]--------- |
95 | * curr_clocksource: | |
f1b82746 | 96 | * currently selected clocksource. |
734efb46 JS |
97 | * clocksource_list: |
98 | * linked list with the registered clocksources | |
75c5158f MS |
99 | * clocksource_mutex: |
100 | * protects manipulations to curr_clocksource and the clocksource_list | |
734efb46 JS |
101 | * override_name: |
102 | * Name of the user-specified clocksource. | |
103 | */ | |
f1b82746 | 104 | static struct clocksource *curr_clocksource; |
734efb46 | 105 | static LIST_HEAD(clocksource_list); |
75c5158f | 106 | static DEFINE_MUTEX(clocksource_mutex); |
29b54078 | 107 | static char override_name[CS_NAME_LEN]; |
54a6bc0b | 108 | static int finished_booting; |
734efb46 | 109 | |
5d8b34fd | 110 | #ifdef CONFIG_CLOCKSOURCE_WATCHDOG |
f79e0258 | 111 | static void clocksource_watchdog_work(struct work_struct *work); |
332962f2 | 112 | static void clocksource_select(void); |
f79e0258 | 113 | |
5d8b34fd TG |
114 | static LIST_HEAD(watchdog_list); |
115 | static struct clocksource *watchdog; | |
116 | static struct timer_list watchdog_timer; | |
f79e0258 | 117 | static DECLARE_WORK(watchdog_work, clocksource_watchdog_work); |
5d8b34fd | 118 | static DEFINE_SPINLOCK(watchdog_lock); |
fb63a0eb | 119 | static int watchdog_running; |
9fb60336 | 120 | static atomic_t watchdog_reset_pending; |
b52f52a0 | 121 | |
01548f4d | 122 | static int clocksource_watchdog_kthread(void *data); |
d0981a1b | 123 | static void __clocksource_change_rating(struct clocksource *cs, int rating); |
c55c87c8 | 124 | |
5d8b34fd | 125 | /* |
35c35d1a | 126 | * Interval: 0.5sec Threshold: 0.0625s |
5d8b34fd TG |
127 | */ |
128 | #define WATCHDOG_INTERVAL (HZ >> 1) | |
35c35d1a | 129 | #define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4) |
5d8b34fd | 130 | |
01548f4d MS |
131 | static void clocksource_watchdog_work(struct work_struct *work) |
132 | { | |
133 | /* | |
134 | * If kthread_run fails the next watchdog scan over the | |
135 | * watchdog_list will find the unstable clock again. | |
136 | */ | |
137 | kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog"); | |
138 | } | |
139 | ||
7285dd7f | 140 | static void __clocksource_unstable(struct clocksource *cs) |
5d8b34fd | 141 | { |
5d8b34fd | 142 | cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG); |
c55c87c8 | 143 | cs->flags |= CLOCK_SOURCE_UNSTABLE; |
12907fbb TG |
144 | |
145 | if (cs->mark_unstable) | |
146 | cs->mark_unstable(cs); | |
147 | ||
54a6bc0b TG |
148 | if (finished_booting) |
149 | schedule_work(&watchdog_work); | |
5d8b34fd TG |
150 | } |
151 | ||
7285dd7f TG |
152 | /** |
153 | * clocksource_mark_unstable - mark clocksource unstable via watchdog | |
154 | * @cs: clocksource to be marked unstable | |
155 | * | |
156 | * This function is called instead of clocksource_change_rating from | |
157 | * cpu hotplug code to avoid a deadlock between the clocksource mutex | |
158 | * and the cpu hotplug mutex. It defers the update of the clocksource | |
159 | * to the watchdog thread. | |
160 | */ | |
161 | void clocksource_mark_unstable(struct clocksource *cs) | |
162 | { | |
163 | unsigned long flags; | |
164 | ||
165 | spin_lock_irqsave(&watchdog_lock, flags); | |
166 | if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) { | |
167 | if (list_empty(&cs->wd_list)) | |
168 | list_add(&cs->wd_list, &watchdog_list); | |
169 | __clocksource_unstable(cs); | |
170 | } | |
171 | spin_unlock_irqrestore(&watchdog_lock, flags); | |
172 | } | |
173 | ||
5d8b34fd TG |
174 | static void clocksource_watchdog(unsigned long data) |
175 | { | |
c55c87c8 | 176 | struct clocksource *cs; |
a5a1d1c2 | 177 | u64 csnow, wdnow, cslast, wdlast, delta; |
5d8b34fd | 178 | int64_t wd_nsec, cs_nsec; |
9fb60336 | 179 | int next_cpu, reset_pending; |
5d8b34fd TG |
180 | |
181 | spin_lock(&watchdog_lock); | |
fb63a0eb MS |
182 | if (!watchdog_running) |
183 | goto out; | |
5d8b34fd | 184 | |
9fb60336 TG |
185 | reset_pending = atomic_read(&watchdog_reset_pending); |
186 | ||
c55c87c8 MS |
187 | list_for_each_entry(cs, &watchdog_list, wd_list) { |
188 | ||
189 | /* Clocksource already marked unstable? */ | |
01548f4d | 190 | if (cs->flags & CLOCK_SOURCE_UNSTABLE) { |
54a6bc0b TG |
191 | if (finished_booting) |
192 | schedule_work(&watchdog_work); | |
c55c87c8 | 193 | continue; |
01548f4d | 194 | } |
c55c87c8 | 195 | |
b5199515 | 196 | local_irq_disable(); |
8e19608e | 197 | csnow = cs->read(cs); |
b5199515 TG |
198 | wdnow = watchdog->read(watchdog); |
199 | local_irq_enable(); | |
b52f52a0 | 200 | |
8cf4e750 | 201 | /* Clocksource initialized ? */ |
9fb60336 TG |
202 | if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) || |
203 | atomic_read(&watchdog_reset_pending)) { | |
8cf4e750 | 204 | cs->flags |= CLOCK_SOURCE_WATCHDOG; |
b5199515 TG |
205 | cs->wd_last = wdnow; |
206 | cs->cs_last = csnow; | |
b52f52a0 TG |
207 | continue; |
208 | } | |
209 | ||
3a978377 TG |
210 | delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask); |
211 | wd_nsec = clocksource_cyc2ns(delta, watchdog->mult, | |
212 | watchdog->shift); | |
b5199515 | 213 | |
3a978377 TG |
214 | delta = clocksource_delta(csnow, cs->cs_last, cs->mask); |
215 | cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift); | |
0b046b21 JS |
216 | wdlast = cs->wd_last; /* save these in case we print them */ |
217 | cslast = cs->cs_last; | |
b5199515 TG |
218 | cs->cs_last = csnow; |
219 | cs->wd_last = wdnow; | |
220 | ||
9fb60336 TG |
221 | if (atomic_read(&watchdog_reset_pending)) |
222 | continue; | |
223 | ||
b5199515 | 224 | /* Check the deviation from the watchdog clocksource. */ |
79211c8e | 225 | if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) { |
390dd67c SI |
226 | pr_warn("timekeeping watchdog on CPU%d: Marking clocksource '%s' as unstable because the skew is too large:\n", |
227 | smp_processor_id(), cs->name); | |
45bbfe64 | 228 | pr_warn(" '%s' wd_now: %llx wd_last: %llx mask: %llx\n", |
0b046b21 | 229 | watchdog->name, wdnow, wdlast, watchdog->mask); |
45bbfe64 | 230 | pr_warn(" '%s' cs_now: %llx cs_last: %llx mask: %llx\n", |
0b046b21 JS |
231 | cs->name, csnow, cslast, cs->mask); |
232 | __clocksource_unstable(cs); | |
8cf4e750 MS |
233 | continue; |
234 | } | |
235 | ||
b421b22b PZ |
236 | if (cs == curr_clocksource && cs->tick_stable) |
237 | cs->tick_stable(cs); | |
238 | ||
8cf4e750 MS |
239 | if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && |
240 | (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) && | |
241 | (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) { | |
332962f2 | 242 | /* Mark it valid for high-res. */ |
8cf4e750 | 243 | cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; |
332962f2 TG |
244 | |
245 | /* | |
246 | * clocksource_done_booting() will sort it if | |
247 | * finished_booting is not set yet. | |
248 | */ | |
249 | if (!finished_booting) | |
250 | continue; | |
251 | ||
8cf4e750 | 252 | /* |
332962f2 TG |
253 | * If this is not the current clocksource let |
254 | * the watchdog thread reselect it. Due to the | |
255 | * change to high res this clocksource might | |
256 | * be preferred now. If it is the current | |
257 | * clocksource let the tick code know about | |
258 | * that change. | |
8cf4e750 | 259 | */ |
332962f2 TG |
260 | if (cs != curr_clocksource) { |
261 | cs->flags |= CLOCK_SOURCE_RESELECT; | |
262 | schedule_work(&watchdog_work); | |
263 | } else { | |
264 | tick_clock_notify(); | |
265 | } | |
5d8b34fd TG |
266 | } |
267 | } | |
268 | ||
9fb60336 TG |
269 | /* |
270 | * We only clear the watchdog_reset_pending, when we did a | |
271 | * full cycle through all clocksources. | |
272 | */ | |
273 | if (reset_pending) | |
274 | atomic_dec(&watchdog_reset_pending); | |
275 | ||
c55c87c8 MS |
276 | /* |
277 | * Cycle through CPUs to check if the CPUs stay synchronized | |
278 | * to each other. | |
279 | */ | |
280 | next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask); | |
281 | if (next_cpu >= nr_cpu_ids) | |
282 | next_cpu = cpumask_first(cpu_online_mask); | |
283 | watchdog_timer.expires += WATCHDOG_INTERVAL; | |
284 | add_timer_on(&watchdog_timer, next_cpu); | |
fb63a0eb | 285 | out: |
5d8b34fd TG |
286 | spin_unlock(&watchdog_lock); |
287 | } | |
0f8e8ef7 | 288 | |
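
The core of the check above is plain interval arithmetic: a mask-aware cycle delta for both the watched clocksource and the watchdog, conversion to nanoseconds, and a comparison against WATCHDOG_THRESHOLD (62.5 ms). Below is a minimal user-space sketch with local stand-ins for clocksource_delta() and clocksource_cyc2ns(); the register values are hypothetical and describe a 32-bit counter that wrapped between two watchdog runs, using the illustrative mult/shift pair from the earlier sketch.

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NSEC_PER_SEC		1000000000LL
#define WATCHDOG_THRESHOLD	(NSEC_PER_SEC >> 4)	/* 62.5 ms */

/* Mask-aware cycle delta, same idea as clocksource_delta(). */
static uint64_t cyc_delta(uint64_t now, uint64_t last, uint64_t mask)
{
	return (now - last) & mask;
}

/* Same math as clocksource_cyc2ns(): ns = (cycles * mult) >> shift. */
static int64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
	return (int64_t)((cycles * mult) >> shift);
}

int main(void)
{
	/* Hypothetical 32-bit, ~19.2 MHz counter that wrapped during the interval. */
	uint64_t cs_mask = 0xffffffffULL;
	uint64_t cs_last = 0xffa00000ULL, cs_now = 0x00327c00ULL;
	uint32_t cs_mult = 873813333, cs_shift = 24;

	int64_t cs_nsec = cyc2ns(cyc_delta(cs_now, cs_last, cs_mask),
				 cs_mult, cs_shift);
	int64_t wd_nsec = NSEC_PER_SEC / 2;	/* watchdog measured 0.5 s */
	long long skew = llabs((long long)(cs_nsec - wd_nsec));

	printf("cs: %lld ns, wd: %lld ns, skew: %lld ns -> %s\n",
	       (long long)cs_nsec, (long long)wd_nsec, skew,
	       skew > WATCHDOG_THRESHOLD ? "mark unstable" : "ok");
	return 0;
}
```

Despite the wrap, the masked delta is 9,600,000 cycles, which converts to 499,999,999 ns against the watchdog's 500,000,000 ns, so the 1 ns skew stays far under the threshold and the clocksource is left alone.
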
fb63a0eb MS |
289 | static inline void clocksource_start_watchdog(void) |
290 | { | |
291 | if (watchdog_running || !watchdog || list_empty(&watchdog_list)) | |
292 | return; | |
293 | init_timer(&watchdog_timer); | |
294 | watchdog_timer.function = clocksource_watchdog; | |
fb63a0eb MS |
295 | watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL; |
296 | add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask)); | |
297 | watchdog_running = 1; | |
298 | } | |
299 | ||
300 | static inline void clocksource_stop_watchdog(void) | |
301 | { | |
302 | if (!watchdog_running || (watchdog && !list_empty(&watchdog_list))) | |
303 | return; | |
304 | del_timer(&watchdog_timer); | |
305 | watchdog_running = 0; | |
306 | } | |
307 | ||
0f8e8ef7 MS |
308 | static inline void clocksource_reset_watchdog(void) |
309 | { | |
310 | struct clocksource *cs; | |
311 | ||
312 | list_for_each_entry(cs, &watchdog_list, wd_list) | |
313 | cs->flags &= ~CLOCK_SOURCE_WATCHDOG; | |
314 | } | |
315 | ||
b52f52a0 TG |
316 | static void clocksource_resume_watchdog(void) |
317 | { | |
9fb60336 | 318 | atomic_inc(&watchdog_reset_pending); |
b52f52a0 TG |
319 | } |
320 | ||
fb63a0eb | 321 | static void clocksource_enqueue_watchdog(struct clocksource *cs) |
5d8b34fd | 322 | { |
5d8b34fd TG |
323 | unsigned long flags; |
324 | ||
325 | spin_lock_irqsave(&watchdog_lock, flags); | |
326 | if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) { | |
fb63a0eb | 327 | /* cs is a clocksource to be watched. */ |
5d8b34fd | 328 | list_add(&cs->wd_list, &watchdog_list); |
fb63a0eb | 329 | cs->flags &= ~CLOCK_SOURCE_WATCHDOG; |
948ac6d7 | 330 | } else { |
fb63a0eb | 331 | /* cs is a watchdog. */ |
948ac6d7 | 332 | if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) |
5d8b34fd | 333 | cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; |
bbf66d89 VK |
334 | } |
335 | spin_unlock_irqrestore(&watchdog_lock, flags); | |
336 | } | |
337 | ||
338 | static void clocksource_select_watchdog(bool fallback) | |
339 | { | |
340 | struct clocksource *cs, *old_wd; | |
341 | unsigned long flags; | |
342 | ||
343 | spin_lock_irqsave(&watchdog_lock, flags); | |
344 | /* save current watchdog */ | |
345 | old_wd = watchdog; | |
346 | if (fallback) | |
347 | watchdog = NULL; | |
348 | ||
349 | list_for_each_entry(cs, &clocksource_list, list) { | |
350 | /* cs is a clocksource to be watched. */ | |
351 | if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) | |
352 | continue; | |
353 | ||
354 | /* Skip current if we were requested for a fallback. */ | |
355 | if (fallback && cs == old_wd) | |
356 | continue; | |
357 | ||
fb63a0eb | 358 | /* Pick the best watchdog. */ |
bbf66d89 | 359 | if (!watchdog || cs->rating > watchdog->rating) |
5d8b34fd | 360 | watchdog = cs; |
5d8b34fd | 361 | } |
bbf66d89 VK |
362 | /* If we failed to find a fallback restore the old one. */ |
363 | if (!watchdog) | |
364 | watchdog = old_wd; | |
365 | ||
366 | /* If we changed the watchdog we need to reset cycles. */ | |
367 | if (watchdog != old_wd) | |
368 | clocksource_reset_watchdog(); | |
369 | ||
fb63a0eb MS |
370 | /* Check if the watchdog timer needs to be started. */ |
371 | clocksource_start_watchdog(); | |
5d8b34fd TG |
372 | spin_unlock_irqrestore(&watchdog_lock, flags); |
373 | } | |
fb63a0eb MS |
374 | |
375 | static void clocksource_dequeue_watchdog(struct clocksource *cs) | |
376 | { | |
fb63a0eb MS |
377 | unsigned long flags; |
378 | ||
379 | spin_lock_irqsave(&watchdog_lock, flags); | |
a89c7edb TG |
380 | if (cs != watchdog) { |
381 | if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) { | |
382 | /* cs is a watched clocksource. */ | |
383 | list_del_init(&cs->wd_list); | |
384 | /* Check if the watchdog timer needs to be stopped. */ | |
385 | clocksource_stop_watchdog(); | |
fb63a0eb MS |
386 | } |
387 | } | |
fb63a0eb MS |
388 | spin_unlock_irqrestore(&watchdog_lock, flags); |
389 | } | |
390 | ||
332962f2 | 391 | static int __clocksource_watchdog_kthread(void) |
c55c87c8 MS |
392 | { |
393 | struct clocksource *cs, *tmp; | |
394 | unsigned long flags; | |
6ea41d25 | 395 | LIST_HEAD(unstable); |
332962f2 | 396 | int select = 0; |
c55c87c8 MS |
397 | |
398 | spin_lock_irqsave(&watchdog_lock, flags); | |
332962f2 | 399 | list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) { |
c55c87c8 MS |
400 | if (cs->flags & CLOCK_SOURCE_UNSTABLE) { |
401 | list_del_init(&cs->wd_list); | |
6ea41d25 | 402 | list_add(&cs->wd_list, &unstable); |
332962f2 TG |
403 | select = 1; |
404 | } | |
405 | if (cs->flags & CLOCK_SOURCE_RESELECT) { | |
406 | cs->flags &= ~CLOCK_SOURCE_RESELECT; | |
407 | select = 1; | |
c55c87c8 | 408 | } |
332962f2 | 409 | } |
c55c87c8 MS |
410 | /* Check if the watchdog timer needs to be stopped. */ |
411 | clocksource_stop_watchdog(); | |
6ea41d25 TG |
412 | spin_unlock_irqrestore(&watchdog_lock, flags); |
413 | ||
414 | /* Needs to be done outside of watchdog lock */ | |
415 | list_for_each_entry_safe(cs, tmp, &unstable, wd_list) { | |
416 | list_del_init(&cs->wd_list); | |
d0981a1b | 417 | __clocksource_change_rating(cs, 0); |
6ea41d25 | 418 | } |
332962f2 TG |
419 | return select; |
420 | } | |
421 | ||
422 | static int clocksource_watchdog_kthread(void *data) | |
423 | { | |
424 | mutex_lock(&clocksource_mutex); | |
425 | if (__clocksource_watchdog_kthread()) | |
426 | clocksource_select(); | |
d0981a1b | 427 | mutex_unlock(&clocksource_mutex); |
01548f4d | 428 | return 0; |
c55c87c8 MS |
429 | } |
430 | ||
7eaeb343 TG |
431 | static bool clocksource_is_watchdog(struct clocksource *cs) |
432 | { | |
433 | return cs == watchdog; | |
434 | } | |
435 | ||
fb63a0eb MS |
436 | #else /* CONFIG_CLOCKSOURCE_WATCHDOG */ |
437 | ||
438 | static void clocksource_enqueue_watchdog(struct clocksource *cs) | |
5d8b34fd TG |
439 | { |
440 | if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) | |
441 | cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; | |
442 | } | |
b52f52a0 | 443 | |
bbf66d89 | 444 | static void clocksource_select_watchdog(bool fallback) { } |
fb63a0eb | 445 | static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { } |
b52f52a0 | 446 | static inline void clocksource_resume_watchdog(void) { } |
332962f2 | 447 | static inline int __clocksource_watchdog_kthread(void) { return 0; } |
7eaeb343 | 448 | static bool clocksource_is_watchdog(struct clocksource *cs) { return false; } |
397bbf6d | 449 | void clocksource_mark_unstable(struct clocksource *cs) { } |
fb63a0eb MS |
450 | |
451 | #endif /* CONFIG_CLOCKSOURCE_WATCHDOG */ | |
5d8b34fd | 452 | |
c54a42b1 MD |
453 | /** |
454 | * clocksource_suspend - suspend the clocksource(s) | |
455 | */ | |
456 | void clocksource_suspend(void) | |
457 | { | |
458 | struct clocksource *cs; | |
459 | ||
460 | list_for_each_entry_reverse(cs, &clocksource_list, list) | |
461 | if (cs->suspend) | |
462 | cs->suspend(cs); | |
463 | } | |
464 | ||
b52f52a0 TG |
465 | /** |
466 | * clocksource_resume - resume the clocksource(s) | |
467 | */ | |
468 | void clocksource_resume(void) | |
469 | { | |
2e197586 | 470 | struct clocksource *cs; |
b52f52a0 | 471 | |
75c5158f | 472 | list_for_each_entry(cs, &clocksource_list, list) |
b52f52a0 | 473 | if (cs->resume) |
17622339 | 474 | cs->resume(cs); |
b52f52a0 TG |
475 | |
476 | clocksource_resume_watchdog(); | |
b52f52a0 TG |
477 | } |
478 | ||
7c3078b6 JW |
479 | /** |
480 | * clocksource_touch_watchdog - Update watchdog | |
481 | * | |
482 | * Update the watchdog after exception contexts such as kgdb so as not | |
7b7422a5 TG |
483 | * to incorrectly trip the watchdog. This might fail when the kernel |
484 | * was stopped in code which holds watchdog_lock. | |
7c3078b6 JW |
485 | */ |
486 | void clocksource_touch_watchdog(void) | |
487 | { | |
488 | clocksource_resume_watchdog(); | |
489 | } | |
490 | ||
d65670a7 JS |
491 | /** |
492 | * clocksource_max_adjustment- Returns max adjustment amount | |
493 | * @cs: Pointer to clocksource | |
494 | * | |
495 | */ | |
496 | static u32 clocksource_max_adjustment(struct clocksource *cs) | |
497 | { | |
498 | u64 ret; | |
499 | /* | |
88b28adf | 500 | * We won't try to correct for more than 11% adjustments (110,000 ppm), |
d65670a7 JS |
501 | */ |
502 | ret = (u64)cs->mult * 11; | |
503 | do_div(ret,100); | |
504 | return (u32)ret; | |
505 | } | |
506 | ||
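
As a quick worked example with the hypothetical mult value used in the sketches above, the cap works out to about eleven percent of the multiplier:

```
maxadj = (873813333 * 11) / 100 = 96119466	/* ~11% of mult */
```

so NTP frequency corrections can never push the effective multiplier outside roughly mult ± 11% without tripping the overflow checks in __clocksource_update_freq_scale() below.
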
98962465 | 507 | /** |
87d8b9eb SB |
508 | * clocks_calc_max_nsecs - Returns maximum nanoseconds that can be converted |
509 | * @mult: cycle to nanosecond multiplier | |
510 | * @shift: cycle to nanosecond divisor (power of two) | |
511 | * @maxadj: maximum adjustment value to mult (~11%) | |
512 | * @mask: bitmask for two's complement subtraction of non 64 bit counters | |
fb82fe2f JS |
513 | * @max_cyc: maximum cycle value before potential overflow (does not include |
514 | * any safety margin) | |
362fde04 | 515 | * |
8e56f33f JS |
516 | * NOTE: This function includes a safety margin of 50%, in other words, we |
517 | * return half the number of nanoseconds the hardware counter can technically | |
518 | * cover. This is done so that we can potentially detect problems caused by | |
519 | * delayed timers or bad hardware, which might result in time intervals that | |
571af55a | 520 | * are larger than what the math used can handle without overflows. |
98962465 | 521 | */ |
fb82fe2f | 522 | u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cyc) |
98962465 JH |
523 | { |
524 | u64 max_nsecs, max_cycles; | |
525 | ||
526 | /* | |
527 | * Calculate the maximum number of cycles that we can pass to the | |
6086e346 | 528 | * cyc2ns() function without overflowing a 64-bit result. |
98962465 | 529 | */ |
6086e346 JS |
530 | max_cycles = ULLONG_MAX; |
531 | do_div(max_cycles, mult+maxadj); | |
98962465 JH |
532 | |
533 | /* | |
534 | * The actual maximum number of cycles we can defer the clocksource is | |
87d8b9eb | 535 | * determined by the minimum of max_cycles and mask. |
d65670a7 JS |
536 | * Note: Here we subtract the maxadj to make sure we don't sleep for |
537 | * too long if there's a large negative adjustment. | |
98962465 | 538 | */ |
87d8b9eb SB |
539 | max_cycles = min(max_cycles, mask); |
540 | max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift); | |
541 | ||
fb82fe2f JS |
542 | /* return the max_cycles value as well if requested */ |
543 | if (max_cyc) | |
544 | *max_cyc = max_cycles; | |
545 | ||
362fde04 JS |
546 | /* Return 50% of the actual maximum, so we can detect bad values */ |
547 | max_nsecs >>= 1; | |
548 | ||
87d8b9eb SB |
549 | return max_nsecs; |
550 | } | |
551 | ||
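
Plugging a hypothetical 32-bit, ~19.2 MHz clocksource (the same illustrative mult/shift as above) through this formula shows where the deferment limit comes from; the sketch below mirrors the steps of clocks_calc_max_nsecs() in user space.

```c
#include <stdint.h>
#include <stdio.h>

/* User-space rendition of clocks_calc_max_nsecs() for one hypothetical case. */
int main(void)
{
	/* Hypothetical 32-bit, ~19.2 MHz clocksource (mult/shift as computed earlier). */
	uint64_t mask = 0xffffffffULL;
	uint32_t mult = 873813333, shift = 24;
	uint32_t maxadj = (uint64_t)mult * 11 / 100;	/* clocksource_max_adjustment() */

	/* Cycles that can be converted without overflowing a 64-bit result. */
	uint64_t max_cycles = UINT64_MAX / (mult + maxadj);

	if (max_cycles > mask)
		max_cycles = mask;

	/* Worst case: largest negative NTP adjustment applied. */
	uint64_t max_nsecs = (max_cycles * (mult - maxadj)) >> shift;

	/* 50% safety margin, exactly like the kernel code. */
	max_nsecs >>= 1;

	printf("max_cycles=%llu max_idle_ns=%llu (~%llu s)\n",
	       (unsigned long long)max_cycles,
	       (unsigned long long)max_nsecs,
	       (unsigned long long)(max_nsecs / 1000000000ULL));
	return 0;
}
```

Here the 32-bit mask, not the 64-bit multiplication, is the limiting factor, and after the 50% margin the result is roughly 99 seconds of allowed deferment for this hypothetical counter.
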
552 | /** | |
fb82fe2f JS |
553 | * clocksource_update_max_deferment - Updates the clocksource max_idle_ns & max_cycles |
554 | * @cs: Pointer to clocksource to be updated | |
87d8b9eb SB |
555 | * |
556 | */ | |
fb82fe2f | 557 | static inline void clocksource_update_max_deferment(struct clocksource *cs) |
87d8b9eb | 558 | { |
fb82fe2f JS |
559 | cs->max_idle_ns = clocks_calc_max_nsecs(cs->mult, cs->shift, |
560 | cs->maxadj, cs->mask, | |
561 | &cs->max_cycles); | |
98962465 JH |
562 | } |
563 | ||
592913ec | 564 | #ifndef CONFIG_ARCH_USES_GETTIMEOFFSET |
734efb46 | 565 | |
f5a2e343 | 566 | static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur) |
5d33b883 TG |
567 | { |
568 | struct clocksource *cs; | |
569 | ||
570 | if (!finished_booting || list_empty(&clocksource_list)) | |
571 | return NULL; | |
572 | ||
573 | /* | |
574 | * We pick the clocksource with the highest rating. If oneshot | |
575 | * mode is active, we pick the highres valid clocksource with | |
576 | * the best rating. | |
577 | */ | |
578 | list_for_each_entry(cs, &clocksource_list, list) { | |
f5a2e343 TG |
579 | if (skipcur && cs == curr_clocksource) |
580 | continue; | |
5d33b883 TG |
581 | if (oneshot && !(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES)) |
582 | continue; | |
583 | return cs; | |
584 | } | |
585 | return NULL; | |
586 | } | |
587 | ||
f5a2e343 | 588 | static void __clocksource_select(bool skipcur) |
734efb46 | 589 | { |
5d33b883 | 590 | bool oneshot = tick_oneshot_mode_active(); |
f1b82746 | 591 | struct clocksource *best, *cs; |
5d8b34fd | 592 | |
5d33b883 | 593 | /* Find the best suitable clocksource */ |
f5a2e343 | 594 | best = clocksource_find_best(oneshot, skipcur); |
5d33b883 | 595 | if (!best) |
f1b82746 | 596 | return; |
5d33b883 | 597 | |
f1b82746 MS |
598 | /* Check for the override clocksource. */ |
599 | list_for_each_entry(cs, &clocksource_list, list) { | |
f5a2e343 TG |
600 | if (skipcur && cs == curr_clocksource) |
601 | continue; | |
f1b82746 MS |
602 | if (strcmp(cs->name, override_name) != 0) |
603 | continue; | |
604 | /* | |
605 | * Check to make sure we don't switch to a non-highres | |
606 | * capable clocksource if the tick code is in oneshot | |
607 | * mode (highres or nohz) | |
608 | */ | |
5d33b883 | 609 | if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && oneshot) { |
f1b82746 | 610 | /* Override clocksource cannot be used. */ |
36374583 KW |
611 | if (cs->flags & CLOCK_SOURCE_UNSTABLE) { |
612 | pr_warn("Override clocksource %s is unstable and not HRT compatible - cannot switch while in HRT/NOHZ mode\n", | |
613 | cs->name); | |
614 | override_name[0] = 0; | |
615 | } else { | |
616 | /* | |
617 | * The override cannot be currently verified. | |
618 | * Deferring to let the watchdog check. | |
619 | */ | |
620 | pr_info("Override clocksource %s is not currently HRT compatible - deferring\n", | |
621 | cs->name); | |
622 | } | |
f1b82746 MS |
623 | } else |
624 | /* Override clocksource can be used. */ | |
625 | best = cs; | |
626 | break; | |
627 | } | |
ba919d1c TG |
628 | |
629 | if (curr_clocksource != best && !timekeeping_notify(best)) { | |
630 | pr_info("Switched to clocksource %s\n", best->name); | |
75c5158f | 631 | curr_clocksource = best; |
75c5158f | 632 | } |
f1b82746 | 633 | } |
734efb46 | 634 | |
f5a2e343 TG |
635 | /** |
636 | * clocksource_select - Select the best clocksource available | |
637 | * | |
638 | * Private function. Must hold clocksource_mutex when called. | |
639 | * | |
640 | * Select the clocksource with the best rating, or the clocksource, | |
641 | * which is selected by userspace override. | |
642 | */ | |
643 | static void clocksource_select(void) | |
644 | { | |
cfed432d | 645 | __clocksource_select(false); |
f5a2e343 TG |
646 | } |
647 | ||
7eaeb343 TG |
648 | static void clocksource_select_fallback(void) |
649 | { | |
cfed432d | 650 | __clocksource_select(true); |
7eaeb343 TG |
651 | } |
652 | ||
592913ec | 653 | #else /* !CONFIG_ARCH_USES_GETTIMEOFFSET */ |
54a6bc0b | 654 | static inline void clocksource_select(void) { } |
1eaff672 | 655 | static inline void clocksource_select_fallback(void) { } |
54a6bc0b TG |
656 | |
657 | #endif | |
658 | ||
75c5158f MS |
659 | /* |
660 | * clocksource_done_booting - Called near the end of core bootup | |
661 | * | |
662 | * Hack to avoid lots of clocksource churn at boot time. | |
663 | * We use fs_initcall because we want this to start before | |
664 | * device_initcall but after subsys_initcall. | |
665 | */ | |
666 | static int __init clocksource_done_booting(void) | |
667 | { | |
ad6759fb JS |
668 | mutex_lock(&clocksource_mutex); |
669 | curr_clocksource = clocksource_default_clock(); | |
75c5158f | 670 | finished_booting = 1; |
54a6bc0b TG |
671 | /* |
672 | * Run the watchdog first to eliminate unstable clock sources | |
673 | */ | |
332962f2 | 674 | __clocksource_watchdog_kthread(); |
75c5158f | 675 | clocksource_select(); |
e6c73305 | 676 | mutex_unlock(&clocksource_mutex); |
75c5158f MS |
677 | return 0; |
678 | } | |
679 | fs_initcall(clocksource_done_booting); | |
680 | ||
92c7e002 TG |
681 | /* |
682 | * Enqueue the clocksource sorted by rating | |
734efb46 | 683 | */ |
f1b82746 | 684 | static void clocksource_enqueue(struct clocksource *cs) |
734efb46 | 685 | { |
f1b82746 MS |
686 | struct list_head *entry = &clocksource_list; |
687 | struct clocksource *tmp; | |
92c7e002 | 688 | |
0fb71d34 | 689 | list_for_each_entry(tmp, &clocksource_list, list) { |
92c7e002 | 690 | /* Keep track of the place, where to insert */ |
0fb71d34 MH |
691 | if (tmp->rating < cs->rating) |
692 | break; | |
693 | entry = &tmp->list; | |
694 | } | |
f1b82746 | 695 | list_add(&cs->list, entry); |
734efb46 JS |
696 | } |
697 | ||
d7e81c26 | 698 | /** |
fba9e072 | 699 | * __clocksource_update_freq_scale - Used to update clocksource with new freq |
b1b73d09 | 700 | * @cs: clocksource to be registered |
d7e81c26 JS |
701 | * @scale: Scale factor multiplied against freq to get clocksource hz |
702 | * @freq: clocksource frequency (cycles per second) divided by scale | |
703 | * | |
852db46d | 704 | * This should only be called from the clocksource->enable() method. |
d7e81c26 JS |
705 | * |
706 | * This *SHOULD NOT* be called directly! Please use the | |
fba9e072 JS |
707 | * __clocksource_update_freq_hz() or __clocksource_update_freq_khz() helper |
708 | * functions. | |
d7e81c26 | 709 | */ |
fba9e072 | 710 | void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq) |
d7e81c26 | 711 | { |
c0e299b1 | 712 | u64 sec; |
f8935983 | 713 | |
d7e81c26 | 714 | /* |
f8935983 JS |
715 | * Default clocksources are *special* and self-define their mult/shift. |
716 | * But, you're not special, so you should specify a freq value. | |
d7e81c26 | 717 | */ |
f8935983 JS |
718 | if (freq) { |
719 | /* | |
720 | * Calc the maximum number of seconds which we can run before | |
721 | * wrapping around. For clocksources which have a mask > 32-bit | |
722 | * we need to limit the max sleep time to have a good | |
723 | * conversion precision. 10 minutes is still a reasonable | |
724 | * amount. That results in a shift value of 24 for a | |
725 | * clocksource with mask >= 40-bit and f >= 4GHz. That maps to | |
726 | * ~ 0.06ppm granularity for NTP. | |
727 | */ | |
728 | sec = cs->mask; | |
729 | do_div(sec, freq); | |
730 | do_div(sec, scale); | |
731 | if (!sec) | |
732 | sec = 1; | |
733 | else if (sec > 600 && cs->mask > UINT_MAX) | |
734 | sec = 600; | |
735 | ||
736 | clocks_calc_mult_shift(&cs->mult, &cs->shift, freq, | |
737 | NSEC_PER_SEC / scale, sec * scale); | |
738 | } | |
d65670a7 | 739 | /* |
362fde04 JS |
740 | * Ensure clocksources that have large 'mult' values don't overflow |
741 | * when adjusted. | |
d65670a7 JS |
742 | */ |
743 | cs->maxadj = clocksource_max_adjustment(cs); | |
f8935983 JS |
744 | while (freq && ((cs->mult + cs->maxadj < cs->mult) |
745 | || (cs->mult - cs->maxadj > cs->mult))) { | |
d65670a7 JS |
746 | cs->mult >>= 1; |
747 | cs->shift--; | |
748 | cs->maxadj = clocksource_max_adjustment(cs); | |
749 | } | |
750 | ||
f8935983 JS |
751 | /* |
752 | * Only warn for *special* clocksources that self-define | |
753 | * their mult/shift values and don't specify a freq. | |
754 | */ | |
755 | WARN_ONCE(cs->mult + cs->maxadj < cs->mult, | |
756 | "timekeeping: Clocksource %s might overflow on 11%% adjustment\n", | |
757 | cs->name); | |
758 | ||
fb82fe2f | 759 | clocksource_update_max_deferment(cs); |
8cc8c525 | 760 | |
45bbfe64 JP |
761 | pr_info("%s: mask: 0x%llx max_cycles: 0x%llx, max_idle_ns: %lld ns\n", |
762 | cs->name, cs->mask, cs->max_cycles, cs->max_idle_ns); | |
852db46d | 763 | } |
fba9e072 | 764 | EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale); |
852db46d JS |
765 | |
766 | /** | |
767 | * __clocksource_register_scale - Used to install new clocksources | |
b1b73d09 | 768 | * @cs: clocksource to be registered |
852db46d JS |
769 | * @scale: Scale factor multiplied against freq to get clocksource hz |
770 | * @freq: clocksource frequency (cycles per second) divided by scale | |
771 | * | |
772 | * Returns -EBUSY if registration fails, zero otherwise. | |
773 | * | |
774 | * This *SHOULD NOT* be called directly! Please use the | |
775 | * clocksource_register_hz() or clocksource_register_khz() helper functions. |
776 | */ | |
777 | int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq) | |
778 | { | |
779 | ||
b595076a | 780 | /* Initialize mult/shift and max_idle_ns */ |
fba9e072 | 781 | __clocksource_update_freq_scale(cs, scale, freq); |
d7e81c26 | 782 | |
be278e98 | 783 | /* Add clocksource to the clocksource list */ |
d7e81c26 JS |
784 | mutex_lock(&clocksource_mutex); |
785 | clocksource_enqueue(cs); | |
d7e81c26 | 786 | clocksource_enqueue_watchdog(cs); |
e05b2efb | 787 | clocksource_select(); |
bbf66d89 | 788 | clocksource_select_watchdog(false); |
d7e81c26 JS |
789 | mutex_unlock(&clocksource_mutex); |
790 | return 0; | |
791 | } | |
792 | EXPORT_SYMBOL_GPL(__clocksource_register_scale); | |
793 | ||
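
For context, a typical caller sits in a clocksource driver rather than in this file. The sketch below is hypothetical (device, names, and frequency are made up) but uses only the documented registration path: fill in a struct clocksource and call clocksource_register_hz(), which wraps __clocksource_register_scale(cs, 1, hz).

```c
/*
 * Hypothetical driver-side usage (not part of this file): a memory-mapped
 * free-running counter registered with clocksource_register_hz().
 */
#include <linux/clocksource.h>
#include <linux/io.h>

static void __iomem *example_counter_base;	/* assumed ioremap()ed elsewhere */

static u64 example_counter_read(struct clocksource *cs)
{
	return readl_relaxed(example_counter_base);
}

static struct clocksource example_clocksource = {
	.name	= "example-counter",
	.rating	= 300,				/* good, but not the best possible */
	.read	= example_counter_read,
	.mask	= CLOCKSOURCE_MASK(32),		/* 32-bit counter */
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int example_counter_init(void)
{
	/* 19.2 MHz is an illustrative frequency, not a real device. */
	return clocksource_register_hz(&example_clocksource, 19200000);
}
```

Drivers that want the watchdog machinery above to cross-check their counter additionally set CLOCK_SOURCE_MUST_VERIFY in .flags, which makes clocksource_enqueue_watchdog() put the new source on the watchdog_list.
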
d0981a1b TG |
794 | static void __clocksource_change_rating(struct clocksource *cs, int rating) |
795 | { | |
796 | list_del(&cs->list); | |
797 | cs->rating = rating; | |
798 | clocksource_enqueue(cs); | |
d0981a1b TG |
799 | } |
800 | ||
734efb46 | 801 | /** |
92c7e002 | 802 | * clocksource_change_rating - Change the rating of a registered clocksource |
b1b73d09 KK |
803 | * @cs: clocksource to be changed |
804 | * @rating: new rating | |
734efb46 | 805 | */ |
92c7e002 | 806 | void clocksource_change_rating(struct clocksource *cs, int rating) |
734efb46 | 807 | { |
75c5158f | 808 | mutex_lock(&clocksource_mutex); |
d0981a1b | 809 | __clocksource_change_rating(cs, rating); |
332962f2 | 810 | clocksource_select(); |
bbf66d89 | 811 | clocksource_select_watchdog(false); |
75c5158f | 812 | mutex_unlock(&clocksource_mutex); |
734efb46 | 813 | } |
fb63a0eb | 814 | EXPORT_SYMBOL(clocksource_change_rating); |
734efb46 | 815 | |
7eaeb343 TG |
816 | /* |
817 | * Unbind clocksource @cs. Called with clocksource_mutex held | |
818 | */ | |
819 | static int clocksource_unbind(struct clocksource *cs) | |
820 | { | |
bbf66d89 VK |
821 | if (clocksource_is_watchdog(cs)) { |
822 | /* Select and try to install a replacement watchdog. */ | |
823 | clocksource_select_watchdog(true); | |
824 | if (clocksource_is_watchdog(cs)) | |
825 | return -EBUSY; | |
826 | } | |
7eaeb343 TG |
827 | |
828 | if (cs == curr_clocksource) { | |
829 | /* Select and try to install a replacement clock source */ | |
830 | clocksource_select_fallback(); | |
831 | if (curr_clocksource == cs) | |
832 | return -EBUSY; | |
833 | } | |
834 | clocksource_dequeue_watchdog(cs); | |
835 | list_del_init(&cs->list); | |
836 | return 0; | |
837 | } | |
838 | ||
4713e22c TG |
839 | /** |
840 | * clocksource_unregister - remove a registered clocksource | |
b1b73d09 | 841 | * @cs: clocksource to be unregistered |
4713e22c | 842 | */ |
a89c7edb | 843 | int clocksource_unregister(struct clocksource *cs) |
4713e22c | 844 | { |
a89c7edb TG |
845 | int ret = 0; |
846 | ||
75c5158f | 847 | mutex_lock(&clocksource_mutex); |
a89c7edb TG |
848 | if (!list_empty(&cs->list)) |
849 | ret = clocksource_unbind(cs); | |
75c5158f | 850 | mutex_unlock(&clocksource_mutex); |
a89c7edb | 851 | return ret; |
4713e22c | 852 | } |
fb63a0eb | 853 | EXPORT_SYMBOL(clocksource_unregister); |
4713e22c | 854 | |
2b013700 | 855 | #ifdef CONFIG_SYSFS |
734efb46 JS |
856 | /** |
857 | * sysfs_show_current_clocksources - sysfs interface for current clocksource | |
858 | * @dev: unused | |
b1b73d09 | 859 | * @attr: unused |
734efb46 JS |
860 | * @buf: char buffer to be filled with clocksource list |
861 | * | |
862 | * Provides sysfs interface for listing current clocksource. | |
863 | */ | |
864 | static ssize_t | |
d369a5d8 KS |
865 | sysfs_show_current_clocksources(struct device *dev, |
866 | struct device_attribute *attr, char *buf) | |
734efb46 | 867 | { |
5e2cb101 | 868 | ssize_t count = 0; |
734efb46 | 869 | |
75c5158f | 870 | mutex_lock(&clocksource_mutex); |
5e2cb101 | 871 | count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name); |
75c5158f | 872 | mutex_unlock(&clocksource_mutex); |
734efb46 | 873 | |
5e2cb101 | 874 | return count; |
734efb46 JS |
875 | } |
876 | ||
891292a7 | 877 | ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt) |
29b54078 TG |
878 | { |
879 | size_t ret = cnt; | |
880 | ||
881 | /* strings from sysfs write are not 0 terminated! */ | |
882 | if (!cnt || cnt >= CS_NAME_LEN) | |
883 | return -EINVAL; | |
884 | ||
885 | /* strip off \n: */ | |
886 | if (buf[cnt-1] == '\n') | |
887 | cnt--; | |
888 | if (cnt > 0) | |
889 | memcpy(dst, buf, cnt); | |
890 | dst[cnt] = 0; | |
891 | return ret; | |
892 | } | |
893 | ||
734efb46 JS |
894 | /** |
895 | * sysfs_override_clocksource - interface for manually overriding clocksource | |
896 | * @dev: unused | |
b1b73d09 | 897 | * @attr: unused |
734efb46 JS |
898 | * @buf: name of override clocksource |
899 | * @count: length of buffer | |
900 | * | |
901 | * Takes input from sysfs interface for manually overriding the default | |
b71a8eb0 | 902 | * clocksource selection. |
734efb46 | 903 | */ |
d369a5d8 KS |
904 | static ssize_t sysfs_override_clocksource(struct device *dev, |
905 | struct device_attribute *attr, | |
734efb46 JS |
906 | const char *buf, size_t count) |
907 | { | |
233bcb41 | 908 | ssize_t ret; |
734efb46 | 909 | |
75c5158f | 910 | mutex_lock(&clocksource_mutex); |
734efb46 | 911 | |
03e13cf5 | 912 | ret = sysfs_get_uname(buf, override_name, count); |
29b54078 TG |
913 | if (ret >= 0) |
914 | clocksource_select(); | |
734efb46 | 915 | |
75c5158f | 916 | mutex_unlock(&clocksource_mutex); |
734efb46 JS |
917 | |
918 | return ret; | |
919 | } | |
920 | ||
7eaeb343 TG |
921 | /** |
922 | * sysfs_unbind_current_clocksource - interface for manually unbinding clocksource | |
923 | * @dev: unused | |
924 | * @attr: unused | |
925 | * @buf: unused | |
926 | * @count: length of buffer | |
927 | * | |
928 | * Takes input from sysfs interface for manually unbinding a clocksource. | |
929 | */ | |
930 | static ssize_t sysfs_unbind_clocksource(struct device *dev, | |
931 | struct device_attribute *attr, | |
932 | const char *buf, size_t count) | |
933 | { | |
934 | struct clocksource *cs; | |
935 | char name[CS_NAME_LEN]; | |
233bcb41 | 936 | ssize_t ret; |
7eaeb343 | 937 | |
03e13cf5 | 938 | ret = sysfs_get_uname(buf, name, count); |
7eaeb343 TG |
939 | if (ret < 0) |
940 | return ret; | |
941 | ||
942 | ret = -ENODEV; | |
943 | mutex_lock(&clocksource_mutex); | |
944 | list_for_each_entry(cs, &clocksource_list, list) { | |
945 | if (strcmp(cs->name, name)) | |
946 | continue; | |
947 | ret = clocksource_unbind(cs); | |
948 | break; | |
949 | } | |
950 | mutex_unlock(&clocksource_mutex); | |
951 | ||
952 | return ret ? ret : count; | |
953 | } | |
954 | ||
734efb46 JS |
955 | /** |
956 | * sysfs_show_available_clocksources - sysfs interface for listing clocksource | |
957 | * @dev: unused | |
b1b73d09 | 958 | * @attr: unused |
734efb46 JS |
959 | * @buf: char buffer to be filled with clocksource list |
960 | * | |
961 | * Provides sysfs interface for listing registered clocksources | |
962 | */ | |
963 | static ssize_t | |
d369a5d8 KS |
964 | sysfs_show_available_clocksources(struct device *dev, |
965 | struct device_attribute *attr, | |
4a0b2b4d | 966 | char *buf) |
734efb46 | 967 | { |
2e197586 | 968 | struct clocksource *src; |
5e2cb101 | 969 | ssize_t count = 0; |
734efb46 | 970 | |
75c5158f | 971 | mutex_lock(&clocksource_mutex); |
2e197586 | 972 | list_for_each_entry(src, &clocksource_list, list) { |
cd6d95d8 TG |
973 | /* |
974 | * Don't show non-HRES clocksource if the tick code is | |
975 | * in one shot mode (highres=on or nohz=on) | |
976 | */ | |
977 | if (!tick_oneshot_mode_active() || | |
978 | (src->flags & CLOCK_SOURCE_VALID_FOR_HRES)) | |
3f68535a | 979 | count += snprintf(buf + count, |
5e2cb101 MX |
980 | max((ssize_t)PAGE_SIZE - count, (ssize_t)0), |
981 | "%s ", src->name); | |
734efb46 | 982 | } |
75c5158f | 983 | mutex_unlock(&clocksource_mutex); |
734efb46 | 984 | |
5e2cb101 MX |
985 | count += snprintf(buf + count, |
986 | max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n"); | |
734efb46 | 987 | |
5e2cb101 | 988 | return count; |
734efb46 JS |
989 | } |
990 | ||
991 | /* | |
992 | * Sysfs setup bits: | |
993 | */ | |
d369a5d8 | 994 | static DEVICE_ATTR(current_clocksource, 0644, sysfs_show_current_clocksources, |
f5f1a24a | 995 | sysfs_override_clocksource); |
734efb46 | 996 | |
7eaeb343 TG |
997 | static DEVICE_ATTR(unbind_clocksource, 0200, NULL, sysfs_unbind_clocksource); |
998 | ||
d369a5d8 | 999 | static DEVICE_ATTR(available_clocksource, 0444, |
f5f1a24a | 1000 | sysfs_show_available_clocksources, NULL); |
734efb46 | 1001 | |
d369a5d8 | 1002 | static struct bus_type clocksource_subsys = { |
af5ca3f4 | 1003 | .name = "clocksource", |
d369a5d8 | 1004 | .dev_name = "clocksource", |
734efb46 JS |
1005 | }; |
1006 | ||
d369a5d8 | 1007 | static struct device device_clocksource = { |
734efb46 | 1008 | .id = 0, |
d369a5d8 | 1009 | .bus = &clocksource_subsys, |
734efb46 JS |
1010 | }; |
1011 | ||
ad596171 | 1012 | static int __init init_clocksource_sysfs(void) |
734efb46 | 1013 | { |
d369a5d8 | 1014 | int error = subsys_system_register(&clocksource_subsys, NULL); |
734efb46 JS |
1015 | |
1016 | if (!error) | |
d369a5d8 | 1017 | error = device_register(&device_clocksource); |
734efb46 | 1018 | if (!error) |
d369a5d8 | 1019 | error = device_create_file( |
734efb46 | 1020 | &device_clocksource, |
d369a5d8 | 1021 | &dev_attr_current_clocksource); |
7eaeb343 TG |
1022 | if (!error) |
1023 | error = device_create_file(&device_clocksource, | |
1024 | &dev_attr_unbind_clocksource); | |
734efb46 | 1025 | if (!error) |
d369a5d8 | 1026 | error = device_create_file( |
734efb46 | 1027 | &device_clocksource, |
d369a5d8 | 1028 | &dev_attr_available_clocksource); |
734efb46 JS |
1029 | return error; |
1030 | } | |
1031 | ||
1032 | device_initcall(init_clocksource_sysfs); | |
2b013700 | 1033 | #endif /* CONFIG_SYSFS */ |
734efb46 JS |
1034 | |
1035 | /** | |
1036 | * boot_override_clocksource - boot clock override | |
1037 | * @str: override name | |
1038 | * | |
1039 | * Takes a clocksource= boot argument and uses it | |
1040 | * as the clocksource override name. | |
1041 | */ | |
1042 | static int __init boot_override_clocksource(char* str) | |
1043 | { | |
75c5158f | 1044 | mutex_lock(&clocksource_mutex); |
734efb46 JS |
1045 | if (str) |
1046 | strlcpy(override_name, str, sizeof(override_name)); | |
75c5158f | 1047 | mutex_unlock(&clocksource_mutex); |
734efb46 JS |
1048 | return 1; |
1049 | } | |
1050 | ||
1051 | __setup("clocksource=", boot_override_clocksource); | |
1052 | ||
1053 | /** | |
1054 | * boot_override_clock - Compatibility layer for deprecated boot option | |
1055 | * @str: override name | |
1056 | * | |
1057 | * DEPRECATED! Takes a clock= boot argument and uses it | |
1058 | * as the clocksource override name | |
1059 | */ | |
1060 | static int __init boot_override_clock(char* str) | |
1061 | { | |
5d0cf410 | 1062 | if (!strcmp(str, "pmtmr")) { |
45bbfe64 | 1063 | pr_warn("clock=pmtmr is deprecated - use clocksource=acpi_pm\n"); |
5d0cf410 JS |
1064 | return boot_override_clocksource("acpi_pm"); |
1065 | } | |
45bbfe64 | 1066 | pr_warn("clock= boot option is deprecated - use clocksource=xyz\n"); |
734efb46 JS |
1067 | return boot_override_clocksource(str); |
1068 | } | |
1069 | ||
1070 | __setup("clock=", boot_override_clock); |