Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/kernel/timer.c | |
3 | * | |
4 | * Kernel internal timers, kernel timekeeping, basic process system calls | |
5 | * | |
6 | * Copyright (C) 1991, 1992 Linus Torvalds | |
7 | * | |
8 | * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better. | |
9 | * | |
10 | * 1997-09-10 Updated NTP code according to technical memorandum Jan '96 | |
11 | * "A Kernel Model for Precision Timekeeping" by Dave Mills | |
12 | * 1998-12-24 Fixed a xtime SMP race (we need the xtime_lock rw spinlock to | |
13 | * serialize accesses to xtime/lost_ticks). | |
14 | * Copyright (C) 1998 Andrea Arcangeli | |
15 | * 1999-03-10 Improved NTP compatibility by Ulrich Windl | |
16 | * 2002-05-31 Move sys_sysinfo here and make its locking sane, Robert Love | |
17 | * 2000-10-05 Implemented scalable SMP per-CPU timer handling. | |
18 | * Copyright (C) 2000, 2001, 2002 Ingo Molnar | |
19 | * Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar | |
20 | */ | |
21 | ||
22 | #include <linux/kernel_stat.h> | |
23 | #include <linux/module.h> | |
24 | #include <linux/interrupt.h> | |
25 | #include <linux/percpu.h> | |
26 | #include <linux/init.h> | |
27 | #include <linux/mm.h> | |
28 | #include <linux/swap.h> | |
29 | #include <linux/notifier.h> | |
30 | #include <linux/thread_info.h> | |
31 | #include <linux/time.h> | |
32 | #include <linux/jiffies.h> | |
33 | #include <linux/posix-timers.h> | |
34 | #include <linux/cpu.h> | |
35 | #include <linux/syscalls.h> | |
36 | ||
37 | #include <asm/uaccess.h> | |
38 | #include <asm/unistd.h> | |
39 | #include <asm/div64.h> | |
40 | #include <asm/timex.h> | |
41 | #include <asm/io.h> | |
42 | ||
43 | #ifdef CONFIG_TIME_INTERPOLATION | |
44 | static void time_interpolator_update(long delta_nsec); | |
45 | #else | |
46 | #define time_interpolator_update(x) | |
47 | #endif | |
48 | ||
49 | /* | |
50 | * per-CPU timer vector definitions: | |
51 | */ | |
52 | ||
53 | #define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6) | |
54 | #define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8) | |
55 | #define TVN_SIZE (1 << TVN_BITS) | |
56 | #define TVR_SIZE (1 << TVR_BITS) | |
57 | #define TVN_MASK (TVN_SIZE - 1) | |
58 | #define TVR_MASK (TVR_SIZE - 1) | |
59 | ||
55c888d6 ON |
60 | struct timer_base_s { |
61 | spinlock_t lock; | |
62 | struct timer_list *running_timer; | |
63 | }; | |
64 | ||
1da177e4 LT |
65 | typedef struct tvec_s { |
66 | struct list_head vec[TVN_SIZE]; | |
67 | } tvec_t; | |
68 | ||
69 | typedef struct tvec_root_s { | |
70 | struct list_head vec[TVR_SIZE]; | |
71 | } tvec_root_t; | |
72 | ||
73 | struct tvec_t_base_s { | |
55c888d6 | 74 | struct timer_base_s t_base; |
1da177e4 | 75 | unsigned long timer_jiffies; |
1da177e4 LT |
76 | tvec_root_t tv1; |
77 | tvec_t tv2; | |
78 | tvec_t tv3; | |
79 | tvec_t tv4; | |
80 | tvec_t tv5; | |
81 | } ____cacheline_aligned_in_smp; | |
82 | ||
83 | typedef struct tvec_t_base_s tvec_base_t; | |
55c888d6 | 84 | static DEFINE_PER_CPU(tvec_base_t, tvec_bases); |
1da177e4 LT |
85 | |
86 | static inline void set_running_timer(tvec_base_t *base, | |
87 | struct timer_list *timer) | |
88 | { | |
89 | #ifdef CONFIG_SMP | |
55c888d6 | 90 | base->t_base.running_timer = timer; |
1da177e4 LT |
91 | #endif |
92 | } | |
93 | ||
1da177e4 LT |
94 | static void check_timer_failed(struct timer_list *timer) |
95 | { | |
96 | static int whine_count; | |
97 | if (whine_count < 16) { | |
98 | whine_count++; | |
99 | printk("Uninitialised timer!\n"); | |
100 | printk("This is just a warning. Your computer is OK\n"); | |
101 | printk("function=0x%p, data=0x%lx\n", | |
102 | timer->function, timer->data); | |
103 | dump_stack(); | |
104 | } | |
105 | /* | |
106 | * Now fix it up | |
107 | */ | |
1da177e4 LT |
108 | timer->magic = TIMER_MAGIC; |
109 | } | |
110 | ||
111 | static inline void check_timer(struct timer_list *timer) | |
112 | { | |
113 | if (timer->magic != TIMER_MAGIC) | |
114 | check_timer_failed(timer); | |
115 | } | |
116 | ||
117 | ||
118 | static void internal_add_timer(tvec_base_t *base, struct timer_list *timer) | |
119 | { | |
120 | unsigned long expires = timer->expires; | |
121 | unsigned long idx = expires - base->timer_jiffies; | |
122 | struct list_head *vec; | |
123 | ||
124 | if (idx < TVR_SIZE) { | |
125 | int i = expires & TVR_MASK; | |
126 | vec = base->tv1.vec + i; | |
127 | } else if (idx < 1 << (TVR_BITS + TVN_BITS)) { | |
128 | int i = (expires >> TVR_BITS) & TVN_MASK; | |
129 | vec = base->tv2.vec + i; | |
130 | } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) { | |
131 | int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK; | |
132 | vec = base->tv3.vec + i; | |
133 | } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) { | |
134 | int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK; | |
135 | vec = base->tv4.vec + i; | |
136 | } else if ((signed long) idx < 0) { | |
137 | /* | |
138 | * Can happen if you add a timer with expires == jiffies, | |
139 | * or you set a timer to go off in the past | |
140 | */ | |
141 | vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK); | |
142 | } else { | |
143 | int i; | |
144 | /* If the timeout is larger than 0xffffffff on 64-bit | |
145 | * architectures then we use the maximum timeout: | |
146 | */ | |
147 | if (idx > 0xffffffffUL) { | |
148 | idx = 0xffffffffUL; | |
149 | expires = idx + base->timer_jiffies; | |
150 | } | |
151 | i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK; | |
152 | vec = base->tv5.vec + i; | |
153 | } | |
154 | /* | |
155 | * Timers are FIFO: | |
156 | */ | |
157 | list_add_tail(&timer->entry, vec); | |
158 | } | |
159 | ||
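A minimal userspace sketch of the bucket selection done by internal_add_timer() above, assuming the !CONFIG_BASE_SMALL sizes (TVR_BITS = 8, TVN_BITS = 6); the wheel_bucket() helper and the sample values are invented for illustration:

#include <stdio.h>

#define TVN_BITS 6              /* assumes !CONFIG_BASE_SMALL */
#define TVR_BITS 8
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)

/* Report which vector (tv1..tv5) and which slot a timer would land in. */
static void wheel_bucket(unsigned long expires, unsigned long timer_jiffies,
			 int *tv, int *slot)
{
	unsigned long idx = expires - timer_jiffies;

	if (idx < TVR_SIZE) {
		*tv = 1; *slot = expires & TVR_MASK;
	} else if (idx < 1UL << (TVR_BITS + TVN_BITS)) {
		*tv = 2; *slot = (expires >> TVR_BITS) & TVN_MASK;
	} else if (idx < 1UL << (TVR_BITS + 2 * TVN_BITS)) {
		*tv = 3; *slot = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
	} else if (idx < 1UL << (TVR_BITS + 3 * TVN_BITS)) {
		*tv = 4; *slot = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
	} else {
		*tv = 5; *slot = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
	}
}

int main(void)
{
	int tv, slot;

	/* A timer 300 jiffies out: past tv1 (256 slots), so it lands in tv2. */
	wheel_bucket(1300, 1000, &tv, &slot);
	printf("expires=1300, now=1000 -> tv%d slot %d\n", tv, slot);
	return 0;
}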
55c888d6 ON |
160 | typedef struct timer_base_s timer_base_t; |
161 | /* | |
162 | * Used by TIMER_INITIALIZER, we can't use per_cpu(tvec_bases) | |
163 | * at compile time, and we need timer->base to lock the timer. | |
164 | */ | |
165 | timer_base_t __init_timer_base | |
166 | ____cacheline_aligned_in_smp = { .lock = SPIN_LOCK_UNLOCKED }; | |
167 | EXPORT_SYMBOL(__init_timer_base); | |
168 | ||
169 | /*** | |
170 | * init_timer - initialize a timer. | |
171 | * @timer: the timer to be initialized | |
172 | * | |
173 | * init_timer() must be done to a timer prior to calling *any* of the | |
174 | * other timer functions. | |
175 | */ | |
176 | void fastcall init_timer(struct timer_list *timer) | |
177 | { | |
178 | timer->entry.next = NULL; | |
179 | timer->base = &per_cpu(tvec_bases, raw_smp_processor_id()).t_base; | |
180 | timer->magic = TIMER_MAGIC; | |
181 | } | |
182 | EXPORT_SYMBOL(init_timer); | |
183 | ||
184 | static inline void detach_timer(struct timer_list *timer, | |
185 | int clear_pending) | |
186 | { | |
187 | struct list_head *entry = &timer->entry; | |
188 | ||
189 | __list_del(entry->prev, entry->next); | |
190 | if (clear_pending) | |
191 | entry->next = NULL; | |
192 | entry->prev = LIST_POISON2; | |
193 | } | |
194 | ||
195 | /* | |
196 | * We are using hashed locking: holding per_cpu(tvec_bases).t_base.lock | |
197 | * means that all timers which are tied to this base via timer->base are | |
198 | * locked, and the base itself is locked too. | |
199 | * | |
200 | * So __run_timers/migrate_timers can safely modify all timers which could | |
201 | * be found on ->tvX lists. | |
202 | * | |
203 | * When the timer's base is locked, and the timer removed from list, it is | |
204 | * possible to set timer->base = NULL and drop the lock: the timer remains | |
205 | * locked. | |
206 | */ | |
207 | static timer_base_t *lock_timer_base(struct timer_list *timer, | |
208 | unsigned long *flags) | |
209 | { | |
210 | timer_base_t *base; | |
211 | ||
212 | for (;;) { | |
213 | base = timer->base; | |
214 | if (likely(base != NULL)) { | |
215 | spin_lock_irqsave(&base->lock, *flags); | |
216 | if (likely(base == timer->base)) | |
217 | return base; | |
218 | /* The timer has migrated to another CPU */ | |
219 | spin_unlock_irqrestore(&base->lock, *flags); | |
220 | } | |
221 | cpu_relax(); | |
222 | } | |
223 | } | |
224 | ||
1da177e4 LT |
225 | int __mod_timer(struct timer_list *timer, unsigned long expires) |
226 | { | |
55c888d6 ON |
227 | timer_base_t *base; |
228 | tvec_base_t *new_base; | |
1da177e4 LT |
229 | unsigned long flags; |
230 | int ret = 0; | |
231 | ||
232 | BUG_ON(!timer->function); | |
1da177e4 LT |
233 | check_timer(timer); |
234 | ||
55c888d6 ON |
235 | base = lock_timer_base(timer, &flags); |
236 | ||
237 | if (timer_pending(timer)) { | |
238 | detach_timer(timer, 0); | |
239 | ret = 1; | |
240 | } | |
241 | ||
1da177e4 | 242 | new_base = &__get_cpu_var(tvec_bases); |
1da177e4 | 243 | |
55c888d6 | 244 | if (base != &new_base->t_base) { |
1da177e4 | 245 | /* |
55c888d6 ON |
246 | * We are trying to schedule the timer on the local CPU. |
247 | * However we can't change the timer's base while it is running, | |
248 | * otherwise del_timer_sync() can't detect that the timer's | |
249 | * handler has not yet finished. This also guarantees that | |
250 | * the timer is serialized wrt itself. | |
1da177e4 | 251 | */ |
55c888d6 ON |
252 | if (unlikely(base->running_timer == timer)) { |
253 | /* The timer remains on a former base */ | |
254 | new_base = container_of(base, tvec_base_t, t_base); | |
255 | } else { | |
256 | /* See the comment in lock_timer_base() */ | |
257 | timer->base = NULL; | |
258 | spin_unlock(&base->lock); | |
259 | spin_lock(&new_base->t_base.lock); | |
260 | timer->base = &new_base->t_base; | |
1da177e4 LT |
261 | } |
262 | } | |
263 | ||
1da177e4 LT |
264 | timer->expires = expires; |
265 | internal_add_timer(new_base, timer); | |
55c888d6 | 266 | spin_unlock_irqrestore(&new_base->t_base.lock, flags); |
1da177e4 LT |
267 | |
268 | return ret; | |
269 | } | |
270 | ||
271 | EXPORT_SYMBOL(__mod_timer); | |
272 | ||
273 | /*** | |
274 | * add_timer_on - start a timer on a particular CPU | |
275 | * @timer: the timer to be added | |
276 | * @cpu: the CPU to start it on | |
277 | * | |
278 | * This is not very scalable on SMP. Double adds are not possible. | |
279 | */ | |
280 | void add_timer_on(struct timer_list *timer, int cpu) | |
281 | { | |
282 | tvec_base_t *base = &per_cpu(tvec_bases, cpu); | |
283 | unsigned long flags; | |
55c888d6 | 284 | |
1da177e4 LT |
285 | BUG_ON(timer_pending(timer) || !timer->function); |
286 | ||
287 | check_timer(timer); | |
288 | ||
55c888d6 ON |
289 | spin_lock_irqsave(&base->t_base.lock, flags); |
290 | timer->base = &base->t_base; | |
1da177e4 | 291 | internal_add_timer(base, timer); |
55c888d6 | 292 | spin_unlock_irqrestore(&base->t_base.lock, flags); |
1da177e4 LT |
293 | } |
294 | ||
295 | ||
296 | /*** | |
297 | * mod_timer - modify a timer's timeout | |
298 | * @timer: the timer to be modified | |
299 | * | |
300 | * mod_timer is a more efficient way to update the expire field of an | |
301 | * active timer (if the timer is inactive it will be activated) | |
302 | * | |
303 | * mod_timer(timer, expires) is equivalent to: | |
304 | * | |
305 | * del_timer(timer); timer->expires = expires; add_timer(timer); | |
306 | * | |
307 | * Note that if there are multiple unserialized concurrent users of the | |
308 | * same timer, then mod_timer() is the only safe way to modify the timeout, | |
309 | * since add_timer() cannot modify an already running timer. | |
310 | * | |
311 | * The function returns whether it has modified a pending timer or not. | |
312 | * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an | |
313 | * active timer returns 1.) | |
314 | */ | |
315 | int mod_timer(struct timer_list *timer, unsigned long expires) | |
316 | { | |
317 | BUG_ON(!timer->function); | |
318 | ||
319 | check_timer(timer); | |
320 | ||
321 | /* | |
322 | * This is a common optimization triggered by the | |
323 | * networking code - if the timer is re-modified | |
324 | * to be the same thing then just return: | |
325 | */ | |
326 | if (timer->expires == expires && timer_pending(timer)) | |
327 | return 1; | |
328 | ||
329 | return __mod_timer(timer, expires); | |
330 | } | |
331 | ||
332 | EXPORT_SYMBOL(mod_timer); | |
333 | ||
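A hedged sketch of how driver code of this era typically uses the API above; the my_dev structure, my_timeout() handler and the one-second period are invented for illustration, only init_timer()/mod_timer()/del_timer_sync() come from this file:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_dev {                           /* hypothetical device structure */
	struct timer_list watchdog;
};

static void my_timeout(unsigned long data)
{
	struct my_dev *dev = (struct my_dev *)data;

	/* ... handle the timeout, then re-arm one second from now ... */
	mod_timer(&dev->watchdog, jiffies + HZ);
}

static void my_dev_start(struct my_dev *dev)
{
	init_timer(&dev->watchdog);       /* must precede any other timer call */
	dev->watchdog.function = my_timeout;
	dev->watchdog.data = (unsigned long)dev;
	mod_timer(&dev->watchdog, jiffies + HZ);  /* activates it if not pending */
}

static void my_dev_stop(struct my_dev *dev)
{
	del_timer_sync(&dev->watchdog);   /* also waits for a running handler */
}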
334 | /*** | |
335 | * del_timer - deactivate a timer. | |
336 | * @timer: the timer to be deactivated | |
337 | * | |
338 | * del_timer() deactivates a timer - this works on both active and inactive | |
339 | * timers. | |
340 | * | |
341 | * The function returns whether it has deactivated a pending timer or not. | |
342 | * (ie. del_timer() of an inactive timer returns 0, del_timer() of an | |
343 | * active timer returns 1.) | |
344 | */ | |
345 | int del_timer(struct timer_list *timer) | |
346 | { | |
55c888d6 | 347 | timer_base_t *base; |
1da177e4 | 348 | unsigned long flags; |
55c888d6 | 349 | int ret = 0; |
1da177e4 LT |
350 | |
351 | check_timer(timer); | |
352 | ||
55c888d6 ON |
353 | if (timer_pending(timer)) { |
354 | base = lock_timer_base(timer, &flags); | |
355 | if (timer_pending(timer)) { | |
356 | detach_timer(timer, 1); | |
357 | ret = 1; | |
358 | } | |
1da177e4 | 359 | spin_unlock_irqrestore(&base->lock, flags); |
1da177e4 | 360 | } |
1da177e4 | 361 | |
55c888d6 | 362 | return ret; |
1da177e4 LT |
363 | } |
364 | ||
365 | EXPORT_SYMBOL(del_timer); | |
366 | ||
367 | #ifdef CONFIG_SMP | |
fd450b73 ON |
368 | /* |
369 | * This function tries to deactivate a timer. Upon successful (ret >= 0) | |
370 | * exit the timer is not queued and the handler is not running on any CPU. | |
371 | * | |
372 | * It must not be called from interrupt contexts. | |
373 | */ | |
374 | int try_to_del_timer_sync(struct timer_list *timer) | |
375 | { | |
376 | timer_base_t *base; | |
377 | unsigned long flags; | |
378 | int ret = -1; | |
379 | ||
380 | base = lock_timer_base(timer, &flags); | |
381 | ||
382 | if (base->running_timer == timer) | |
383 | goto out; | |
384 | ||
385 | ret = 0; | |
386 | if (timer_pending(timer)) { | |
387 | detach_timer(timer, 1); | |
388 | ret = 1; | |
389 | } | |
390 | out: | |
391 | spin_unlock_irqrestore(&base->lock, flags); | |
392 | ||
393 | return ret; | |
394 | } | |
395 | ||
1da177e4 LT |
396 | /*** |
397 | * del_timer_sync - deactivate a timer and wait for the handler to finish. | |
398 | * @timer: the timer to be deactivated | |
399 | * | |
400 | * This function only differs from del_timer() on SMP: besides deactivating | |
401 | * the timer it also makes sure the handler has finished executing on other | |
402 | * CPUs. | |
403 | * | |
404 | * Synchronization rules: callers must prevent restarting of the timer, | |
405 | * otherwise this function is meaningless. It must not be called from | |
406 | * interrupt contexts. The caller must not hold locks which would prevent | |
55c888d6 ON |
407 | * completion of the timer's handler. The timer's handler must not call |
408 | * add_timer_on(). Upon exit the timer is not queued and the handler is | |
409 | * not running on any CPU. | |
1da177e4 LT |
410 | * |
411 | * The function returns whether it has deactivated a pending timer or not. | |
1da177e4 LT |
412 | */ |
413 | int del_timer_sync(struct timer_list *timer) | |
414 | { | |
1da177e4 LT |
415 | check_timer(timer); |
416 | ||
fd450b73 ON |
417 | for (;;) { |
418 | int ret = try_to_del_timer_sync(timer); | |
419 | if (ret >= 0) | |
420 | return ret; | |
421 | } | |
1da177e4 | 422 | } |
1da177e4 | 423 | |
55c888d6 | 424 | EXPORT_SYMBOL(del_timer_sync); |
1da177e4 LT |
425 | #endif |
426 | ||
427 | static int cascade(tvec_base_t *base, tvec_t *tv, int index) | |
428 | { | |
429 | /* cascade all the timers from tv up one level */ | |
430 | struct list_head *head, *curr; | |
431 | ||
432 | head = tv->vec + index; | |
433 | curr = head->next; | |
434 | /* | |
435 | * We are removing _all_ timers from the list, so we don't have to | |
436 | * detach them individually, just clear the list afterwards. | |
437 | */ | |
438 | while (curr != head) { | |
439 | struct timer_list *tmp; | |
440 | ||
441 | tmp = list_entry(curr, struct timer_list, entry); | |
55c888d6 | 442 | BUG_ON(tmp->base != &base->t_base); |
1da177e4 LT |
443 | curr = curr->next; |
444 | internal_add_timer(base, tmp); | |
445 | } | |
446 | INIT_LIST_HEAD(head); | |
447 | ||
448 | return index; | |
449 | } | |
450 | ||
451 | /*** | |
452 | * __run_timers - run all expired timers (if any) on this CPU. | |
453 | * @base: the timer vector to be processed. | |
454 | * | |
455 | * This function cascades all vectors and runs all expired timers | |
456 | * found in them. | |
457 | */ | |
458 | #define INDEX(N) (base->timer_jiffies >> (TVR_BITS + N * TVN_BITS)) & TVN_MASK | |
459 | ||
460 | static inline void __run_timers(tvec_base_t *base) | |
461 | { | |
462 | struct timer_list *timer; | |
463 | ||
55c888d6 | 464 | spin_lock_irq(&base->t_base.lock); |
1da177e4 LT |
465 | while (time_after_eq(jiffies, base->timer_jiffies)) { |
466 | struct list_head work_list = LIST_HEAD_INIT(work_list); | |
467 | struct list_head *head = &work_list; | |
468 | int index = base->timer_jiffies & TVR_MASK; | |
469 | ||
470 | /* | |
471 | * Cascade timers: | |
472 | */ | |
473 | if (!index && | |
474 | (!cascade(base, &base->tv2, INDEX(0))) && | |
475 | (!cascade(base, &base->tv3, INDEX(1))) && | |
476 | !cascade(base, &base->tv4, INDEX(2))) | |
477 | cascade(base, &base->tv5, INDEX(3)); | |
478 | ++base->timer_jiffies; | |
479 | list_splice_init(base->tv1.vec + index, &work_list); | |
55c888d6 | 480 | while (!list_empty(head)) { |
1da177e4 LT |
481 | void (*fn)(unsigned long); |
482 | unsigned long data; | |
483 | ||
484 | timer = list_entry(head->next,struct timer_list,entry); | |
485 | fn = timer->function; | |
486 | data = timer->data; | |
487 | ||
1da177e4 | 488 | set_running_timer(base, timer); |
55c888d6 ON |
489 | detach_timer(timer, 1); |
490 | spin_unlock_irq(&base->t_base.lock); | |
1da177e4 | 491 | { |
be5b4fbd | 492 | int preempt_count = preempt_count(); |
1da177e4 LT |
493 | fn(data); |
494 | if (preempt_count != preempt_count()) { | |
be5b4fbd JJ |
495 | printk(KERN_WARNING "huh, entered %p " |
496 | "with preempt_count %08x, exited" | |
497 | " with %08x?\n", | |
498 | fn, preempt_count, | |
499 | preempt_count()); | |
1da177e4 LT |
500 | BUG(); |
501 | } | |
502 | } | |
55c888d6 | 503 | spin_lock_irq(&base->t_base.lock); |
1da177e4 LT |
504 | } |
505 | } | |
506 | set_running_timer(base, NULL); | |
55c888d6 | 507 | spin_unlock_irq(&base->t_base.lock); |
1da177e4 LT |
508 | } |
509 | ||
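A standalone sketch of when the cascades above actually fire, again assuming TVR_BITS = 8 and TVN_BITS = 6: tv2 is drained whenever the low TVR_BITS of timer_jiffies wrap to zero (every 256 ticks), and tv3 only when the tv2 index itself wraps (every 256 * 64 ticks):

#include <stdio.h>

#define TVN_BITS 6      /* assumed !CONFIG_BASE_SMALL values */
#define TVR_BITS 8
#define TVN_MASK ((1 << TVN_BITS) - 1)
#define TVR_MASK ((1 << TVR_BITS) - 1)

#define INDEX(base_jiffies, N) \
	(((base_jiffies) >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)

int main(void)
{
	unsigned long j;

	/* The first few ticks at which tv2 gets drained into tv1. */
	for (j = 1; j <= 3UL * (1 << TVR_BITS); j++)
		if ((j & TVR_MASK) == 0)
			printf("tick %lu: drain tv2 slot %lu\n", j, INDEX(j, 0));

	/* tv3 is only touched when the tv2 index itself wraps back to 0. */
	j = 1UL << (TVR_BITS + TVN_BITS);
	printf("tick %lu: tv2 index wraps -> drain tv3 slot %lu\n", j, INDEX(j, 1));
	return 0;
}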
510 | #ifdef CONFIG_NO_IDLE_HZ | |
511 | /* | |
512 | * Find out when the next timer event is due to happen. This | |
513 | * is used on S/390 to stop all activity when a cpu is idle. | |
514 | * This function needs to be called with interrupts disabled. | |
515 | */ | |
516 | unsigned long next_timer_interrupt(void) | |
517 | { | |
518 | tvec_base_t *base; | |
519 | struct list_head *list; | |
520 | struct timer_list *nte; | |
521 | unsigned long expires; | |
522 | tvec_t *varray[4]; | |
523 | int i, j; | |
524 | ||
525 | base = &__get_cpu_var(tvec_bases); | |
55c888d6 | 526 | spin_lock(&base->t_base.lock); |
1da177e4 LT |
527 | expires = base->timer_jiffies + (LONG_MAX >> 1); |
528 | list = 0; | |
529 | ||
530 | /* Look for timer events in tv1. */ | |
531 | j = base->timer_jiffies & TVR_MASK; | |
532 | do { | |
533 | list_for_each_entry(nte, base->tv1.vec + j, entry) { | |
534 | expires = nte->expires; | |
535 | if (j < (base->timer_jiffies & TVR_MASK)) | |
536 | list = base->tv2.vec + (INDEX(0)); | |
537 | goto found; | |
538 | } | |
539 | j = (j + 1) & TVR_MASK; | |
540 | } while (j != (base->timer_jiffies & TVR_MASK)); | |
541 | ||
542 | /* Check tv2-tv5. */ | |
543 | varray[0] = &base->tv2; | |
544 | varray[1] = &base->tv3; | |
545 | varray[2] = &base->tv4; | |
546 | varray[3] = &base->tv5; | |
547 | for (i = 0; i < 4; i++) { | |
548 | j = INDEX(i); | |
549 | do { | |
550 | if (list_empty(varray[i]->vec + j)) { | |
551 | j = (j + 1) & TVN_MASK; | |
552 | continue; | |
553 | } | |
554 | list_for_each_entry(nte, varray[i]->vec + j, entry) | |
555 | if (time_before(nte->expires, expires)) | |
556 | expires = nte->expires; | |
557 | if (j < (INDEX(i)) && i < 3) | |
558 | list = varray[i + 1]->vec + (INDEX(i + 1)); | |
559 | goto found; | |
560 | } while (j != (INDEX(i))); | |
561 | } | |
562 | found: | |
563 | if (list) { | |
564 | /* | |
565 | * The search wrapped. We need to look at the next list | |
566 | * from next tv element that would cascade into tv element | |
567 | * where we found the timer element. | |
568 | */ | |
569 | list_for_each_entry(nte, list, entry) { | |
570 | if (time_before(nte->expires, expires)) | |
571 | expires = nte->expires; | |
572 | } | |
573 | } | |
55c888d6 | 574 | spin_unlock(&base->t_base.lock); |
1da177e4 LT |
575 | return expires; |
576 | } | |
577 | #endif | |
578 | ||
579 | /******************************************************************/ | |
580 | ||
581 | /* | |
582 | * Timekeeping variables | |
583 | */ | |
584 | unsigned long tick_usec = TICK_USEC; /* USER_HZ period (usec) */ | |
585 | unsigned long tick_nsec = TICK_NSEC; /* ACTHZ period (nsec) */ | |
586 | ||
587 | /* | |
588 | * The current time | |
589 | * wall_to_monotonic is what we need to add to xtime (or xtime corrected | |
590 | * for sub jiffie times) to get to monotonic time. Monotonic is pegged | |
591 | * at zero at system boot time, so wall_to_monotonic will be negative, | |
592 | * however, we will ALWAYS keep the tv_nsec part positive so we can use | |
593 | * the usual normalization. | |
594 | */ | |
595 | struct timespec xtime __attribute__ ((aligned (16))); | |
596 | struct timespec wall_to_monotonic __attribute__ ((aligned (16))); | |
597 | ||
598 | EXPORT_SYMBOL(xtime); | |
599 | ||
600 | /* Don't completely fail for HZ > 500. */ | |
601 | int tickadj = 500/HZ ? : 1; /* microsecs */ | |
602 | ||
603 | ||
604 | /* | |
605 | * phase-lock loop variables | |
606 | */ | |
607 | /* TIME_ERROR prevents overwriting the CMOS clock */ | |
608 | int time_state = TIME_OK; /* clock synchronization status */ | |
609 | int time_status = STA_UNSYNC; /* clock status bits */ | |
610 | long time_offset; /* time adjustment (us) */ | |
611 | long time_constant = 2; /* pll time constant */ | |
612 | long time_tolerance = MAXFREQ; /* frequency tolerance (ppm) */ | |
613 | long time_precision = 1; /* clock precision (us) */ | |
614 | long time_maxerror = NTP_PHASE_LIMIT; /* maximum error (us) */ | |
615 | long time_esterror = NTP_PHASE_LIMIT; /* estimated error (us) */ | |
616 | static long time_phase; /* phase offset (scaled us) */ | |
617 | long time_freq = (((NSEC_PER_SEC + HZ/2) % HZ - HZ/2) << SHIFT_USEC) / NSEC_PER_USEC; | |
618 | /* frequency offset (scaled ppm)*/ | |
619 | static long time_adj; /* tick adjust (scaled 1 / HZ) */ | |
620 | long time_reftime; /* time at last adjustment (s) */ | |
621 | long time_adjust; | |
622 | long time_next_adjust; | |
623 | ||
624 | /* | |
625 | * this routine handles the overflow of the microsecond field | |
626 | * | |
627 | * The tricky bits of code to handle the accurate clock support | |
628 | * were provided by Dave Mills ([email protected]) of NTP fame. | |
629 | * They were originally developed for SUN and DEC kernels. | |
630 | * All the kudos should go to Dave for this stuff. | |
631 | * | |
632 | */ | |
633 | static void second_overflow(void) | |
634 | { | |
635 | long ltemp; | |
636 | ||
637 | /* Bump the maxerror field */ | |
638 | time_maxerror += time_tolerance >> SHIFT_USEC; | |
639 | if ( time_maxerror > NTP_PHASE_LIMIT ) { | |
640 | time_maxerror = NTP_PHASE_LIMIT; | |
641 | time_status |= STA_UNSYNC; | |
642 | } | |
643 | ||
644 | /* | |
645 | * Leap second processing. If in leap-insert state at | |
646 | * the end of the day, the system clock is set back one | |
647 | * second; if in leap-delete state, the system clock is | |
648 | * set ahead one second. The microtime() routine or | |
649 | * external clock driver will ensure that reported time | |
650 | * is always monotonic. The ugly divides should be | |
651 | * replaced. | |
652 | */ | |
653 | switch (time_state) { | |
654 | ||
655 | case TIME_OK: | |
656 | if (time_status & STA_INS) | |
657 | time_state = TIME_INS; | |
658 | else if (time_status & STA_DEL) | |
659 | time_state = TIME_DEL; | |
660 | break; | |
661 | ||
662 | case TIME_INS: | |
663 | if (xtime.tv_sec % 86400 == 0) { | |
664 | xtime.tv_sec--; | |
665 | wall_to_monotonic.tv_sec++; | |
666 | /* The timer interpolator will make time change gradually instead | |
667 | * of an immediate jump by one second. | |
668 | */ | |
669 | time_interpolator_update(-NSEC_PER_SEC); | |
670 | time_state = TIME_OOP; | |
671 | clock_was_set(); | |
672 | printk(KERN_NOTICE "Clock: inserting leap second 23:59:60 UTC\n"); | |
673 | } | |
674 | break; | |
675 | ||
676 | case TIME_DEL: | |
677 | if ((xtime.tv_sec + 1) % 86400 == 0) { | |
678 | xtime.tv_sec++; | |
679 | wall_to_monotonic.tv_sec--; | |
680 | /* Use of time interpolator for a gradual change of time */ | |
681 | time_interpolator_update(NSEC_PER_SEC); | |
682 | time_state = TIME_WAIT; | |
683 | clock_was_set(); | |
684 | printk(KERN_NOTICE "Clock: deleting leap second 23:59:59 UTC\n"); | |
685 | } | |
686 | break; | |
687 | ||
688 | case TIME_OOP: | |
689 | time_state = TIME_WAIT; | |
690 | break; | |
691 | ||
692 | case TIME_WAIT: | |
693 | if (!(time_status & (STA_INS | STA_DEL))) | |
694 | time_state = TIME_OK; | |
695 | } | |
696 | ||
697 | /* | |
698 | * Compute the phase adjustment for the next second. In | |
699 | * PLL mode, the offset is reduced by a fixed factor | |
700 | * times the time constant. In FLL mode the offset is | |
701 | * used directly. In either mode, the maximum phase | |
702 | * adjustment for each second is clamped so as to spread | |
703 | * the adjustment over not more than the number of | |
704 | * seconds between updates. | |
705 | */ | |
706 | if (time_offset < 0) { | |
707 | ltemp = -time_offset; | |
708 | if (!(time_status & STA_FLL)) | |
709 | ltemp >>= SHIFT_KG + time_constant; | |
710 | if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE) | |
711 | ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE; | |
712 | time_offset += ltemp; | |
713 | time_adj = -ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE); | |
714 | } else { | |
715 | ltemp = time_offset; | |
716 | if (!(time_status & STA_FLL)) | |
717 | ltemp >>= SHIFT_KG + time_constant; | |
718 | if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE) | |
719 | ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE; | |
720 | time_offset -= ltemp; | |
721 | time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE); | |
722 | } | |
723 | ||
724 | /* | |
725 | * Compute the frequency estimate and additional phase | |
726 | * adjustment due to frequency error for the next | |
727 | * second. When the PPS signal is engaged, gnaw on the | |
728 | * watchdog counter and update the frequency computed by | |
729 | * the pll and the PPS signal. | |
730 | */ | |
731 | pps_valid++; | |
732 | if (pps_valid == PPS_VALID) { /* PPS signal lost */ | |
733 | pps_jitter = MAXTIME; | |
734 | pps_stabil = MAXFREQ; | |
735 | time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER | | |
736 | STA_PPSWANDER | STA_PPSERROR); | |
737 | } | |
738 | ltemp = time_freq + pps_freq; | |
739 | if (ltemp < 0) | |
740 | time_adj -= -ltemp >> | |
741 | (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE); | |
742 | else | |
743 | time_adj += ltemp >> | |
744 | (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE); | |
745 | ||
746 | #if HZ == 100 | |
747 | /* Compensate for (HZ==100) != (1 << SHIFT_HZ). | |
748 | * Add 25% and 3.125% to get 128.125; => only 0.125% error (p. 14) | |
749 | */ | |
750 | if (time_adj < 0) | |
751 | time_adj -= (-time_adj >> 2) + (-time_adj >> 5); | |
752 | else | |
753 | time_adj += (time_adj >> 2) + (time_adj >> 5); | |
754 | #endif | |
755 | #if HZ == 1000 | |
756 | /* Compensate for (HZ==1000) != (1 << SHIFT_HZ). | |
757 | * Add 1.5625% and 0.78125% to get 1023.4375; => only 0.05% error (p. 14) | |
758 | */ | |
759 | if (time_adj < 0) | |
760 | time_adj -= (-time_adj >> 6) + (-time_adj >> 7); | |
761 | else | |
762 | time_adj += (time_adj >> 6) + (time_adj >> 7); | |
763 | #endif | |
764 | } | |
765 | ||
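A small standalone sketch of the PLL branch of second_overflow() above: in PLL mode the remaining offset is divided by 2^(SHIFT_KG + time_constant) each second, so with the default time_constant of 2 about 1/256 of the error is scheduled per second. SHIFT_KG = 6 is assumed from <linux/timex.h> of this era, and the SHIFT_UPDATE scaling and the MAXPHASE/MINSEC clamp are omitted for brevity:

#include <stdio.h>

#define SHIFT_KG 6              /* assumed value from <linux/timex.h> */

int main(void)
{
	long time_offset = 100000;      /* 100 ms of phase error, in usec */
	long time_constant = 2;         /* default pll time constant */
	int sec;

	for (sec = 1; sec <= 5; sec++) {
		long ltemp = time_offset >> (SHIFT_KG + time_constant);

		time_offset -= ltemp;
		printf("second %d: apply %ld usec, %ld usec of offset left\n",
		       sec, ltemp, time_offset);
	}
	return 0;
}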
766 | /* in the NTP reference this is called "hardclock()" */ | |
767 | static void update_wall_time_one_tick(void) | |
768 | { | |
769 | long time_adjust_step, delta_nsec; | |
770 | ||
771 | if ( (time_adjust_step = time_adjust) != 0 ) { | |
772 | /* We are doing an adjtime thing. | |
773 | * | |
774 | * Prepare time_adjust_step to be within bounds. | |
775 | * Note that a positive time_adjust means we want the clock | |
776 | * to run faster. | |
777 | * | |
778 | * Limit the amount of the step to be in the range | |
779 | * -tickadj .. +tickadj | |
780 | */ | |
781 | if (time_adjust > tickadj) | |
782 | time_adjust_step = tickadj; | |
783 | else if (time_adjust < -tickadj) | |
784 | time_adjust_step = -tickadj; | |
785 | ||
786 | /* Reduce by this step the amount of time left */ | |
787 | time_adjust -= time_adjust_step; | |
788 | } | |
789 | delta_nsec = tick_nsec + time_adjust_step * 1000; | |
790 | /* | |
791 | * Advance the phase, once it gets to one microsecond, then | |
792 | * advance the tick more. | |
793 | */ | |
794 | time_phase += time_adj; | |
795 | if (time_phase <= -FINENSEC) { | |
796 | long ltemp = -time_phase >> (SHIFT_SCALE - 10); | |
797 | time_phase += ltemp << (SHIFT_SCALE - 10); | |
798 | delta_nsec -= ltemp; | |
799 | } | |
800 | else if (time_phase >= FINENSEC) { | |
801 | long ltemp = time_phase >> (SHIFT_SCALE - 10); | |
802 | time_phase -= ltemp << (SHIFT_SCALE - 10); | |
803 | delta_nsec += ltemp; | |
804 | } | |
805 | xtime.tv_nsec += delta_nsec; | |
806 | time_interpolator_update(delta_nsec); | |
807 | ||
808 | /* Changes by adjtime() do not take effect till next tick. */ | |
809 | if (time_next_adjust != 0) { | |
810 | time_adjust = time_next_adjust; | |
811 | time_next_adjust = 0; | |
812 | } | |
813 | } | |
814 | ||
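A tiny sketch of the adjtime() slewing performed in update_wall_time_one_tick() above: the outstanding adjustment is consumed at most tickadj microseconds per tick (tickadj = 500/HZ ?: 1 earlier in this file), so at an assumed HZ = 100 a 1000 usec request plays out over 200 ticks:

#include <stdio.h>

#define HZ      100
#define TICKADJ (500 / HZ ? 500 / HZ : 1)   /* 5 usec per tick at HZ=100 */

int main(void)
{
	long time_adjust = 1000;        /* usec still owed to adjtime() */
	long ticks = 0;

	while (time_adjust != 0) {
		long step = time_adjust;

		if (step > TICKADJ)
			step = TICKADJ;
		else if (step < -TICKADJ)
			step = -TICKADJ;
		time_adjust -= step;
		ticks++;
	}
	printf("1000 usec adjtime consumed over %ld ticks (%.1f s at HZ=%d)\n",
	       ticks, (double)ticks / HZ, HZ);
	return 0;
}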
815 | /* | |
816 | * Using a loop looks inefficient, but "ticks" is | |
817 | * usually just one (we shouldn't be losing ticks; | |
818 | * we're doing it this way mainly for interrupt | |
819 | * latency reasons, not because we think we'll | |
820 | * have lots of lost timer ticks). | |
821 | */ | |
822 | static void update_wall_time(unsigned long ticks) | |
823 | { | |
824 | do { | |
825 | ticks--; | |
826 | update_wall_time_one_tick(); | |
827 | if (xtime.tv_nsec >= 1000000000) { | |
828 | xtime.tv_nsec -= 1000000000; | |
829 | xtime.tv_sec++; | |
830 | second_overflow(); | |
831 | } | |
832 | } while (ticks); | |
833 | } | |
834 | ||
835 | /* | |
836 | * Called from the timer interrupt handler to charge one tick to the current | |
837 | * process. user_tick is 1 if the tick is user time, 0 for system. | |
838 | */ | |
839 | void update_process_times(int user_tick) | |
840 | { | |
841 | struct task_struct *p = current; | |
842 | int cpu = smp_processor_id(); | |
843 | ||
844 | /* Note: this timer irq context must be accounted for as well. */ | |
845 | if (user_tick) | |
846 | account_user_time(p, jiffies_to_cputime(1)); | |
847 | else | |
848 | account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1)); | |
849 | run_local_timers(); | |
850 | if (rcu_pending(cpu)) | |
851 | rcu_check_callbacks(cpu, user_tick); | |
852 | scheduler_tick(); | |
853 | run_posix_cpu_timers(p); | |
854 | } | |
855 | ||
856 | /* | |
857 | * Nr of active tasks - counted in fixed-point numbers | |
858 | */ | |
859 | static unsigned long count_active_tasks(void) | |
860 | { | |
861 | return (nr_running() + nr_uninterruptible()) * FIXED_1; | |
862 | } | |
863 | ||
864 | /* | |
865 | * Hmm.. Changed this, as the GNU make sources (load.c) seems to | |
866 | * imply that avenrun[] is the standard name for this kind of thing. | |
867 | * Nothing else seems to be standardized: the fractional size etc | |
868 | * all seem to differ on different machines. | |
869 | * | |
870 | * Requires xtime_lock to access. | |
871 | */ | |
872 | unsigned long avenrun[3]; | |
873 | ||
874 | EXPORT_SYMBOL(avenrun); | |
875 | ||
876 | /* | |
877 | * calc_load - given tick count, update the avenrun load estimates. | |
878 | * This is called while holding a write_lock on xtime_lock. | |
879 | */ | |
880 | static inline void calc_load(unsigned long ticks) | |
881 | { | |
882 | unsigned long active_tasks; /* fixed-point */ | |
883 | static int count = LOAD_FREQ; | |
884 | ||
885 | count -= ticks; | |
886 | if (count < 0) { | |
887 | count += LOAD_FREQ; | |
888 | active_tasks = count_active_tasks(); | |
889 | CALC_LOAD(avenrun[0], EXP_1, active_tasks); | |
890 | CALC_LOAD(avenrun[1], EXP_5, active_tasks); | |
891 | CALC_LOAD(avenrun[2], EXP_15, active_tasks); | |
892 | } | |
893 | } | |
894 | ||
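A self-contained sketch of the fixed-point averaging behind CALC_LOAD() above; FSHIFT, FIXED_1 and the EXP_* constants are not defined in this file, and the values below are assumed from <linux/sched.h> of this era:

#include <stdio.h>

#define FSHIFT   11                     /* bits of precision (assumed) */
#define FIXED_1  (1 << FSHIFT)          /* 1.0 in fixed point */
#define EXP_1    1884                   /* 1/exp(5sec/1min) in fixed point */
#define EXP_5    2014                   /* 1/exp(5sec/5min) */
#define EXP_15   2037                   /* 1/exp(5sec/15min) */

#define CALC_LOAD(load, exp, n)            \
	(load) *= (exp);                   \
	(load) += (n) * (FIXED_1 - (exp)); \
	(load) >>= FSHIFT;

int main(void)
{
	unsigned long avenrun[3] = { 0, 0, 0 };
	unsigned long active = 3 * FIXED_1;     /* 3 runnable tasks, fixed point */
	int i;

	/* One sample every LOAD_FREQ (5 s); 24 samples = 2 minutes of load 3. */
	for (i = 0; i < 24; i++) {
		CALC_LOAD(avenrun[0], EXP_1, active);
		CALC_LOAD(avenrun[1], EXP_5, active);
		CALC_LOAD(avenrun[2], EXP_15, active);
	}
	printf("load averages: %lu.%02lu %lu.%02lu %lu.%02lu\n",
	       avenrun[0] >> FSHIFT, (avenrun[0] & (FIXED_1 - 1)) * 100 / FIXED_1,
	       avenrun[1] >> FSHIFT, (avenrun[1] & (FIXED_1 - 1)) * 100 / FIXED_1,
	       avenrun[2] >> FSHIFT, (avenrun[2] & (FIXED_1 - 1)) * 100 / FIXED_1);
	return 0;
}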
895 | /* jiffies at the most recent update of wall time */ | |
896 | unsigned long wall_jiffies = INITIAL_JIFFIES; | |
897 | ||
898 | /* | |
899 | * This seqlock protects us from races in SMP while | |
900 | * playing with xtime and avenrun. | |
901 | */ | |
902 | #ifndef ARCH_HAVE_XTIME_LOCK | |
903 | seqlock_t xtime_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED; | |
904 | ||
905 | EXPORT_SYMBOL(xtime_lock); | |
906 | #endif | |
907 | ||
908 | /* | |
909 | * This function runs timers and the timer-tq in bottom half context. | |
910 | */ | |
911 | static void run_timer_softirq(struct softirq_action *h) | |
912 | { | |
913 | tvec_base_t *base = &__get_cpu_var(tvec_bases); | |
914 | ||
915 | if (time_after_eq(jiffies, base->timer_jiffies)) | |
916 | __run_timers(base); | |
917 | } | |
918 | ||
919 | /* | |
920 | * Called by the local, per-CPU timer interrupt on SMP. | |
921 | */ | |
922 | void run_local_timers(void) | |
923 | { | |
924 | raise_softirq(TIMER_SOFTIRQ); | |
925 | } | |
926 | ||
927 | /* | |
928 | * Called by the timer interrupt. xtime_lock must already be taken | |
929 | * by the timer IRQ! | |
930 | */ | |
931 | static inline void update_times(void) | |
932 | { | |
933 | unsigned long ticks; | |
934 | ||
935 | ticks = jiffies - wall_jiffies; | |
936 | if (ticks) { | |
937 | wall_jiffies += ticks; | |
938 | update_wall_time(ticks); | |
939 | } | |
940 | calc_load(ticks); | |
941 | } | |
942 | ||
943 | /* | |
944 | * The 64-bit jiffies value is not atomic - you MUST NOT read it | |
945 | * without sampling the sequence number in xtime_lock. | |
946 | * jiffies is defined in the linker script... | |
947 | */ | |
948 | ||
949 | void do_timer(struct pt_regs *regs) | |
950 | { | |
951 | jiffies_64++; | |
952 | update_times(); | |
953 | } | |
954 | ||
955 | #ifdef __ARCH_WANT_SYS_ALARM | |
956 | ||
957 | /* | |
958 | * For backwards compatibility? This can be done in libc so Alpha | |
959 | * and all newer ports shouldn't need it. | |
960 | */ | |
961 | asmlinkage unsigned long sys_alarm(unsigned int seconds) | |
962 | { | |
963 | struct itimerval it_new, it_old; | |
964 | unsigned int oldalarm; | |
965 | ||
966 | it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0; | |
967 | it_new.it_value.tv_sec = seconds; | |
968 | it_new.it_value.tv_usec = 0; | |
969 | do_setitimer(ITIMER_REAL, &it_new, &it_old); | |
970 | oldalarm = it_old.it_value.tv_sec; | |
971 | /* ehhh.. We can't return 0 if we have an alarm pending.. */ | |
972 | /* And we'd better return too much than too little anyway */ | |
973 | if ((!oldalarm && it_old.it_value.tv_usec) || it_old.it_value.tv_usec >= 500000) | |
974 | oldalarm++; | |
975 | return oldalarm; | |
976 | } | |
977 | ||
978 | #endif | |
979 | ||
980 | #ifndef __alpha__ | |
981 | ||
982 | /* | |
983 | * The Alpha uses getxpid, getxuid, and getxgid instead. Maybe this | |
984 | * should be moved into arch/i386 instead? | |
985 | */ | |
986 | ||
987 | /** | |
988 | * sys_getpid - return the thread group id of the current process | |
989 | * | |
990 | * Note, despite the name, this returns the tgid not the pid. The tgid and | |
991 | * the pid are identical unless CLONE_THREAD was specified on clone() in | |
992 | * which case the tgid is the same in all threads of the same group. | |
993 | * | |
994 | * This is SMP safe as current->tgid does not change. | |
995 | */ | |
996 | asmlinkage long sys_getpid(void) | |
997 | { | |
998 | return current->tgid; | |
999 | } | |
1000 | ||
1001 | /* | |
1002 | * Accessing ->group_leader->real_parent is not SMP-safe, it could | |
1003 | * change from under us. However, rather than getting any lock | |
1004 | * we can use an optimistic algorithm: get the parent | |
1005 | * pid, and go back and check that the parent is still | |
1006 | * the same. If it has changed (which is extremely unlikely | |
1007 | * indeed), we just try again.. | |
1008 | * | |
1009 | * NOTE! This depends on the fact that even if we _do_ | |
1010 | * get an old value of "parent", we can happily dereference | |
1011 | * the pointer (it was and remains a dereferencable kernel pointer | |
1012 | * no matter what): we just can't necessarily trust the result | |
1013 | * until we know that the parent pointer is valid. | |
1014 | * | |
1015 | * NOTE2: ->group_leader never changes from under us. | |
1016 | */ | |
1017 | asmlinkage long sys_getppid(void) | |
1018 | { | |
1019 | int pid; | |
1020 | struct task_struct *me = current; | |
1021 | struct task_struct *parent; | |
1022 | ||
1023 | parent = me->group_leader->real_parent; | |
1024 | for (;;) { | |
1025 | pid = parent->tgid; | |
1026 | #ifdef CONFIG_SMP | |
1027 | { | |
1028 | struct task_struct *old = parent; | |
1029 | ||
1030 | /* | |
1031 | * Make sure we read the pid before re-reading the | |
1032 | * parent pointer: | |
1033 | */ | |
d59dd462 | 1034 | smp_rmb(); |
1da177e4 LT |
1035 | parent = me->group_leader->real_parent; |
1036 | if (old != parent) | |
1037 | continue; | |
1038 | } | |
1039 | #endif | |
1040 | break; | |
1041 | } | |
1042 | return pid; | |
1043 | } | |
1044 | ||
1045 | asmlinkage long sys_getuid(void) | |
1046 | { | |
1047 | /* Only we change this so SMP safe */ | |
1048 | return current->uid; | |
1049 | } | |
1050 | ||
1051 | asmlinkage long sys_geteuid(void) | |
1052 | { | |
1053 | /* Only we change this so SMP safe */ | |
1054 | return current->euid; | |
1055 | } | |
1056 | ||
1057 | asmlinkage long sys_getgid(void) | |
1058 | { | |
1059 | /* Only we change this so SMP safe */ | |
1060 | return current->gid; | |
1061 | } | |
1062 | ||
1063 | asmlinkage long sys_getegid(void) | |
1064 | { | |
1065 | /* Only we change this so SMP safe */ | |
1066 | return current->egid; | |
1067 | } | |
1068 | ||
1069 | #endif | |
1070 | ||
1071 | static void process_timeout(unsigned long __data) | |
1072 | { | |
1073 | wake_up_process((task_t *)__data); | |
1074 | } | |
1075 | ||
1076 | /** | |
1077 | * schedule_timeout - sleep until timeout | |
1078 | * @timeout: timeout value in jiffies | |
1079 | * | |
1080 | * Make the current task sleep until @timeout jiffies have | |
1081 | * elapsed. The routine will return immediately unless | |
1082 | * the current task state has been set (see set_current_state()). | |
1083 | * | |
1084 | * You can set the task state as follows - | |
1085 | * | |
1086 | * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to | |
1087 | * pass before the routine returns. The routine will return 0 | |
1088 | * | |
1089 | * %TASK_INTERRUPTIBLE - the routine may return early if a signal is | |
1090 | * delivered to the current task. In this case the remaining time | |
1091 | * in jiffies will be returned, or 0 if the timer expired in time | |
1092 | * | |
1093 | * The current task state is guaranteed to be TASK_RUNNING when this | |
1094 | * routine returns. | |
1095 | * | |
1096 | * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule | |
1097 | * the CPU away without a bound on the timeout. In this case the return | |
1098 | * value will be %MAX_SCHEDULE_TIMEOUT. | |
1099 | * | |
1100 | * In all cases the return value is guaranteed to be non-negative. | |
1101 | */ | |
1102 | fastcall signed long __sched schedule_timeout(signed long timeout) | |
1103 | { | |
1104 | struct timer_list timer; | |
1105 | unsigned long expire; | |
1106 | ||
1107 | switch (timeout) | |
1108 | { | |
1109 | case MAX_SCHEDULE_TIMEOUT: | |
1110 | /* | |
1111 | * These two special cases are useful purely for the caller's | |
1112 | * convenience. Nothing more. We could take | |
1113 | * MAX_SCHEDULE_TIMEOUT from one of the negative values, | |
1114 | * but I'd like to return a valid offset (>=0) to allow | |
1115 | * the caller to do whatever it wants with the retval. | |
1116 | */ | |
1117 | schedule(); | |
1118 | goto out; | |
1119 | default: | |
1120 | /* | |
1121 | * Another bit of PARANOID. Note that the retval will be | |
1122 | * 0 since no piece of the kernel is supposed to check | |
1123 | * for a negative retval of schedule_timeout() (since it | |
1124 | * should never happen anyway). You just have the printk() | |
1125 | * that will tell you if something has gone wrong and where. | |
1126 | */ | |
1127 | if (timeout < 0) | |
1128 | { | |
1129 | printk(KERN_ERR "schedule_timeout: wrong timeout " | |
1130 | "value %lx from %p\n", timeout, | |
1131 | __builtin_return_address(0)); | |
1132 | current->state = TASK_RUNNING; | |
1133 | goto out; | |
1134 | } | |
1135 | } | |
1136 | ||
1137 | expire = timeout + jiffies; | |
1138 | ||
1139 | init_timer(&timer); | |
1140 | timer.expires = expire; | |
1141 | timer.data = (unsigned long) current; | |
1142 | timer.function = process_timeout; | |
1143 | ||
1144 | add_timer(&timer); | |
1145 | schedule(); | |
1146 | del_singleshot_timer_sync(&timer); | |
1147 | ||
1148 | timeout = expire - jiffies; | |
1149 | ||
1150 | out: | |
1151 | return timeout < 0 ? 0 : timeout; | |
1152 | } | |
1153 | ||
1154 | EXPORT_SYMBOL(schedule_timeout); | |
1155 | ||
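A brief sketch of the calling convention documented above, polling a hypothetical data_ready() condition with a one-second interruptible timeout (the helper and its error codes are illustrative, not an established kernel API):

#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

/* Wait up to one second for data; 0 on success, -ETIMEDOUT or -EINTR otherwise. */
static int wait_for_data(int (*data_ready)(void))
{
	signed long timeout = HZ;       /* one second, in jiffies */

	while (!data_ready()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (data_ready()) {             /* re-check after setting state */
			__set_current_state(TASK_RUNNING);
			break;
		}
		timeout = schedule_timeout(timeout);
		if (signal_pending(current))
			return -EINTR;
		if (timeout == 0)
			return -ETIMEDOUT;
	}
	return 0;
}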
1156 | /* Thread ID - the internal kernel "pid" */ | |
1157 | asmlinkage long sys_gettid(void) | |
1158 | { | |
1159 | return current->pid; | |
1160 | } | |
1161 | ||
1162 | static long __sched nanosleep_restart(struct restart_block *restart) | |
1163 | { | |
1164 | unsigned long expire = restart->arg0, now = jiffies; | |
1165 | struct timespec __user *rmtp = (struct timespec __user *) restart->arg1; | |
1166 | long ret; | |
1167 | ||
1168 | /* Did it expire while we handled signals? */ | |
1169 | if (!time_after(expire, now)) | |
1170 | return 0; | |
1171 | ||
1172 | current->state = TASK_INTERRUPTIBLE; | |
1173 | expire = schedule_timeout(expire - now); | |
1174 | ||
1175 | ret = 0; | |
1176 | if (expire) { | |
1177 | struct timespec t; | |
1178 | jiffies_to_timespec(expire, &t); | |
1179 | ||
1180 | ret = -ERESTART_RESTARTBLOCK; | |
1181 | if (rmtp && copy_to_user(rmtp, &t, sizeof(t))) | |
1182 | ret = -EFAULT; | |
1183 | /* The 'restart' block is already filled in */ | |
1184 | } | |
1185 | return ret; | |
1186 | } | |
1187 | ||
1188 | asmlinkage long sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp) | |
1189 | { | |
1190 | struct timespec t; | |
1191 | unsigned long expire; | |
1192 | long ret; | |
1193 | ||
1194 | if (copy_from_user(&t, rqtp, sizeof(t))) | |
1195 | return -EFAULT; | |
1196 | ||
1197 | if ((t.tv_nsec >= 1000000000L) || (t.tv_nsec < 0) || (t.tv_sec < 0)) | |
1198 | return -EINVAL; | |
1199 | ||
1200 | expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec); | |
1201 | current->state = TASK_INTERRUPTIBLE; | |
1202 | expire = schedule_timeout(expire); | |
1203 | ||
1204 | ret = 0; | |
1205 | if (expire) { | |
1206 | struct restart_block *restart; | |
1207 | jiffies_to_timespec(expire, &t); | |
1208 | if (rmtp && copy_to_user(rmtp, &t, sizeof(t))) | |
1209 | return -EFAULT; | |
1210 | ||
1211 | restart = ¤t_thread_info()->restart_block; | |
1212 | restart->fn = nanosleep_restart; | |
1213 | restart->arg0 = jiffies + expire; | |
1214 | restart->arg1 = (unsigned long) rmtp; | |
1215 | ret = -ERESTART_RESTARTBLOCK; | |
1216 | } | |
1217 | return ret; | |
1218 | } | |
1219 | ||
1220 | /* | |
1221 | * sys_sysinfo - fill in sysinfo struct | |
1222 | */ | |
1223 | asmlinkage long sys_sysinfo(struct sysinfo __user *info) | |
1224 | { | |
1225 | struct sysinfo val; | |
1226 | unsigned long mem_total, sav_total; | |
1227 | unsigned int mem_unit, bitcount; | |
1228 | unsigned long seq; | |
1229 | ||
1230 | memset((char *)&val, 0, sizeof(struct sysinfo)); | |
1231 | ||
1232 | do { | |
1233 | struct timespec tp; | |
1234 | seq = read_seqbegin(&xtime_lock); | |
1235 | ||
1236 | /* | |
1237 | * This is annoying. The code below does the same thing | |
1238 | * posix_get_clock_monotonic() does, but that takes the | |
1239 | * lock itself, and we want a single locked section that | |
1240 | * also covers the load-average reads below. | |
1241 | */ | |
1242 | ||
1243 | getnstimeofday(&tp); | |
1244 | tp.tv_sec += wall_to_monotonic.tv_sec; | |
1245 | tp.tv_nsec += wall_to_monotonic.tv_nsec; | |
1246 | if (tp.tv_nsec - NSEC_PER_SEC >= 0) { | |
1247 | tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC; | |
1248 | tp.tv_sec++; | |
1249 | } | |
1250 | val.uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0); | |
1251 | ||
1252 | val.loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT); | |
1253 | val.loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT); | |
1254 | val.loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT); | |
1255 | ||
1256 | val.procs = nr_threads; | |
1257 | } while (read_seqretry(&xtime_lock, seq)); | |
1258 | ||
1259 | si_meminfo(&val); | |
1260 | si_swapinfo(&val); | |
1261 | ||
1262 | /* | |
1263 | * If the sum of all the available memory (i.e. ram + swap) | |
1264 | * is less than can be stored in a 32 bit unsigned long then | |
1265 | * we can be binary compatible with 2.2.x kernels. If not, | |
1266 | * well, in that case 2.2.x was broken anyways... | |
1267 | * | |
1268 | * -Erik Andersen <[email protected]> | |
1269 | */ | |
1270 | ||
1271 | mem_total = val.totalram + val.totalswap; | |
1272 | if (mem_total < val.totalram || mem_total < val.totalswap) | |
1273 | goto out; | |
1274 | bitcount = 0; | |
1275 | mem_unit = val.mem_unit; | |
1276 | while (mem_unit > 1) { | |
1277 | bitcount++; | |
1278 | mem_unit >>= 1; | |
1279 | sav_total = mem_total; | |
1280 | mem_total <<= 1; | |
1281 | if (mem_total < sav_total) | |
1282 | goto out; | |
1283 | } | |
1284 | ||
1285 | /* | |
1286 | * If mem_total did not overflow, multiply all memory values by | |
1287 | * val.mem_unit and set it to 1. This leaves things compatible | |
1288 | * with 2.2.x, and also retains compatibility with earlier 2.4.x | |
1289 | * kernels... | |
1290 | */ | |
1291 | ||
1292 | val.mem_unit = 1; | |
1293 | val.totalram <<= bitcount; | |
1294 | val.freeram <<= bitcount; | |
1295 | val.sharedram <<= bitcount; | |
1296 | val.bufferram <<= bitcount; | |
1297 | val.totalswap <<= bitcount; | |
1298 | val.freeswap <<= bitcount; | |
1299 | val.totalhigh <<= bitcount; | |
1300 | val.freehigh <<= bitcount; | |
1301 | ||
1302 | out: | |
1303 | if (copy_to_user(info, &val, sizeof(struct sysinfo))) | |
1304 | return -EFAULT; | |
1305 | ||
1306 | return 0; | |
1307 | } | |
1308 | ||
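A worked sketch of the mem_unit folding above: with an assumed mem_unit of 4096 the loop counts bitcount = 12, and if ram + swap still fits in an unsigned long every field is shifted up so the result is reported in bytes with mem_unit = 1, as 2.2.x expected:

#include <stdio.h>

int main(void)
{
	unsigned long totalram = 262144;        /* e.g. 1 GiB in 4 KiB pages */
	unsigned long totalswap = 131072;       /* 512 MiB in 4 KiB pages */
	unsigned int mem_unit = 4096;
	unsigned int bitcount = 0;
	unsigned long mem_total = totalram + totalswap;
	unsigned long sav_total;

	while (mem_unit > 1) {
		bitcount++;
		mem_unit >>= 1;
		sav_total = mem_total;
		mem_total <<= 1;
		if (mem_total < sav_total) {    /* would overflow: keep page units */
			printf("overflow, keep mem_unit as is\n");
			return 0;
		}
	}
	printf("bitcount=%u, totalram=%lu bytes, totalswap=%lu bytes\n",
	       bitcount, totalram << bitcount, totalswap << bitcount);
	return 0;
}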
1309 | static void __devinit init_timers_cpu(int cpu) | |
1310 | { | |
1311 | int j; | |
1312 | tvec_base_t *base; | |
55c888d6 | 1313 | |
1da177e4 | 1314 | base = &per_cpu(tvec_bases, cpu); |
55c888d6 | 1315 | spin_lock_init(&base->t_base.lock); |
1da177e4 LT |
1316 | for (j = 0; j < TVN_SIZE; j++) { |
1317 | INIT_LIST_HEAD(base->tv5.vec + j); | |
1318 | INIT_LIST_HEAD(base->tv4.vec + j); | |
1319 | INIT_LIST_HEAD(base->tv3.vec + j); | |
1320 | INIT_LIST_HEAD(base->tv2.vec + j); | |
1321 | } | |
1322 | for (j = 0; j < TVR_SIZE; j++) | |
1323 | INIT_LIST_HEAD(base->tv1.vec + j); | |
1324 | ||
1325 | base->timer_jiffies = jiffies; | |
1326 | } | |
1327 | ||
1328 | #ifdef CONFIG_HOTPLUG_CPU | |
55c888d6 | 1329 | static void migrate_timer_list(tvec_base_t *new_base, struct list_head *head) |
1da177e4 LT |
1330 | { |
1331 | struct timer_list *timer; | |
1332 | ||
1333 | while (!list_empty(head)) { | |
1334 | timer = list_entry(head->next, struct timer_list, entry); | |
55c888d6 ON |
1335 | detach_timer(timer, 0); |
1336 | timer->base = &new_base->t_base; | |
1da177e4 | 1337 | internal_add_timer(new_base, timer); |
1da177e4 | 1338 | } |
1da177e4 LT |
1339 | } |
1340 | ||
1341 | static void __devinit migrate_timers(int cpu) | |
1342 | { | |
1343 | tvec_base_t *old_base; | |
1344 | tvec_base_t *new_base; | |
1345 | int i; | |
1346 | ||
1347 | BUG_ON(cpu_online(cpu)); | |
1348 | old_base = &per_cpu(tvec_bases, cpu); | |
1349 | new_base = &get_cpu_var(tvec_bases); | |
1350 | ||
1351 | local_irq_disable(); | |
55c888d6 ON |
1352 | spin_lock(&new_base->t_base.lock); |
1353 | spin_lock(&old_base->t_base.lock); | |
1da177e4 | 1354 | |
55c888d6 | 1355 | if (old_base->t_base.running_timer) |
1da177e4 LT |
1356 | BUG(); |
1357 | for (i = 0; i < TVR_SIZE; i++) | |
55c888d6 ON |
1358 | migrate_timer_list(new_base, old_base->tv1.vec + i); |
1359 | for (i = 0; i < TVN_SIZE; i++) { | |
1360 | migrate_timer_list(new_base, old_base->tv2.vec + i); | |
1361 | migrate_timer_list(new_base, old_base->tv3.vec + i); | |
1362 | migrate_timer_list(new_base, old_base->tv4.vec + i); | |
1363 | migrate_timer_list(new_base, old_base->tv5.vec + i); | |
1364 | } | |
1365 | ||
1366 | spin_unlock(&old_base->t_base.lock); | |
1367 | spin_unlock(&new_base->t_base.lock); | |
1da177e4 LT |
1368 | local_irq_enable(); |
1369 | put_cpu_var(tvec_bases); | |
1da177e4 LT |
1370 | } |
1371 | #endif /* CONFIG_HOTPLUG_CPU */ | |
1372 | ||
1373 | static int __devinit timer_cpu_notify(struct notifier_block *self, | |
1374 | unsigned long action, void *hcpu) | |
1375 | { | |
1376 | long cpu = (long)hcpu; | |
1377 | switch(action) { | |
1378 | case CPU_UP_PREPARE: | |
1379 | init_timers_cpu(cpu); | |
1380 | break; | |
1381 | #ifdef CONFIG_HOTPLUG_CPU | |
1382 | case CPU_DEAD: | |
1383 | migrate_timers(cpu); | |
1384 | break; | |
1385 | #endif | |
1386 | default: | |
1387 | break; | |
1388 | } | |
1389 | return NOTIFY_OK; | |
1390 | } | |
1391 | ||
1392 | static struct notifier_block __devinitdata timers_nb = { | |
1393 | .notifier_call = timer_cpu_notify, | |
1394 | }; | |
1395 | ||
1396 | ||
1397 | void __init init_timers(void) | |
1398 | { | |
1399 | timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE, | |
1400 | (void *)(long)smp_processor_id()); | |
1401 | register_cpu_notifier(&timers_nb); | |
1402 | open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL); | |
1403 | } | |
1404 | ||
1405 | #ifdef CONFIG_TIME_INTERPOLATION | |
1406 | ||
1407 | struct time_interpolator *time_interpolator; | |
1408 | static struct time_interpolator *time_interpolator_list; | |
1409 | static DEFINE_SPINLOCK(time_interpolator_lock); | |
1410 | ||
1411 | static inline u64 time_interpolator_get_cycles(unsigned int src) | |
1412 | { | |
1413 | unsigned long (*x)(void); | |
1414 | ||
1415 | switch (src) | |
1416 | { | |
1417 | case TIME_SOURCE_FUNCTION: | |
1418 | x = time_interpolator->addr; | |
1419 | return x(); | |
1420 | ||
1421 | case TIME_SOURCE_MMIO64 : | |
1422 | return readq((void __iomem *) time_interpolator->addr); | |
1423 | ||
1424 | case TIME_SOURCE_MMIO32 : | |
1425 | return readl((void __iomem *) time_interpolator->addr); | |
1426 | ||
1427 | default: return get_cycles(); | |
1428 | } | |
1429 | } | |
1430 | ||
1431 | static inline u64 time_interpolator_get_counter(void) | |
1432 | { | |
1433 | unsigned int src = time_interpolator->source; | |
1434 | ||
1435 | if (time_interpolator->jitter) | |
1436 | { | |
1437 | u64 lcycle; | |
1438 | u64 now; | |
1439 | ||
1440 | do { | |
1441 | lcycle = time_interpolator->last_cycle; | |
1442 | now = time_interpolator_get_cycles(src); | |
1443 | if (lcycle && time_after(lcycle, now)) | |
1444 | return lcycle; | |
1445 | /* Keep track of the last timer value returned. The use of cmpxchg here | |
1446 | * will cause contention in an SMP environment. | |
1447 | */ | |
1448 | } while (unlikely(cmpxchg(&time_interpolator->last_cycle, lcycle, now) != lcycle)); | |
1449 | return now; | |
1450 | } | |
1451 | else | |
1452 | return time_interpolator_get_cycles(src); | |
1453 | } | |
1454 | ||
1455 | void time_interpolator_reset(void) | |
1456 | { | |
1457 | time_interpolator->offset = 0; | |
1458 | time_interpolator->last_counter = time_interpolator_get_counter(); | |
1459 | } | |
1460 | ||
1461 | #define GET_TI_NSECS(count,i) (((((count) - i->last_counter) & (i)->mask) * (i)->nsec_per_cyc) >> (i)->shift) | |
1462 | ||
1463 | unsigned long time_interpolator_get_offset(void) | |
1464 | { | |
1465 | /* If we do not have a time interpolator set up then just return zero */ | |
1466 | if (!time_interpolator) | |
1467 | return 0; | |
1468 | ||
1469 | return time_interpolator->offset + | |
1470 | GET_TI_NSECS(time_interpolator_get_counter(), time_interpolator); | |
1471 | } | |
1472 | ||
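A standalone sketch of the cycle-to-nanosecond conversion behind GET_TI_NSECS() above: register_time_interpolator() below precomputes nsec_per_cyc = (NSEC_PER_SEC << shift) / frequency, so each read costs one multiply and one shift. The 10 MHz frequency, 32-bit mask and shift of 16 are assumptions for illustration:

#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	unsigned long long frequency = 10000000ULL;     /* assumed 10 MHz counter */
	unsigned int shift = 16;                        /* assumed scaling shift */
	unsigned long long mask = 0xffffffffULL;        /* assumed 32-bit counter */

	unsigned long long nsec_per_cyc = (NSEC_PER_SEC << shift) / frequency;

	unsigned long long last_counter = 1000;
	unsigned long long count = 1000 + 12345;        /* 12345 cycles later */

	unsigned long long ns =
		(((count - last_counter) & mask) * nsec_per_cyc) >> shift;

	printf("12345 cycles at 10 MHz ~= %llu ns\n", ns);   /* 1234500 ns */
	return 0;
}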
1473 | #define INTERPOLATOR_ADJUST 65536 | |
1474 | #define INTERPOLATOR_MAX_SKIP 10*INTERPOLATOR_ADJUST | |
1475 | ||
1476 | static void time_interpolator_update(long delta_nsec) | |
1477 | { | |
1478 | u64 counter; | |
1479 | unsigned long offset; | |
1480 | ||
1481 | /* If there is no time interpolator set up then do nothing */ | |
1482 | if (!time_interpolator) | |
1483 | return; | |
1484 | ||
1485 | /* The interpolator compensates for late ticks by accumulating | |
1486 | * the late time in time_interpolator->offset. A tick earlier than | |
1487 | * expected will lead to a reset of the offset and a corresponding | |
1488 | * jump of the clock forward. Again this only works if the | |
1489 | * interpolator clock is running slightly slower than the regular clock | |
1490 | * and the tuning logic ensures that. | |
1491 | */ | |
1492 | ||
1493 | counter = time_interpolator_get_counter(); | |
1494 | offset = time_interpolator->offset + GET_TI_NSECS(counter, time_interpolator); | |
1495 | ||
1496 | if (delta_nsec < 0 || (unsigned long) delta_nsec < offset) | |
1497 | time_interpolator->offset = offset - delta_nsec; | |
1498 | else { | |
1499 | time_interpolator->skips++; | |
1500 | time_interpolator->ns_skipped += delta_nsec - offset; | |
1501 | time_interpolator->offset = 0; | |
1502 | } | |
1503 | time_interpolator->last_counter = counter; | |
1504 | ||
1505 | /* Tuning logic for time interpolator invoked every minute or so. | |
1506 | * Decrease interpolator clock speed if no skips occurred and an offset is carried. | |
1507 | * Increase interpolator clock speed if we skip too much time. | |
1508 | */ | |
1509 | if (jiffies % INTERPOLATOR_ADJUST == 0) | |
1510 | { | |
1511 | if (time_interpolator->skips == 0 && time_interpolator->offset > TICK_NSEC) | |
1512 | time_interpolator->nsec_per_cyc--; | |
1513 | if (time_interpolator->ns_skipped > INTERPOLATOR_MAX_SKIP && time_interpolator->offset == 0) | |
1514 | time_interpolator->nsec_per_cyc++; | |
1515 | time_interpolator->skips = 0; | |
1516 | time_interpolator->ns_skipped = 0; | |
1517 | } | |
1518 | } | |
1519 | ||
1520 | static inline int | |
1521 | is_better_time_interpolator(struct time_interpolator *new) | |
1522 | { | |
1523 | if (!time_interpolator) | |
1524 | return 1; | |
1525 | return new->frequency > 2*time_interpolator->frequency || | |
1526 | (unsigned long)new->drift < (unsigned long)time_interpolator->drift; | |
1527 | } | |
1528 | ||
1529 | void | |
1530 | register_time_interpolator(struct time_interpolator *ti) | |
1531 | { | |
1532 | unsigned long flags; | |
1533 | ||
1534 | /* Sanity check */ | |
1535 | if (ti->frequency == 0 || ti->mask == 0) | |
1536 | BUG(); | |
1537 | ||
1538 | ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency; | |
1539 | spin_lock(&time_interpolator_lock); | |
1540 | write_seqlock_irqsave(&xtime_lock, flags); | |
1541 | if (is_better_time_interpolator(ti)) { | |
1542 | time_interpolator = ti; | |
1543 | time_interpolator_reset(); | |
1544 | } | |
1545 | write_sequnlock_irqrestore(&xtime_lock, flags); | |
1546 | ||
1547 | ti->next = time_interpolator_list; | |
1548 | time_interpolator_list = ti; | |
1549 | spin_unlock(&time_interpolator_lock); | |
1550 | } | |
1551 | ||
1552 | void | |
1553 | unregister_time_interpolator(struct time_interpolator *ti) | |
1554 | { | |
1555 | struct time_interpolator *curr, **prev; | |
1556 | unsigned long flags; | |
1557 | ||
1558 | spin_lock(&time_interpolator_lock); | |
1559 | prev = &time_interpolator_list; | |
1560 | for (curr = *prev; curr; curr = curr->next) { | |
1561 | if (curr == ti) { | |
1562 | *prev = curr->next; | |
1563 | break; | |
1564 | } | |
1565 | prev = &curr->next; | |
1566 | } | |
1567 | ||
1568 | write_seqlock_irqsave(&xtime_lock, flags); | |
1569 | if (ti == time_interpolator) { | |
1570 | /* we lost the best time-interpolator: */ | |
1571 | time_interpolator = NULL; | |
1572 | /* find the next-best interpolator */ | |
1573 | for (curr = time_interpolator_list; curr; curr = curr->next) | |
1574 | if (is_better_time_interpolator(curr)) | |
1575 | time_interpolator = curr; | |
1576 | time_interpolator_reset(); | |
1577 | } | |
1578 | write_sequnlock_irqrestore(&xtime_lock, flags); | |
1579 | spin_unlock(&time_interpolator_lock); | |
1580 | } | |
1581 | #endif /* CONFIG_TIME_INTERPOLATION */ | |
1582 | ||
1583 | /** | |
1584 | * msleep - sleep safely even with waitqueue interruptions | |
1585 | * @msecs: Time in milliseconds to sleep for | |
1586 | */ | |
1587 | void msleep(unsigned int msecs) | |
1588 | { | |
1589 | unsigned long timeout = msecs_to_jiffies(msecs) + 1; | |
1590 | ||
1591 | while (timeout) { | |
1592 | set_current_state(TASK_UNINTERRUPTIBLE); | |
1593 | timeout = schedule_timeout(timeout); | |
1594 | } | |
1595 | } | |
1596 | ||
1597 | EXPORT_SYMBOL(msleep); | |
1598 | ||
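A short standalone sketch of why msleep() adds one jiffy above: msecs_to_jiffies() rounds up to whole jiffies and the extra jiffy guards against the first tick arriving almost immediately, so msleep() can oversleep but never undersleep. The msecs_to_jiffies() expansion below assumes the simple case where HZ divides 1000:

#include <stdio.h>

#define HZ 100

/* Assumed simple form of msecs_to_jiffies() for HZ values that divide 1000. */
static unsigned long msecs_to_jiffies(unsigned int m)
{
	return (m + (1000 / HZ) - 1) / (1000 / HZ);
}

int main(void)
{
	unsigned int msecs = 1;
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	/* 1 ms rounds up to 1 jiffy (10 ms at HZ=100), plus 1 jiffy of guard time. */
	printf("msleep(%u) sleeps between %lu and %lu ms at HZ=%d\n",
	       msecs, (timeout - 1) * (1000 / HZ), timeout * (1000 / HZ), HZ);
	return 0;
}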
1599 | /** | |
96ec3efd | 1600 | * msleep_interruptible - sleep waiting for signals |
1da177e4 LT |
1601 | * @msecs: Time in milliseconds to sleep for |
1602 | */ | |
1603 | unsigned long msleep_interruptible(unsigned int msecs) | |
1604 | { | |
1605 | unsigned long timeout = msecs_to_jiffies(msecs) + 1; | |
1606 | ||
1607 | while (timeout && !signal_pending(current)) { | |
1608 | set_current_state(TASK_INTERRUPTIBLE); | |
1609 | timeout = schedule_timeout(timeout); | |
1610 | } | |
1611 | return jiffies_to_msecs(timeout); | |
1612 | } | |
1613 | ||
1614 | EXPORT_SYMBOL(msleep_interruptible); |