/*
 * linux/arch/parisc/kernel/time.c
 *
 * Copyright (C) 1991, 1992, 1995 Linus Torvalds
 * Modifications for ARM (C) 1994, 1995, 1996, 1997 Russell King
 * Copyright (C) 1999 SuSE GmbH, (Philipp Rumpf, [email protected])
 *
 * 1994-07-02  Alan Modra
 *	fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
 * 1998-12-20  Updated NTP code according to technical memorandum Jan '96
 *	"A Kernel Model for Precision Timekeeping" by Dave Mills
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/profile.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/param.h>
#include <asm/pdc.h>
#include <asm/led.h>

#include <linux/timex.h>

/* xtime and wall_jiffies keep wall-clock time */
extern unsigned long wall_jiffies;

static long clocktick;	/* timer cycles per tick */
static long halftick;

#ifdef CONFIG_SMP
extern void smp_do_timer(struct pt_regs *regs);
#endif

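/* Background note (not from the original source): on PA-RISC, control
 * register %cr16 is the interval timer.  mfctl(16) reads its free-running
 * cycle counter, and mtctl(x, 16) sets the comparison value that raises
 * the timer interrupt when the counter reaches it.
 */
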
irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	long now;
	long next_tick;
	int nticks;
	int cpu = smp_processor_id();

	profile_tick(CPU_PROFILING, regs);

	now = mfctl(16);
	/* initialize next_tick to the time at the last clocktick */
	next_tick = cpu_data[cpu].it_value;

	/* Since time passes between the interrupt and the mfctl()
	 * above, it is never true that last_tick + clocktick == now.  If
	 * we never missed a clocktick we could set next_tick = last_tick
	 * + clocktick, but we may miss ticks, hence the loop.
	 *
	 * Variables are *signed*.
	 */

	nticks = 0;
	while ((next_tick - now) < halftick) {
		next_tick += clocktick;
		nticks++;
	}
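
	/* Worked example (illustrative numbers, not from the original
	 * source): if the handler runs 1.7 ticks after it_value, the
	 * ticks at it_value and it_value + clocktick are overdue and the
	 * tick at it_value + 2 * clocktick is less than halftick away,
	 * so the loop exits with nticks == 3 and next_tick ==
	 * it_value + 3 * clocktick.
	 */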
	mtctl(next_tick, 16);
	cpu_data[cpu].it_value = next_tick;

	while (nticks--) {
#ifdef CONFIG_SMP
		smp_do_timer(regs);
#else
		update_process_times(user_mode(regs));
#endif
		if (cpu == 0) {
			write_seqlock(&xtime_lock);
			do_timer(regs);
			write_sequnlock(&xtime_lock);
		}
	}

	/* check soft power switch status */
	if (cpu == 0 && !atomic_read(&power_tasklet.count))
		tasklet_schedule(&power_tasklet);

	return IRQ_HANDLED;
}


unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	/* If the PSW N (nullify) bit is set, the instruction at pc will
	 * not execute; credit the sample to the preceding instruction. */
	if (regs->gr[0] & PSW_N)
		pc -= 4;

#ifdef CONFIG_SMP
	/* Don't profile lock functions themselves; credit the caller,
	 * whose address is in gr[2], the PA-RISC return pointer. */
	if (in_lock_functions(pc))
		pc = regs->gr[2];
#endif

	return pc;
}
EXPORT_SYMBOL(profile_pc);


/*** converted from ia64 ***/
/*
 * Return the number of microseconds that have elapsed since the last
 * update of wall time (aka xtime aka wall_jiffies).  xtime_lock must
 * be at least read-locked when calling this routine.
 */
static inline unsigned long
gettimeoffset (void)
{
#ifndef CONFIG_SMP
	/*
	 * FIXME: This won't work on SMP because jiffies are updated by
	 * cpu 0.  Once parisc-linux learns the cr16 difference between
	 * processors, this could be made to work.
	 */
	long last_tick;
	long elapsed_cycles;

	/* it_value is the intended time of the next tick */
	last_tick = cpu_data[smp_processor_id()].it_value;

	/* Subtract one tick and account for possible difference between
	 * when we expected the tick and when it actually arrived.
	 * (aka wall vs real)
	 */
	last_tick -= clocktick * (jiffies - wall_jiffies + 1);
	elapsed_cycles = mfctl(16) - last_tick;

	/* PAGE0->mem_10msec is cr16 cycles per 10 ms, so dividing it by
	 * 10000 gives cycles per microsecond.  The precision of this
	 * math could be improved. */
	return elapsed_cycles / (PAGE0->mem_10msec / 10000);
#else
	return 0;
#endif
}
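
/* Worked example for the conversion above (hypothetical numbers, not from
 * the original source): if cr16 ticks at 250 MHz, PAGE0->mem_10msec is
 * 2,500,000 cycles, mem_10msec / 10000 is 250 cycles per microsecond, and
 * 1,000,000 elapsed cycles yields 4000 microseconds.
 */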

void
do_gettimeofday (struct timeval *tv)
{
	unsigned long flags, seq, usec, sec;

	do {
		seq = read_seqbegin_irqsave(&xtime_lock, flags);
		usec = gettimeoffset();
		sec = xtime.tv_sec;
		usec += (xtime.tv_nsec / 1000);
	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));

	/* gettimeoffset() may cover more than one tick when ticks are
	 * pending, so usec can overflow a second more than once;
	 * normalize with a loop rather than a single test. */
	while (usec >= 1000000) {
		usec -= 1000000;
		++sec;
	}

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}

EXPORT_SYMBOL(do_gettimeofday);
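
/* Background note (not from the original source): read_seqbegin_irqsave()
 * samples the xtime_lock sequence counter and read_seqretry_irqrestore()
 * rechecks it, so the do/while loop above re-reads the time until no
 * writer updated xtime mid-read, yielding a consistent snapshot without
 * readers blocking the timer interrupt's writer side.
 */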

int
do_settimeofday (struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);
	{
		/*
		 * This is revolting.  We need to set "xtime"
		 * correctly.  However, the value in this location is
		 * the value at the most recent update of wall time.
		 * Discover what correction gettimeofday() would have
		 * done, and then undo it!
		 */
		nsec -= gettimeoffset() * 1000;

		wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
		wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

		set_normalized_timespec(&xtime, sec, nsec);
		set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

		ntp_clear();
	}
	write_sequnlock_irq(&xtime_lock);
	clock_was_set();
	return 0;
}
EXPORT_SYMBOL(do_settimeofday);
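
/* Note on the arithmetic above (explanatory, not from the original
 * source): wall_to_monotonic is maintained so that the sum
 * xtime + wall_to_monotonic stays continuous across clock steps.
 * Stepping the wall clock forward by 5 seconds therefore decreases
 * wall_to_monotonic by the same 5 seconds.
 */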

/*
 * XXX: We can do better than this.
 * Returns nanoseconds.
 */

unsigned long long sched_clock(void)
{
	return (unsigned long long)jiffies * (1000000000 / HZ);
}
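
/* Resolution note (explanatory, not from the original source): this
 * clock advances only once per jiffy, in steps of 1000000000 / HZ ns
 * (10 ms at HZ == 100); the "we can do better" above presumably means
 * deriving sub-jiffy resolution from the cr16 cycle counter.
 */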


void __init time_init(void)
{
	unsigned long next_tick;
	static struct pdc_tod tod_data;

	/* PAGE0->mem_10msec is timer cycles per 10 ms, i.e. per 1/100 s,
	 * so 100 * mem_10msec is cycles per second and dividing by HZ
	 * gives timer cycles per jiffy. */
	clocktick = (100 * PAGE0->mem_10msec) / HZ;
	halftick = clocktick / 2;
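
	/* Worked example (hypothetical numbers, not from the original
	 * source): with cr16 at 250 MHz, PAGE0->mem_10msec == 2,500,000,
	 * so at HZ == 100 this gives clocktick == 2,500,000 cycles per
	 * jiffy and halftick == 1,250,000. */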

	/* Setup clock interrupt timing */

	next_tick = mfctl(16);
	next_tick += clocktick;
	cpu_data[smp_processor_id()].it_value = next_tick;

	/* kick off Itimer (CR16) */
	mtctl(next_tick, 16);

	if (pdc_tod_read(&tod_data) == 0) {
		write_seqlock_irq(&xtime_lock);
		xtime.tv_sec = tod_data.tod_sec;
		xtime.tv_nsec = tod_data.tod_usec * 1000;
		set_normalized_timespec(&wall_to_monotonic,
					-xtime.tv_sec, -xtime.tv_nsec);
		write_sequnlock_irq(&xtime_lock);
	} else {
		printk(KERN_ERR "Error reading tod clock\n");
		xtime.tv_sec = 0;
		xtime.tv_nsec = 0;
	}
}