#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/acpi_pmtmr.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/delay.h>
#include <linux/clocksource.h>
#include <linux/percpu.h>
#include <linux/timex.h>

#include <asm/hpet.h>
#include <asm/timer.h>
#include <asm/vgtod.h>
#include <asm/time.h>
#include <asm/delay.h>
#include <asm/hypervisor.h>
#include <asm/nmi.h>
#include <asm/x86_init.h>

unsigned int __read_mostly cpu_khz;	/* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);

unsigned int __read_mostly tsc_khz;
EXPORT_SYMBOL(tsc_khz);

/*
 * TSC can be unstable due to cpufreq or due to unsynced TSCs
 */
static int __read_mostly tsc_unstable;

/* native_sched_clock() is called before tsc_init(), so
   we must start with the TSC soft disabled to prevent
   erroneous rdtsc usage on !cpu_has_tsc processors */
static int __read_mostly tsc_disabled = -1;

static int tsc_clocksource_reliable;
/*
 * Scheduler clock - returns current time in nanosec units.
 */
u64 native_sched_clock(void)
{
	u64 this_offset;

	/*
	 * Fall back to jiffies if there's no TSC available:
	 * ( But note that we still use it if the TSC is marked
	 *   unstable. We do this because unlike Time Of Day,
	 *   the scheduler clock tolerates small errors and it's
	 *   very important for it to be as fast as the platform
	 *   can achieve it. )
	 */
	if (unlikely(tsc_disabled)) {
		/* No locking but a rare wrong value is not a big deal: */
		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
	}

	/* read the Time Stamp Counter: */
	rdtscll(this_offset);

	/* return the value in ns */
	return __cycles_2_ns(this_offset);
}

/* We need to define a real function for sched_clock, to override the
   weak default version */
#ifdef CONFIG_PARAVIRT
unsigned long long sched_clock(void)
{
	return paravirt_sched_clock();
}
#else
unsigned long long
sched_clock(void) __attribute__((alias("native_sched_clock")));
#endif

int check_tsc_unstable(void)
{
	return tsc_unstable;
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);

#ifdef CONFIG_X86_TSC
int __init notsc_setup(char *str)
{
	printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
			"cannot disable TSC completely.\n");
	tsc_disabled = 1;
	return 1;
}
#else
/*
 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
 * in cpu/common.c
 */
int __init notsc_setup(char *str)
{
	setup_clear_cpu_cap(X86_FEATURE_TSC);
	return 1;
}
#endif

__setup("notsc", notsc_setup);

static int __init tsc_setup(char *str)
{
	if (!strcmp(str, "reliable"))
		tsc_clocksource_reliable = 1;
	return 1;
}

__setup("tsc=", tsc_setup);

#define MAX_RETRIES	5
#define SMI_TRESHOLD	50000
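/*
 * SMI_TRESHOLD bounds, in TSC cycles, how far apart the two get_cycles()
 * reads in tsc_read_refs() below may be before the sample is assumed to
 * have been disturbed (e.g. by an SMI) and is retried.
 */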

/*
 * Read TSC and the reference counters. Take care of SMI disturbance
 */
static u64 tsc_read_refs(u64 *p, int hpet)
{
	u64 t1, t2;
	int i;

	for (i = 0; i < MAX_RETRIES; i++) {
		t1 = get_cycles();
		if (hpet)
			*p = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
		else
			*p = acpi_pm_read_early();
		t2 = get_cycles();
		if ((t2 - t1) < SMI_TRESHOLD)
			return t2;
	}
	return ULLONG_MAX;
}

/*
 * Calculate the TSC frequency from HPET reference
 */
static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
{
	u64 tmp;

	if (hpet2 < hpet1)
		hpet2 += 0x100000000ULL;
	hpet2 -= hpet1;
	tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
	do_div(tmp, 1000000);
	do_div(deltatsc, tmp);

	return (unsigned long) deltatsc;
}

/*
 * Calculate the TSC frequency from PMTimer reference
 */
static unsigned long calc_pmtimer_ref(u64 deltatsc, u64 pm1, u64 pm2)
{
	u64 tmp;

	if (!pm1 && !pm2)
		return ULONG_MAX;

	if (pm2 < pm1)
		pm2 += (u64)ACPI_PM_OVRRUN;
	pm2 -= pm1;
	tmp = pm2 * 1000000000LL;
	do_div(tmp, PMTMR_TICKS_PER_SEC);
	do_div(deltatsc, tmp);

	return (unsigned long) deltatsc;
}

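/*
 * Rough unit check for the two reference calculations above, assuming the
 * caller pre-multiplies the TSC delta by 10^6 as native_calibrate_tsc()
 * does: HPET ticks * HPET_PERIOD is the elapsed time in femtoseconds and
 * dividing by 10^6 turns it into nanoseconds, so (cycles * 10^6) / ns is
 * cycles per millisecond, i.e. kHz. The PM timer variant computes the
 * elapsed nanoseconds as pm_ticks * 10^9 / PMTMR_TICKS_PER_SEC instead.
 */
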
#define CAL_MS		10
#define CAL_LATCH	(CLOCK_TICK_RATE / (1000 / CAL_MS))
#define CAL_PIT_LOOPS	1000

#define CAL2_MS		50
#define CAL2_LATCH	(CLOCK_TICK_RATE / (1000 / CAL2_MS))
#define CAL2_PIT_LOOPS	5000


/*
 * Try to calibrate the TSC against the Programmable
 * Interrupt Timer and return the frequency of the TSC
 * in kHz.
 *
 * Return ULONG_MAX on failure to calibrate.
 */
static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
{
	u64 tsc, t1, t2, delta;
	unsigned long tscmin, tscmax;
	int pitcnt;

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Setup CTC channel 2 for mode 0 (interrupt on terminal
	 * count mode), binary count. Load the latch value
	 * (LSB then MSB) to begin the countdown.
	 */
	outb(0xb0, 0x43);
	outb(latch & 0xff, 0x42);
	outb(latch >> 8, 0x42);

	tsc = t1 = t2 = get_cycles();

	pitcnt = 0;
	tscmax = 0;
	tscmin = ULONG_MAX;
	while ((inb(0x61) & 0x20) == 0) {
		t2 = get_cycles();
		delta = t2 - tsc;
		tsc = t2;
		if ((unsigned long) delta < tscmin)
			tscmin = (unsigned int) delta;
		if ((unsigned long) delta > tscmax)
			tscmax = (unsigned int) delta;
		pitcnt++;
	}

	/*
	 * Sanity checks:
	 *
	 * If we were not able to read the PIT more than loopmin
	 * times, then we have been hit by a massive SMI
	 *
	 * If the maximum is 10 times larger than the minimum,
	 * then we got hit by an SMI as well.
	 */
	if (pitcnt < loopmin || tscmax > 10 * tscmin)
		return ULONG_MAX;

	/* Calculate the PIT value */
	delta = t2 - t1;
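	/*
	 * delta is the number of TSC cycles elapsed over 'ms' milliseconds,
	 * so dividing by ms below yields cycles per millisecond, i.e. kHz.
	 */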
	do_div(delta, ms);
	return delta;
}

/*
 * This reads the current MSB of the PIT counter, and
 * checks if we are running on sufficiently fast and
 * non-virtualized hardware.
 *
 * Our expectations are:
 *
 *  - the PIT is running at roughly 1.19MHz
 *
 *  - each IO is going to take about 1us on real hardware,
 *    but we allow it to be much faster (by a factor of 10) or
 *    _slightly_ slower (ie we allow up to a 2us read+counter
 *    update - anything else implies an unacceptably slow CPU
 *    or PIT for the fast calibration to work).
 *
 *  - with 256 PIT ticks to read the value, we have 214us to
 *    see the same MSB (and overhead like doing a single TSC
 *    read per MSB value etc).
 *
 *  - We're doing 2 reads per loop (LSB, MSB), and we expect
 *    them each to take about a microsecond on real hardware.
 *    So we expect a count value of around 100. But we'll be
 *    generous, and accept anything over 50.
 *
 *  - if the PIT is stuck, and we see *many* more reads, we
 *    return early (and the next caller of pit_expect_msb()
 *    then considers it a failure when it doesn't see the
 *    next expected value).
 *
 * These expectations mean that we know that we have seen the
 * transition from one expected value to another with a fairly
 * high accuracy, and we didn't miss any events. We can thus
 * use the TSC value at the transitions to calculate a pretty
 * good value for the TSC frequency.
 */
static inline int pit_verify_msb(unsigned char val)
{
	/* Ignore LSB */
	inb(0x42);
	return inb(0x42) == val;
}

static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap)
{
	int count;
	u64 tsc = 0;

	for (count = 0; count < 50000; count++) {
		if (!pit_verify_msb(val))
			break;
		tsc = get_cycles();
	}
	*deltap = get_cycles() - tsc;
	*tscp = tsc;

	/*
	 * We require _some_ success, but the quality control
	 * will be based on the error terms on the TSC values.
	 */
	return count > 5;
}

/*
 * How many MSB values do we want to see? We aim for
 * a maximum error rate of 500ppm (in practice the
 * real error is much smaller), but refuse to spend
 * more than 25ms on it.
 */
#define MAX_QUICK_PIT_MS 25
#define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)
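/*
 * With PIT_TICK_RATE at roughly 1.193 MHz, 25ms corresponds to about
 * 29830 PIT ticks, i.e. around 116 iterations of 256 ticks each.
 */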

static unsigned long quick_pit_calibrate(void)
{
	int i;
	u64 tsc, delta;
	unsigned long d1, d2;

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Counter 2, mode 0 (one-shot), binary count
	 *
	 * NOTE! Mode 2 decrements by two (and then the
	 * output is flipped each time, giving the same
	 * final output frequency as a decrement-by-one),
	 * so mode 0 is much better when looking at the
	 * individual counts.
	 */
	outb(0xb0, 0x43);

	/* Start at 0xffff */
	outb(0xff, 0x42);
	outb(0xff, 0x42);

	/*
	 * The PIT starts counting at the next edge, so we
	 * need to delay for a microsecond. The easiest way
	 * to do that is to just read back the 16-bit counter
	 * once from the PIT.
	 */
	pit_verify_msb(0);

	if (pit_expect_msb(0xff, &tsc, &d1)) {
		for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) {
			if (!pit_expect_msb(0xff-i, &delta, &d2))
				break;

			/*
			 * Iterate until the error is less than 500 ppm
			 */
			delta -= tsc;
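			/*
			 * d1 and d2 bound the uncertainty of the two TSC
			 * reads; requiring d1+d2 < delta/2048 keeps the
			 * combined read error below ~1/2048 of the measured
			 * interval, i.e. roughly 500 ppm.
			 */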
			if (d1+d2 >= delta >> 11)
				continue;

			/*
			 * Check the PIT one more time to verify that
			 * all TSC reads were stable wrt the PIT.
			 *
			 * This also guarantees serialization of the
			 * last cycle read ('d2') in pit_expect_msb.
			 */
			if (!pit_verify_msb(0xfe - i))
				break;
			goto success;
		}
	}
	printk("Fast TSC calibration failed\n");
	return 0;

success:
	/*
	 * Ok, if we get here, then we've seen the
	 * MSB of the PIT decrement 'i' times, and the
	 * error has shrunk to less than 500 ppm.
	 *
	 * As a result, we can depend on there not being
	 * any odd delays anywhere, and the TSC reads are
	 * reliable (within the error). We also adjust the
	 * delta to the middle of the error bars, just
	 * because it looks nicer.
	 *
	 * kHz = ticks / time-in-seconds / 1000;
	 * kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000
	 * kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000)
	 */
	delta += (long)(d2 - d1)/2;
	delta *= PIT_TICK_RATE;
	do_div(delta, i*256*1000);
	printk("Fast TSC calibration using PIT\n");
	return delta;
}

/**
 * native_calibrate_tsc - calibrate the tsc on boot
 */
unsigned long native_calibrate_tsc(void)
{
	u64 tsc1, tsc2, delta, ref1, ref2;
	unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
	unsigned long flags, latch, ms, fast_calibrate;
	int hpet = is_hpet_enabled(), i, loopmin;

	local_irq_save(flags);
	fast_calibrate = quick_pit_calibrate();
	local_irq_restore(flags);
	if (fast_calibrate)
		return fast_calibrate;

	/*
	 * Run up to 3 calibration loops to get the lowest frequency value
	 * (the best estimate). We use two different calibration modes
	 * here:
	 *
	 * 1) PIT loop. We set the PIT Channel 2 to oneshot mode and
	 * load a timeout of 'ms' milliseconds (10ms initially, 50ms on
	 * retry). We read the time right after we started the timer and
	 * wait until the PIT count down reaches zero. In each wait loop
	 * iteration we read the TSC and check the delta to the previous
	 * read. We keep track of the min and max values of that delta.
	 * The delta is mostly defined by the IO time of the PIT access,
	 * so we can detect when an SMI/SMM disturbance happened between
	 * the two reads. If the maximum time is significantly larger
	 * than the minimum time, then we discard the result and have
	 * another try.
	 *
	 * 2) Reference counter. If available we use the HPET or the
	 * PMTIMER as a reference to check the sanity of that value.
	 * We use separate TSC readouts and check inside of the
	 * reference read for an SMI/SMM disturbance. We discard
	 * disturbed values here as well. We do that around the PIT
	 * calibration delay loop as we have to wait for a certain
	 * amount of time anyway.
	 */

	/* Preset PIT loop values */
	latch = CAL_LATCH;
	ms = CAL_MS;
	loopmin = CAL_PIT_LOOPS;

	for (i = 0; i < 3; i++) {
		unsigned long tsc_pit_khz;

		/*
		 * Read the start value and the reference count of
		 * hpet/pmtimer when available. Then do the PIT
		 * calibration, which will take at least 'ms'
		 * milliseconds, and read the end value.
		 */
		local_irq_save(flags);
		tsc1 = tsc_read_refs(&ref1, hpet);
		tsc_pit_khz = pit_calibrate_tsc(latch, ms, loopmin);
		tsc2 = tsc_read_refs(&ref2, hpet);
		local_irq_restore(flags);

		/* Pick the lowest PIT TSC calibration so far */
		tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);

		/* hpet or pmtimer available ? */
		if (!hpet && !ref1 && !ref2)
			continue;

		/* Check whether the sampling was disturbed by an SMI */
		if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX)
			continue;

		tsc2 = (tsc2 - tsc1) * 1000000LL;
		if (hpet)
			tsc2 = calc_hpet_ref(tsc2, ref1, ref2);
		else
			tsc2 = calc_pmtimer_ref(tsc2, ref1, ref2);

		tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2);

		/* Check the reference deviation */
		delta = ((u64) tsc_pit_min) * 100;
		do_div(delta, tsc_ref_min);

		/*
		 * If both calibration results are inside a 10% window
		 * then we can be sure that the calibration
		 * succeeded. We break out of the loop right away. We
		 * use the reference value, as it is more precise.
		 */
		if (delta >= 90 && delta <= 110) {
			printk(KERN_INFO
			       "TSC: PIT calibration matches %s. %d loops\n",
			       hpet ? "HPET" : "PMTIMER", i + 1);
			return tsc_ref_min;
		}

		/*
		 * Check whether PIT failed more than once. This
		 * happens in virtualized environments. We need to
		 * give the virtual PC a slightly longer timeframe for
		 * the HPET/PMTIMER to make the result precise.
		 */
		if (i == 1 && tsc_pit_min == ULONG_MAX) {
			latch = CAL2_LATCH;
			ms = CAL2_MS;
			loopmin = CAL2_PIT_LOOPS;
		}
	}

	/*
	 * Now check the results.
	 */
	if (tsc_pit_min == ULONG_MAX) {
		/* PIT gave no useful value */
		printk(KERN_WARNING "TSC: Unable to calibrate against PIT\n");

		/* We don't have an alternative source, disable TSC */
		if (!hpet && !ref1 && !ref2) {
			printk("TSC: No reference (HPET/PMTIMER) available\n");
			return 0;
		}

		/* The alternative source failed as well, disable TSC */
		if (tsc_ref_min == ULONG_MAX) {
			printk(KERN_WARNING "TSC: HPET/PMTIMER calibration "
			       "failed.\n");
			return 0;
		}

		/* Use the alternative source */
		printk(KERN_INFO "TSC: using %s reference calibration\n",
		       hpet ? "HPET" : "PMTIMER");

		return tsc_ref_min;
	}

	/* We don't have an alternative source, use the PIT calibration value */
	if (!hpet && !ref1 && !ref2) {
		printk(KERN_INFO "TSC: Using PIT calibration value\n");
		return tsc_pit_min;
	}

	/* The alternative source failed, use the PIT calibration value */
	if (tsc_ref_min == ULONG_MAX) {
		printk(KERN_WARNING "TSC: HPET/PMTIMER calibration failed. "
		       "Using PIT calibration\n");
		return tsc_pit_min;
	}

	/*
	 * The calibration values differ too much. In doubt, we use
	 * the PIT value as we know that there are PMTIMERs around
	 * running at double speed. At least we let the user know:
	 */
	printk(KERN_WARNING "TSC: PIT calibration deviates from %s: %lu %lu.\n",
	       hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
	printk(KERN_INFO "TSC: Using PIT calibration value\n");
	return tsc_pit_min;
}

int recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
	unsigned long cpu_khz_old = cpu_khz;

	if (cpu_has_tsc) {
		tsc_khz = x86_platform.calibrate_tsc();
		cpu_khz = tsc_khz;
		cpu_data(0).loops_per_jiffy =
			cpufreq_scale(cpu_data(0).loops_per_jiffy,
					cpu_khz_old, cpu_khz);
		return 0;
	} else
		return -ENODEV;
#else
	return -ENODEV;
#endif
}

EXPORT_SYMBOL(recalibrate_cpu_khz);


/* Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 * basic equation:
 *              ns = cycles / (freq / ns_per_sec)
 *              ns = cycles * (ns_per_sec / freq)
 *              ns = cycles * (10^9 / (cpu_khz * 10^3))
 *              ns = cycles * (10^6 / cpu_khz)
 *
 *      Then we use scaling math (suggested by [email protected]) to get:
 *              ns = cycles * (10^6 * SC / cpu_khz) / SC
 *              ns = cycles * cyc2ns_scale / SC
 *
 *      And since SC is a constant power of two, we can convert the div
 *  into a shift.
 *
 *  We can use khz divisor instead of mhz to keep a better precision, since
 *  cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 *  ([email protected])
 *
 *                      [email protected] "math is hard, lets go shopping!"
 */

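/*
 * Worked example with illustrative numbers: for a hypothetical 2 GHz CPU,
 * cpu_khz = 2000000. Assuming CYC2NS_SCALE_FACTOR is 10 (SC = 1024):
 *
 *	cyc2ns_scale = (10^6 * 1024) / 2000000 = 512
 *	ns = cycles * 512 >> 10 = cycles / 2
 *
 * i.e. the expected 0.5 ns per cycle at 2 GHz.
 */
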
DEFINE_PER_CPU(unsigned long, cyc2ns);
DEFINE_PER_CPU(unsigned long long, cyc2ns_offset);

static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
{
	unsigned long long tsc_now, ns_now, *offset;
	unsigned long flags, *scale;

	local_irq_save(flags);
	sched_clock_idle_sleep_event();

	scale = &per_cpu(cyc2ns, cpu);
	offset = &per_cpu(cyc2ns_offset, cpu);

	rdtscll(tsc_now);
	ns_now = __cycles_2_ns(tsc_now);

	if (cpu_khz) {
		*scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz;
		*offset = ns_now - (tsc_now * *scale >> CYC2NS_SCALE_FACTOR);
	}

	sched_clock_idle_wakeup_event(0);
	local_irq_restore(flags);
}

#ifdef CONFIG_CPU_FREQ

/* Frequency scaling support. Adjust the TSC-based timer when the cpu frequency
 * changes.
 *
 * RED-PEN: On SMP we assume all CPUs run with the same frequency. It's
 * not that important because current Opteron setups do not support
 * scaling on SMP anyway.
 *
 * Should fix up last_tsc too. Currently gettimeofday in the
 * first tick after the change will be slightly wrong.
 */

static unsigned int ref_freq;
static unsigned long loops_per_jiffy_ref;
static unsigned long tsc_khz_ref;

static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct cpufreq_freqs *freq = data;
	unsigned long *lpj;

	if (cpu_has(&cpu_data(freq->cpu), X86_FEATURE_CONSTANT_TSC))
		return 0;

	lpj = &boot_cpu_data.loops_per_jiffy;
#ifdef CONFIG_SMP
	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
		lpj = &cpu_data(freq->cpu).loops_per_jiffy;
#endif

	if (!ref_freq) {
		ref_freq = freq->old;
		loops_per_jiffy_ref = *lpj;
		tsc_khz_ref = tsc_khz;
	}
	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
			(val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
			(val == CPUFREQ_RESUMECHANGE)) {
		*lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);

		tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			mark_tsc_unstable("cpufreq changes");
	}

	set_cyc2ns_scale(tsc_khz, freq->cpu);

	return 0;
}

static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call  = time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
	if (!cpu_has_tsc)
		return 0;
	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;
	cpufreq_register_notifier(&time_cpufreq_notifier_block,
				CPUFREQ_TRANSITION_NOTIFIER);
	return 0;
}

core_initcall(cpufreq_tsc);

#endif /* CONFIG_CPU_FREQ */

/* clocksource code */

static struct clocksource clocksource_tsc;

/*
 * We compare the TSC to the cycle_last value in the clocksource
 * structure to avoid a nasty time-warp. This can be observed in a
 * very small window right after one CPU updated cycle_last under
 * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
 * is smaller than the cycle_last reference value due to a TSC which
 * is slightly behind. This delta is nowhere else observable, but in
 * that case it results in a forward time jump in the range of hours
 * due to the unsigned delta calculation of the time keeping core
 * code, which is necessary to support wrapping clocksources like pm
 * timer.
 */
static cycle_t read_tsc(struct clocksource *cs)
{
	cycle_t ret = (cycle_t)get_cycles();

	return ret >= clocksource_tsc.cycle_last ?
		ret : clocksource_tsc.cycle_last;
}

#ifdef CONFIG_X86_64
static cycle_t __vsyscall_fn vread_tsc(void)
{
	cycle_t ret;

	/*
	 * Surround the RDTSC by barriers, to make sure it's not
	 * speculated to outside the seqlock critical section and
	 * does not cause time warps:
	 */
	rdtsc_barrier();
	ret = (cycle_t)vget_cycles();
	rdtsc_barrier();

	return ret >= __vsyscall_gtod_data.clock.cycle_last ?
		ret : __vsyscall_gtod_data.clock.cycle_last;
}
#endif

static void resume_tsc(struct clocksource *cs)
{
	clocksource_tsc.cycle_last = 0;
}

static struct clocksource clocksource_tsc = {
	.name                   = "tsc",
	.rating                 = 300,
	.read                   = read_tsc,
	.resume                 = resume_tsc,
	.mask                   = CLOCKSOURCE_MASK(64),
	.shift                  = 22,
	.flags                  = CLOCK_SOURCE_IS_CONTINUOUS |
				  CLOCK_SOURCE_MUST_VERIFY,
#ifdef CONFIG_X86_64
	.vread                  = vread_tsc,
#endif
};

void mark_tsc_unstable(char *reason)
{
	if (!tsc_unstable) {
		tsc_unstable = 1;
		sched_clock_stable = 0;
		printk(KERN_INFO "Marking TSC unstable due to %s\n", reason);
		/* Change only the rating, when not registered */
		if (clocksource_tsc.mult)
			clocksource_mark_unstable(&clocksource_tsc);
		else {
			clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE;
			clocksource_tsc.rating = 0;
		}
	}
}

EXPORT_SYMBOL_GPL(mark_tsc_unstable);

static int __init dmi_mark_tsc_unstable(const struct dmi_system_id *d)
{
	printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
			d->ident);
	tsc_unstable = 1;
	return 0;
}

/* List of systems that have known TSC problems */
static struct dmi_system_id __initdata bad_tsc_dmi_table[] = {
	{
		.callback = dmi_mark_tsc_unstable,
		.ident = "IBM Thinkpad 380XD",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
			DMI_MATCH(DMI_BOARD_NAME, "2635FA0"),
		},
	},
	{}
};

static void __init check_system_tsc_reliable(void)
{
#ifdef CONFIG_MGEODE_LX
	/* RTSC counts during suspend */
#define RTSC_SUSP 0x100
	unsigned long res_low, res_high;

	rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
	/* Geode_LX - the OLPC CPU has a very reliable TSC */
	if (res_low & RTSC_SUSP)
		tsc_clocksource_reliable = 1;
#endif
	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
		tsc_clocksource_reliable = 1;
}

/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
__cpuinit int unsynchronized_tsc(void)
{
	if (!cpu_has_tsc || tsc_unstable)
		return 1;

#ifdef CONFIG_SMP
	if (apic_is_clustered_box())
		return 1;
#endif

	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;
	/*
	 * Intel systems are normally all synchronized.
	 * Exceptions must mark TSC as unstable:
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
		/* assume multi socket systems are not synchronized: */
		if (num_possible_cpus() > 1)
			tsc_unstable = 1;
	}

	return tsc_unstable;
}

static void __init init_tsc_clocksource(void)
{
	clocksource_tsc.mult = clocksource_khz2mult(tsc_khz,
			clocksource_tsc.shift);
	if (tsc_clocksource_reliable)
		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
	/* lower the rating if we already know it's unstable: */
	if (check_tsc_unstable()) {
		clocksource_tsc.rating = 0;
		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
	}
	clocksource_register(&clocksource_tsc);
}

#ifdef CONFIG_X86_64
/*
 * calibrate_cpu is used on systems with fixed rate TSCs to determine
 * processor frequency
 */
#define TICK_COUNT 100000000
static unsigned long __init calibrate_cpu(void)
{
	int tsc_start, tsc_now;
	int i, no_ctr_free;
	unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
	unsigned long flags;

	for (i = 0; i < 4; i++)
		if (avail_to_resrv_perfctr_nmi_bit(i))
			break;
	no_ctr_free = (i == 4);
	if (no_ctr_free) {
		WARN(1, KERN_WARNING "Warning: AMD perfctrs busy ... "
		     "cpu_khz value may be incorrect.\n");
		i = 3;
		rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
		wrmsrl(MSR_K7_EVNTSEL3, 0);
		rdmsrl(MSR_K7_PERFCTR3, pmc3);
	} else {
		reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
		reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
	}
	local_irq_save(flags);
	/* start measuring cycles, incrementing from 0 */
	wrmsrl(MSR_K7_PERFCTR0 + i, 0);
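	/*
	 * 0x76 is the AMD "CPU clocks not halted" event; bit 22 enables the
	 * counter and bits 17:16 select counting in both OS and user mode.
	 */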
	wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
	rdtscl(tsc_start);
	do {
		rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
		tsc_now = get_cycles();
	} while ((tsc_now - tsc_start) < TICK_COUNT);

	local_irq_restore(flags);
	if (no_ctr_free) {
		wrmsrl(MSR_K7_EVNTSEL3, 0);
		wrmsrl(MSR_K7_PERFCTR3, pmc3);
		wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
	} else {
		release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
		release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
	}

	return pmc_now * tsc_khz / (tsc_now - tsc_start);
}
#else
static inline unsigned long calibrate_cpu(void) { return cpu_khz; }
#endif

void __init tsc_init(void)
{
	u64 lpj;
	int cpu;

	x86_init.timers.tsc_pre_init();

	if (!cpu_has_tsc)
		return;

	tsc_khz = x86_platform.calibrate_tsc();
	cpu_khz = tsc_khz;

	if (!tsc_khz) {
		mark_tsc_unstable("could not calculate TSC khz");
		return;
	}

	if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) &&
			(boot_cpu_data.x86_vendor == X86_VENDOR_AMD))
		cpu_khz = calibrate_cpu();

	printk("Detected %lu.%03lu MHz processor.\n",
			(unsigned long)cpu_khz / 1000,
			(unsigned long)cpu_khz % 1000);

	/*
	 * Secondary CPUs do not run through tsc_init(), so set up
	 * all the scale factors for all CPUs, assuming the same
	 * speed as the bootup CPU. (cpufreq notifiers will fix this
	 * up if their speed diverges)
	 */
	for_each_possible_cpu(cpu)
		set_cyc2ns_scale(cpu_khz, cpu);

	if (tsc_disabled > 0)
		return;

	/* now allow native_sched_clock() to use rdtsc */
	tsc_disabled = 0;

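	/*
	 * lpj_fine is the number of TSC cycles per jiffy (tsc_khz * 1000 / HZ);
	 * the generic delay-loop calibration can use it instead of measuring.
	 */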
	lpj = ((u64)tsc_khz * 1000);
	do_div(lpj, HZ);
	lpj_fine = lpj;

	use_tsc_delay();
	/* Check and install the TSC clocksource */
	dmi_check_system(bad_tsc_dmi_table);

	if (unsynchronized_tsc())
		mark_tsc_unstable("TSCs unsynchronized");

	check_system_tsc_reliable();
	init_tsc_clocksource();
}