#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/timer.h>
#include <linux/acpi_pmtmr.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/clocksource.h>
#include <linux/percpu.h>
#include <linux/timex.h>
#include <linux/static_key.h>

#include <asm/hpet.h>
#include <asm/timer.h>
#include <asm/vgtod.h>
#include <asm/time.h>
#include <asm/delay.h>
#include <asm/hypervisor.h>
#include <asm/nmi.h>
#include <asm/x86_init.h>
#include <asm/geode.h>
#include <asm/apic.h>

unsigned int __read_mostly cpu_khz;	/* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);

unsigned int __read_mostly tsc_khz;
EXPORT_SYMBOL(tsc_khz);

/*
 * TSC can be unstable due to cpufreq or due to unsynced TSCs
 */
static int __read_mostly tsc_unstable;

/* native_sched_clock() is called before tsc_init(), so
   we must start with the TSC soft disabled to prevent
   erroneous rdtsc usage on !boot_cpu_has(X86_FEATURE_TSC) processors */
static int __read_mostly tsc_disabled = -1;

static DEFINE_STATIC_KEY_FALSE(__use_tsc);

int tsc_clocksource_reliable;

static u32 art_to_tsc_numerator;
static u32 art_to_tsc_denominator;
static u64 art_to_tsc_offset;
struct clocksource *art_related_clocksource;

/*
 * Use a ring-buffer like data structure, where a writer advances the head by
 * writing a new data entry and a reader advances the tail when it observes a
 * new entry.
 *
 * Writers are made to wait on readers until there's space to write a new
 * entry.
 *
 * This means that we can always use an {offset, mul} pair to compute a ns
 * value that is 'roughly' in the right direction, even if we're writing a new
 * {offset, mul} pair during the clock read.
 *
 * The down-side is that we can no longer guarantee strict monotonicity
 * (assuming the TSC was monotonic to begin with), because while we compute
 * the intersection point of the two clock slopes and make sure the time is
 * continuous at the point of switching, we can no longer guarantee a reader
 * is strictly before or after the switch point.
 *
 * It does mean a reader no longer needs to disable IRQs in order to avoid
 * CPU-Freq updates messing with its times, and similarly an NMI reader will
 * no longer run the risk of hitting half-written state.
 */

struct cyc2ns {
	struct cyc2ns_data data[2];	/*  0 + 2*24 = 48 */
	struct cyc2ns_data *head;	/* 48 + 8    = 56 */
	struct cyc2ns_data *tail;	/* 56 + 8    = 64 */
}; /* exactly fits one cacheline */

static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns);

struct cyc2ns_data *cyc2ns_read_begin(void)
{
	struct cyc2ns_data *head;

	preempt_disable();

	head = this_cpu_read(cyc2ns.head);
	/*
	 * Ensure we observe the entry when we observe the pointer to it.
	 * Matches the wmb from cyc2ns_write_end().
	 */
	smp_read_barrier_depends();
	head->__count++;
	barrier();

	return head;
}

void cyc2ns_read_end(struct cyc2ns_data *head)
{
	barrier();
	/*
	 * If we're the outermost nested read, update the tail pointer
	 * when we're done. This notifies possible pending writers
	 * that we've observed the head pointer and that the other
	 * entry is now free.
	 */
	if (!--head->__count) {
		/*
		 * x86-TSO does not reorder writes with older reads;
		 * therefore once this write becomes visible to another
		 * cpu, we must be finished reading the cyc2ns_data.
		 *
		 * Matches with cyc2ns_write_begin().
		 */
		this_cpu_write(cyc2ns.tail, head);
	}
	preempt_enable();
}
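
/*
 * Read-side usage sketch (this is exactly what cycles_2_ns() below
 * open-codes for speed):
 *
 *	struct cyc2ns_data *data = cyc2ns_read_begin();
 *	ns = data->cyc2ns_offset +
 *	     mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);
 *	cyc2ns_read_end(data);
 */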

/*
 * Begin writing a new @data entry for @cpu.
 *
 * Assumes some sort of write side lock; currently 'provided' by the assumption
 * that cpufreq will call its notifiers sequentially.
 */
static struct cyc2ns_data *cyc2ns_write_begin(int cpu)
{
	struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);
	struct cyc2ns_data *data = c2n->data;

	if (data == c2n->head)
		data++;

	/* XXX send an IPI to @cpu in order to guarantee a read? */

	/*
	 * When we observe the tail write from cyc2ns_read_end(),
	 * the cpu must be done with that entry and it's safe
	 * to start writing to it.
	 */
	while (c2n->tail == data)
		cpu_relax();

	return data;
}

static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
{
	struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);

	/*
	 * Ensure the @data writes are visible before we publish the
	 * entry. Matches the data-dependency in cyc2ns_read_begin().
	 */
	smp_wmb();

	ACCESS_ONCE(c2n->head) = data;
}
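
/*
 * Write-side usage sketch, as done by set_cyc2ns_scale() below:
 *
 *	data = cyc2ns_write_begin(cpu);
 *	... fill in data->cyc2ns_{mul,shift,offset} ...
 *	cyc2ns_write_end(cpu, data);
 *
 * The begin/end pair flips which of the two cyc2ns_data slots is
 * published via cyc2ns.head.
 */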

/*
 * Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 *  basic equation:
 *              ns = cycles / (freq / ns_per_sec)
 *              ns = cycles * (ns_per_sec / freq)
 *              ns = cycles * (10^9 / (cpu_khz * 10^3))
 *              ns = cycles * (10^6 / cpu_khz)
 *
 *      Then we use scaling math (suggested by [email protected]) to get:
 *              ns = cycles * (10^6 * SC / cpu_khz) / SC
 *              ns = cycles * cyc2ns_scale / SC
 *
 *      And since SC is a constant power of two, we can convert the div
 *  into a shift. The larger SC is, the more accurate the conversion, but
 *  cyc2ns_scale needs to be a 32-bit value so that 32-bit multiplication
 *  (64-bit result) can be used.
 *
 *  We can use a khz divisor instead of mhz to keep better precision.
 *  ([email protected])
 *
 *                      [email protected] "math is hard, lets go shopping!"
 */
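/*
 * Illustrative numbers only (not taken from real hardware): for a
 * 2,000,000 kHz (2 GHz) TSC, clocks_calc_mult_shift() below could pick,
 * e.g., cyc2ns_mul = 2^30 and cyc2ns_shift = 31, so that
 *
 *	ns = (cycles * 2^30) >> 31 = cycles / 2
 *
 * i.e. 0.5 ns per cycle, which is exactly 10^6 / 2,000,000.
 */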

static void cyc2ns_data_init(struct cyc2ns_data *data)
{
	data->cyc2ns_mul = 0;
	data->cyc2ns_shift = 0;
	data->cyc2ns_offset = 0;
	data->__count = 0;
}

static void cyc2ns_init(int cpu)
{
	struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);

	cyc2ns_data_init(&c2n->data[0]);
	cyc2ns_data_init(&c2n->data[1]);

	c2n->head = c2n->data;
	c2n->tail = c2n->data;
}

static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
	struct cyc2ns_data *data, *tail;
	unsigned long long ns;

	/*
	 * See cyc2ns_read_*() for details; replicated in order to avoid
	 * an extra few instructions that came with the abstraction.
	 * Notably, it allows us to only do the __count and tail update
	 * dance when it's actually needed.
	 */

	preempt_disable_notrace();
	data = this_cpu_read(cyc2ns.head);
	tail = this_cpu_read(cyc2ns.tail);

	if (likely(data == tail)) {
		ns = data->cyc2ns_offset;
		ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);
	} else {
		data->__count++;

		barrier();

		ns = data->cyc2ns_offset;
		ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);

		barrier();

		if (!--data->__count)
			this_cpu_write(cyc2ns.tail, data);
	}
	preempt_enable_notrace();

	return ns;
}

static void set_cyc2ns_scale(unsigned long khz, int cpu)
{
	unsigned long long tsc_now, ns_now;
	struct cyc2ns_data *data;
	unsigned long flags;

	local_irq_save(flags);
	sched_clock_idle_sleep_event();

	if (!khz)
		goto done;

	data = cyc2ns_write_begin(cpu);

	tsc_now = rdtsc();
	ns_now = cycles_2_ns(tsc_now);

	/*
	 * Compute a new multiplier as per the above comment and ensure our
	 * time function is continuous; see the comment near struct
	 * cyc2ns_data.
	 */
	clocks_calc_mult_shift(&data->cyc2ns_mul, &data->cyc2ns_shift, khz,
			       NSEC_PER_MSEC, 0);

	/*
	 * cyc2ns_shift is exported via arch_perf_update_userpage() where it is
	 * not expected to be greater than 31 due to the original published
	 * conversion algorithm shifting a 32-bit value (now specifies a 64-bit
	 * value) - refer to the perf_event_mmap_page documentation in
	 * perf_event.h.
	 */
	if (data->cyc2ns_shift == 32) {
		data->cyc2ns_shift = 31;
		data->cyc2ns_mul >>= 1;
	}

	data->cyc2ns_offset = ns_now -
		mul_u64_u32_shr(tsc_now, data->cyc2ns_mul, data->cyc2ns_shift);

	cyc2ns_write_end(cpu, data);

done:
	sched_clock_idle_wakeup_event(0);
	local_irq_restore(flags);
}

/*
 * Scheduler clock - returns current time in nanosec units.
 */
u64 native_sched_clock(void)
{
	if (static_branch_likely(&__use_tsc)) {
		u64 tsc_now = rdtsc();

		/* return the value in ns */
		return cycles_2_ns(tsc_now);
	}

	/*
	 * Fall back to jiffies if there's no TSC available:
	 * ( But note that we still use it if the TSC is marked
	 *   unstable. We do this because unlike Time Of Day,
	 *   the scheduler clock tolerates small errors and it's
	 *   very important for it to be as fast as the platform
	 *   can achieve it. )
	 */

	/* No locking but a rare wrong value is not a big deal: */
	return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
}

/*
 * Generate a sched_clock if you already have a TSC value.
 */
u64 native_sched_clock_from_tsc(u64 tsc)
{
	return cycles_2_ns(tsc);
}

/* We need to define a real function for sched_clock, to override the
   weak default version */
#ifdef CONFIG_PARAVIRT
unsigned long long sched_clock(void)
{
	return paravirt_sched_clock();
}
#else
unsigned long long
sched_clock(void) __attribute__((alias("native_sched_clock")));
#endif

int check_tsc_unstable(void)
{
	return tsc_unstable;
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);

#ifdef CONFIG_X86_TSC
int __init notsc_setup(char *str)
{
	pr_warn("Kernel compiled with CONFIG_X86_TSC, cannot disable TSC completely\n");
	tsc_disabled = 1;
	return 1;
}
#else
/*
 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
 * in cpu/common.c
 */
int __init notsc_setup(char *str)
{
	setup_clear_cpu_cap(X86_FEATURE_TSC);
	return 1;
}
#endif

__setup("notsc", notsc_setup);

static int no_sched_irq_time;

static int __init tsc_setup(char *str)
{
	if (!strcmp(str, "reliable"))
		tsc_clocksource_reliable = 1;
	if (!strncmp(str, "noirqtime", 9))
		no_sched_irq_time = 1;
	return 1;
}

__setup("tsc=", tsc_setup);

#define MAX_RETRIES	5
#define SMI_TRESHOLD	50000

/*
 * Read TSC and the reference counters. Take care of SMI disturbance
 */
static u64 tsc_read_refs(u64 *p, int hpet)
{
	u64 t1, t2;
	int i;

	for (i = 0; i < MAX_RETRIES; i++) {
		t1 = get_cycles();
		if (hpet)
			*p = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
		else
			*p = acpi_pm_read_early();
		t2 = get_cycles();
		if ((t2 - t1) < SMI_TRESHOLD)
			return t2;
	}
	return ULLONG_MAX;
}

/*
 * Calculate the TSC frequency from HPET reference
 */
static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
{
	u64 tmp;

	if (hpet2 < hpet1)
		hpet2 += 0x100000000ULL;
	hpet2 -= hpet1;
	tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
	do_div(tmp, 1000000);
	do_div(deltatsc, tmp);

	return (unsigned long) deltatsc;
}

/*
 * Calculate the TSC frequency from PMTimer reference
 */
static unsigned long calc_pmtimer_ref(u64 deltatsc, u64 pm1, u64 pm2)
{
	u64 tmp;

	if (!pm1 && !pm2)
		return ULONG_MAX;

	if (pm2 < pm1)
		pm2 += (u64)ACPI_PM_OVRRUN;
	pm2 -= pm1;
	tmp = pm2 * 1000000000LL;
	do_div(tmp, PMTMR_TICKS_PER_SEC);
	do_div(deltatsc, tmp);

	return (unsigned long) deltatsc;
}
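
/*
 * In both helpers above, @deltatsc is the elapsed TSC count already
 * multiplied by 10^6 by the caller (see native_calibrate_cpu() and
 * tsc_refine_calibration_work()), and the reference delta is first
 * converted to nanoseconds (the HPET period is in femtoseconds, the PM
 * timer runs at PMTMR_TICKS_PER_SEC). Dividing cycles * 10^6 by elapsed
 * nanoseconds therefore yields the TSC frequency in kHz (cycles per
 * millisecond).
 */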

#define CAL_MS		10
#define CAL_LATCH	(PIT_TICK_RATE / (1000 / CAL_MS))
#define CAL_PIT_LOOPS	1000

#define CAL2_MS		50
#define CAL2_LATCH	(PIT_TICK_RATE / (1000 / CAL2_MS))
#define CAL2_PIT_LOOPS	5000


/*
 * Try to calibrate the TSC against the Programmable
 * Interrupt Timer and return the frequency of the TSC
 * in kHz.
 *
 * Return ULONG_MAX on failure to calibrate.
 */
static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
{
	u64 tsc, t1, t2, delta;
	unsigned long tscmin, tscmax;
	int pitcnt;

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Setup CTC channel 2* for mode 0, (interrupt on terminal
	 * count mode), binary count. Set the latch register to the
	 * requested timeout (LSB then MSB) to begin countdown.
	 */
	outb(0xb0, 0x43);
	outb(latch & 0xff, 0x42);
	outb(latch >> 8, 0x42);

	tsc = t1 = t2 = get_cycles();

	pitcnt = 0;
	tscmax = 0;
	tscmin = ULONG_MAX;
	while ((inb(0x61) & 0x20) == 0) {
		t2 = get_cycles();
		delta = t2 - tsc;
		tsc = t2;
		if ((unsigned long) delta < tscmin)
			tscmin = (unsigned int) delta;
		if ((unsigned long) delta > tscmax)
			tscmax = (unsigned int) delta;
		pitcnt++;
	}

	/*
	 * Sanity checks:
	 *
	 * If we were not able to read the PIT more than loopmin
	 * times, then we have been hit by a massive SMI
	 *
	 * If the maximum is 10 times larger than the minimum,
	 * then we got hit by an SMI as well.
	 */
	if (pitcnt < loopmin || tscmax > 10 * tscmin)
		return ULONG_MAX;

	/* Calculate the PIT value */
	delta = t2 - t1;
	do_div(delta, ms);
	return delta;
}
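
/*
 * Note on the result: the loop above runs for exactly 'ms' milliseconds
 * (the PIT latch was programmed for that interval), so TSC-cycles / ms
 * is directly the TSC frequency in kHz.
 */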

/*
 * This reads the current MSB of the PIT counter, and
 * checks if we are running on sufficiently fast and
 * non-virtualized hardware.
 *
 * Our expectations are:
 *
 *  - the PIT is running at roughly 1.19MHz
 *
 *  - each IO is going to take about 1us on real hardware,
 *    but we allow it to be much faster (by a factor of 10) or
 *    _slightly_ slower (ie we allow up to a 2us read+counter
 *    update - anything else implies an unacceptably slow CPU
 *    or PIT for the fast calibration to work).
 *
 *  - with 256 PIT ticks to read the value, we have 214us to
 *    see the same MSB (and overhead like doing a single TSC
 *    read per MSB value etc).
 *
 *  - We're doing 2 reads per loop (LSB, MSB), and we expect
 *    them each to take about a microsecond on real hardware.
 *    So we expect a count value of around 100. But we'll be
 *    generous, and accept anything over 50.
 *
 *  - if the PIT is stuck, and we see *many* more reads, we
 *    return early (and the next caller of pit_expect_msb()
 *    will then consider it a failure when they don't see the
 *    next expected value).
 *
 * These expectations mean that we know that we have seen the
 * transition from one expected value to another with a fairly
 * high accuracy, and we didn't miss any events. We can thus
 * use the TSC value at the transitions to calculate a pretty
 * good value for the TSC frequency.
 */
static inline int pit_verify_msb(unsigned char val)
{
	/* Ignore LSB */
	inb(0x42);
	return inb(0x42) == val;
}

static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap)
{
	int count;
	u64 tsc = 0, prev_tsc = 0;

	for (count = 0; count < 50000; count++) {
		if (!pit_verify_msb(val))
			break;
		prev_tsc = tsc;
		tsc = get_cycles();
	}
	*deltap = get_cycles() - prev_tsc;
	*tscp = tsc;

	/*
	 * We require _some_ success, but the quality control
	 * will be based on the error terms on the TSC values.
	 */
	return count > 5;
}

/*
 * How many MSB values do we want to see? We aim for
 * a maximum error rate of 500ppm (in practice the
 * real error is much smaller), but refuse to spend
 * more than 50ms on it.
 */
#define MAX_QUICK_PIT_MS 50
#define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)

static unsigned long quick_pit_calibrate(void)
{
	int i;
	u64 tsc, delta;
	unsigned long d1, d2;

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Counter 2, mode 0 (one-shot), binary count
	 *
	 * NOTE! Mode 2 decrements by two (and then the
	 * output is flipped each time, giving the same
	 * final output frequency as a decrement-by-one),
	 * so mode 0 is much better when looking at the
	 * individual counts.
	 */
	outb(0xb0, 0x43);

	/* Start at 0xffff */
	outb(0xff, 0x42);
	outb(0xff, 0x42);

	/*
	 * The PIT starts counting at the next edge, so we
	 * need to delay for a microsecond. The easiest way
	 * to do that is to just read back the 16-bit counter
	 * once from the PIT.
	 */
	pit_verify_msb(0);

	if (pit_expect_msb(0xff, &tsc, &d1)) {
		for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) {
			if (!pit_expect_msb(0xff-i, &delta, &d2))
				break;

			delta -= tsc;

			/*
			 * Extrapolate the error and fail fast if the error will
			 * never be below 500 ppm.
			 */
			if (i == 1 &&
			    d1 + d2 >= (delta * MAX_QUICK_PIT_ITERATIONS) >> 11)
				return 0;

			/*
			 * Iterate until the error is less than 500 ppm
			 */
			if (d1+d2 >= delta >> 11)
				continue;

			/*
			 * Check the PIT one more time to verify that
			 * all TSC reads were stable wrt the PIT.
			 *
			 * This also guarantees serialization of the
			 * last cycle read ('d2') in pit_expect_msb.
			 */
			if (!pit_verify_msb(0xfe - i))
				break;
			goto success;
		}
	}
	pr_info("Fast TSC calibration failed\n");
	return 0;

success:
	/*
	 * Ok, if we get here, then we've seen the
	 * MSB of the PIT decrement 'i' times, and the
	 * error has shrunk to less than 500 ppm.
	 *
	 * As a result, we can depend on there not being
	 * any odd delays anywhere, and the TSC reads are
	 * reliable (within the error).
	 *
	 * kHz = ticks / time-in-seconds / 1000;
	 * kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000
	 * kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000)
	 */
	delta *= PIT_TICK_RATE;
	do_div(delta, i*256*1000);
	pr_info("Fast TSC calibration using PIT\n");
	return delta;
}
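
/*
 * Worked example of the formula above, with made-up numbers: if the MSB
 * stepped i = 50 times and the TSC advanced by delta = 32,000,000 cycles
 * over that window, then
 *
 *	kHz = 32,000,000 * 1,193,182 / (50 * 256 * 1000) ~= 2,982,955
 *
 * i.e. roughly a 2.98 GHz TSC (50 * 256 PIT ticks at 1.193182 MHz is
 * about 10.7 ms).
 */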

/**
 * native_calibrate_tsc
 * Determine TSC frequency via CPUID, else return 0.
 */
unsigned long native_calibrate_tsc(void)
{
	unsigned int eax_denominator, ebx_numerator, ecx_hz, edx;
	unsigned int crystal_khz;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (boot_cpu_data.cpuid_level < 0x15)
		return 0;

	eax_denominator = ebx_numerator = ecx_hz = edx = 0;

	/* CPUID 15H TSC/Crystal ratio, plus optionally Crystal Hz */
	cpuid(0x15, &eax_denominator, &ebx_numerator, &ecx_hz, &edx);

	if (ebx_numerator == 0 || eax_denominator == 0)
		return 0;

	crystal_khz = ecx_hz / 1000;

	if (crystal_khz == 0) {
		switch (boot_cpu_data.x86_model) {
		case 0x4E:	/* SKL */
		case 0x5E:	/* SKL */
			crystal_khz = 24000;	/* 24.0 MHz */
			break;
		case 0x5C:	/* BXT */
			crystal_khz = 19200;	/* 19.2 MHz */
			break;
		}
	}

	return crystal_khz * ebx_numerator / eax_denominator;
}
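
/*
 * Illustration with hypothetical leaf-0x15 values: a 24,000 kHz crystal
 * with a TSC/crystal ratio of 300/2 gives 24,000 * 300 / 2 = 3,600,000,
 * i.e. a 3.6 GHz TSC.
 */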

static unsigned long cpu_khz_from_cpuid(void)
{
	unsigned int eax_base_mhz, ebx_max_mhz, ecx_bus_mhz, edx;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (boot_cpu_data.cpuid_level < 0x16)
		return 0;

	eax_base_mhz = ebx_max_mhz = ecx_bus_mhz = edx = 0;

	cpuid(0x16, &eax_base_mhz, &ebx_max_mhz, &ecx_bus_mhz, &edx);

	return eax_base_mhz * 1000;
}

/**
 * native_calibrate_cpu - calibrate the cpu on boot
 */
unsigned long native_calibrate_cpu(void)
{
	u64 tsc1, tsc2, delta, ref1, ref2;
	unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
	unsigned long flags, latch, ms, fast_calibrate;
	int hpet = is_hpet_enabled(), i, loopmin;

	fast_calibrate = cpu_khz_from_cpuid();
	if (fast_calibrate)
		return fast_calibrate;

	fast_calibrate = cpu_khz_from_msr();
	if (fast_calibrate)
		return fast_calibrate;

	local_irq_save(flags);
	fast_calibrate = quick_pit_calibrate();
	local_irq_restore(flags);
	if (fast_calibrate)
		return fast_calibrate;

	/*
	 * Run 3 calibration loops to get the lowest frequency value
	 * (the best estimate). We use two different calibration modes
	 * here:
	 *
	 * 1) PIT loop. We set the PIT Channel 2 to oneshot mode and
	 * load a timeout of 10ms (50ms for the retry). We read the time
	 * right after we started the timer and wait until the PIT count
	 * down reaches zero. In each wait loop iteration we read the TSC
	 * and check the delta to the previous read. We keep track of the
	 * min and max values of that delta. The delta is mostly defined
	 * by the IO time of the PIT access, so we can detect when an
	 * SMI/SMM disturbance happened between the two reads. If the
	 * maximum time is significantly larger than the minimum time,
	 * then we discard the result and have another try.
	 *
	 * 2) Reference counter. If available we use the HPET or the
	 * PMTIMER as a reference to check the sanity of that value.
	 * We use separate TSC readouts and check inside of the
	 * reference read for an SMI/SMM disturbance. We discard
	 * disturbed values here as well. We do that around the PIT
	 * calibration delay loop as we have to wait for a certain
	 * amount of time anyway.
	 */

	/* Preset PIT loop values */
	latch = CAL_LATCH;
	ms = CAL_MS;
	loopmin = CAL_PIT_LOOPS;

	for (i = 0; i < 3; i++) {
		unsigned long tsc_pit_khz;

		/*
		 * Read the start value and the reference count of
		 * hpet/pmtimer when available. Then do the PIT
		 * calibration, which will take at least 'ms'
		 * milliseconds, and read the end value.
		 */
		local_irq_save(flags);
		tsc1 = tsc_read_refs(&ref1, hpet);
		tsc_pit_khz = pit_calibrate_tsc(latch, ms, loopmin);
		tsc2 = tsc_read_refs(&ref2, hpet);
		local_irq_restore(flags);

		/* Pick the lowest PIT TSC calibration so far */
		tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);

		/* hpet or pmtimer available? */
		if (ref1 == ref2)
			continue;

		/* Check whether the sampling was disturbed by an SMI */
		if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX)
			continue;

		tsc2 = (tsc2 - tsc1) * 1000000LL;
		if (hpet)
			tsc2 = calc_hpet_ref(tsc2, ref1, ref2);
		else
			tsc2 = calc_pmtimer_ref(tsc2, ref1, ref2);

		tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2);

		/* Check the reference deviation */
		delta = ((u64) tsc_pit_min) * 100;
		do_div(delta, tsc_ref_min);

		/*
		 * If both calibration results are inside a 10% window
		 * then we can be sure that the calibration
		 * succeeded. We break out of the loop right away. We
		 * use the reference value, as it is more precise.
		 */
		if (delta >= 90 && delta <= 110) {
			pr_info("PIT calibration matches %s. %d loops\n",
				hpet ? "HPET" : "PMTIMER", i + 1);
			return tsc_ref_min;
		}

		/*
		 * Check whether PIT failed more than once. This
		 * happens in virtualized environments. We need to
		 * give the virtual PC a slightly longer timeframe for
		 * the HPET/PMTIMER to make the result precise.
		 */
		if (i == 1 && tsc_pit_min == ULONG_MAX) {
			latch = CAL2_LATCH;
			ms = CAL2_MS;
			loopmin = CAL2_PIT_LOOPS;
		}
	}

	/*
	 * Now check the results.
	 */
	if (tsc_pit_min == ULONG_MAX) {
		/* PIT gave no useful value */
		pr_warn("Unable to calibrate against PIT\n");

		/* We don't have an alternative source, disable TSC */
		if (!hpet && !ref1 && !ref2) {
			pr_notice("No reference (HPET/PMTIMER) available\n");
			return 0;
		}

		/* The alternative source failed as well, disable TSC */
		if (tsc_ref_min == ULONG_MAX) {
			pr_warn("HPET/PMTIMER calibration failed\n");
			return 0;
		}

		/* Use the alternative source */
		pr_info("using %s reference calibration\n",
			hpet ? "HPET" : "PMTIMER");

		return tsc_ref_min;
	}

	/* We don't have an alternative source, use the PIT calibration value */
	if (!hpet && !ref1 && !ref2) {
		pr_info("Using PIT calibration value\n");
		return tsc_pit_min;
	}

	/* The alternative source failed, use the PIT calibration value */
	if (tsc_ref_min == ULONG_MAX) {
		pr_warn("HPET/PMTIMER calibration failed. Using PIT calibration.\n");
		return tsc_pit_min;
	}

	/*
	 * The calibration values differ too much. When in doubt, we use
	 * the PIT value as we know that there are PMTIMERs around
	 * running at double speed. At least we let the user know:
	 */
	pr_warn("PIT calibration deviates from %s: %lu %lu\n",
		hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
	pr_info("Using PIT calibration value\n");
	return tsc_pit_min;
}

int recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
	unsigned long cpu_khz_old = cpu_khz;

	if (!boot_cpu_has(X86_FEATURE_TSC))
		return -ENODEV;

	cpu_khz = x86_platform.calibrate_cpu();
	tsc_khz = x86_platform.calibrate_tsc();
	if (tsc_khz == 0)
		tsc_khz = cpu_khz;
	else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
		cpu_khz = tsc_khz;
	cpu_data(0).loops_per_jiffy = cpufreq_scale(cpu_data(0).loops_per_jiffy,
						    cpu_khz_old, cpu_khz);

	return 0;
#else
	return -ENODEV;
#endif
}

EXPORT_SYMBOL(recalibrate_cpu_khz);


static unsigned long long cyc2ns_suspend;

void tsc_save_sched_clock_state(void)
{
	if (!sched_clock_stable())
		return;

	cyc2ns_suspend = sched_clock();
}

/*
 * Even on processors with invariant TSC, TSC gets reset in some of the
 * ACPI system sleep states. And in some systems the BIOS seems to reinit
 * TSC to an arbitrary value (still sync'd across cpu's) during resume from
 * such sleep states. To cope with this, recompute the cyc2ns_offset for each
 * cpu so that sched_clock() continues from the point where it was left off
 * during suspend.
 */
void tsc_restore_sched_clock_state(void)
{
	unsigned long long offset;
	unsigned long flags;
	int cpu;

	if (!sched_clock_stable())
		return;

	local_irq_save(flags);

	/*
	 * We're coming out of suspend, there's no concurrency yet; don't
	 * bother being nice about the RCU stuff, just write to both
	 * data fields.
	 */

	this_cpu_write(cyc2ns.data[0].cyc2ns_offset, 0);
	this_cpu_write(cyc2ns.data[1].cyc2ns_offset, 0);

	offset = cyc2ns_suspend - sched_clock();

	for_each_possible_cpu(cpu) {
		per_cpu(cyc2ns.data[0].cyc2ns_offset, cpu) = offset;
		per_cpu(cyc2ns.data[1].cyc2ns_offset, cpu) = offset;
	}

	local_irq_restore(flags);
}

#ifdef CONFIG_CPU_FREQ

/* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
 * changes.
 *
 * RED-PEN: On SMP we assume all CPUs run with the same frequency. It's
 * not that important because current Opteron setups do not support
 * scaling on SMP anyway.
 *
 * Should fix up last_tsc too. Currently gettimeofday in the
 * first tick after the change will be slightly wrong.
 */

static unsigned int  ref_freq;
static unsigned long loops_per_jiffy_ref;
static unsigned long tsc_khz_ref;

static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct cpufreq_freqs *freq = data;
	unsigned long *lpj;

	lpj = &boot_cpu_data.loops_per_jiffy;
#ifdef CONFIG_SMP
	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
		lpj = &cpu_data(freq->cpu).loops_per_jiffy;
#endif

	if (!ref_freq) {
		ref_freq = freq->old;
		loops_per_jiffy_ref = *lpj;
		tsc_khz_ref = tsc_khz;
	}
	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
			(val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
		*lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);

		tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			mark_tsc_unstable("cpufreq changes");

		set_cyc2ns_scale(tsc_khz, freq->cpu);
	}

	return 0;
}
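
/*
 * cpufreq_scale(old, div, mult) returns old * mult / div, so the notifier
 * above keeps tsc_khz and loops_per_jiffy proportional to the new frequency
 * relative to the reference values captured on the first notification.
 */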

static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call  = time_cpufreq_notifier
};

static int __init cpufreq_register_tsc_scaling(void)
{
	if (!boot_cpu_has(X86_FEATURE_TSC))
		return 0;
	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;
	cpufreq_register_notifier(&time_cpufreq_notifier_block,
				CPUFREQ_TRANSITION_NOTIFIER);
	return 0;
}

core_initcall(cpufreq_register_tsc_scaling);

#endif /* CONFIG_CPU_FREQ */

#define ART_CPUID_LEAF (0x15)
#define ART_MIN_DENOMINATOR (1)


/*
 * If ART is present, detect the numerator:denominator to convert to TSC
 */
static void detect_art(void)
{
	unsigned int unused[2];

	if (boot_cpu_data.cpuid_level < ART_CPUID_LEAF)
		return;

	cpuid(ART_CPUID_LEAF, &art_to_tsc_denominator,
	      &art_to_tsc_numerator, unused, unused+1);

	/* Don't enable ART in a VM, non-stop TSC required */
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR) ||
	    !boot_cpu_has(X86_FEATURE_NONSTOP_TSC) ||
	    art_to_tsc_denominator < ART_MIN_DENOMINATOR)
		return;

	if (rdmsrl_safe(MSR_IA32_TSC_ADJUST, &art_to_tsc_offset))
		return;

	/* Make this sticky over multiple CPU init calls */
	setup_force_cpu_cap(X86_FEATURE_ART);
}


/* clocksource code */

static struct clocksource clocksource_tsc;

/*
 * We used to compare the TSC to the cycle_last value in the clocksource
 * structure to avoid a nasty time-warp. This can be observed in a
 * very small window right after one CPU updated cycle_last under
 * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
 * is smaller than the cycle_last reference value due to a TSC which
 * is slightly behind. This delta is nowhere else observable, but in
 * that case it results in a forward time jump in the range of hours
 * due to the unsigned delta calculation of the time keeping core
 * code, which is necessary to support wrapping clocksources like pm
 * timer.
 *
 * This sanity check is now done in the core timekeeping code, by
 * checking the result of read_tsc() - cycle_last for being negative.
 * That works because CLOCKSOURCE_MASK(64) does not mask out any bit.
 */
static cycle_t read_tsc(struct clocksource *cs)
{
	return (cycle_t)rdtsc_ordered();
}

/*
 * .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc()
 */
static struct clocksource clocksource_tsc = {
	.name			= "tsc",
	.rating			= 300,
	.read			= read_tsc,
	.mask			= CLOCKSOURCE_MASK(64),
	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
				  CLOCK_SOURCE_MUST_VERIFY,
	.archdata		= { .vclock_mode = VCLOCK_TSC },
};

void mark_tsc_unstable(char *reason)
{
	if (!tsc_unstable) {
		tsc_unstable = 1;
		clear_sched_clock_stable();
		disable_sched_clock_irqtime();
		pr_info("Marking TSC unstable due to %s\n", reason);
		/* Change only the rating, when not registered */
		if (clocksource_tsc.mult)
			clocksource_mark_unstable(&clocksource_tsc);
		else {
			clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE;
			clocksource_tsc.rating = 0;
		}
	}
}

EXPORT_SYMBOL_GPL(mark_tsc_unstable);

static void __init check_system_tsc_reliable(void)
{
#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
	if (is_geode_lx()) {
		/* RTSC counts during suspend */
#define RTSC_SUSP 0x100
		unsigned long res_low, res_high;

		rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
		/* Geode_LX - the OLPC CPU has a very reliable TSC */
		if (res_low & RTSC_SUSP)
			tsc_clocksource_reliable = 1;
	}
#endif
	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
		tsc_clocksource_reliable = 1;
}

/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
int unsynchronized_tsc(void)
{
	if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_unstable)
		return 1;

#ifdef CONFIG_SMP
	if (apic_is_clustered_box())
		return 1;
#endif

	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;

	if (tsc_clocksource_reliable)
		return 0;
	/*
	 * Intel systems are normally all synchronized.
	 * Exceptions must mark TSC as unstable:
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
		/* assume multi socket systems are not synchronized: */
		if (num_possible_cpus() > 1)
			return 1;
	}

	return 0;
}

/*
 * Convert ART to TSC given numerator/denominator found in detect_art()
 */
struct system_counterval_t convert_art_to_tsc(cycle_t art)
{
	u64 tmp, res, rem;

	rem = do_div(art, art_to_tsc_denominator);

	res = art * art_to_tsc_numerator;
	tmp = rem * art_to_tsc_numerator;

	do_div(tmp, art_to_tsc_denominator);
	res += tmp + art_to_tsc_offset;

	return (struct system_counterval_t) {.cs = art_related_clocksource,
					     .cycles = res};
}
EXPORT_SYMBOL(convert_art_to_tsc);
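
/*
 * Note on the arithmetic above: the conversion is
 *
 *	TSC = ART * numerator / denominator + offset
 *
 * computed as quotient and remainder separately so that the intermediate
 * multiplication cannot overflow 64 bits for large ART values.
 */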

static void tsc_refine_calibration_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work);
/**
 * tsc_refine_calibration_work - Further refine tsc freq calibration
 * @work - ignored.
 *
 * This function uses delayed work over a period of a
 * second to further refine the TSC freq value. Since this is
 * timer based, instead of loop based, we don't block the boot
 * process while this longer calibration is done.
 *
 * If there are any calibration anomalies (too many SMIs, etc),
 * or the refined calibration is off by more than 1% from the fast
 * early calibration, we throw out the new calibration and use the
 * early calibration.
 */
static void tsc_refine_calibration_work(struct work_struct *work)
{
	static u64 tsc_start = -1, ref_start;
	static int hpet;
	u64 tsc_stop, ref_stop, delta;
	unsigned long freq;

	/* Don't bother refining TSC on unstable systems */
	if (check_tsc_unstable())
		goto out;

	/*
	 * Since the work is started early in boot, we may be
	 * delayed the first time we expire. So set the workqueue
	 * again once we know timers are working.
	 */
	if (tsc_start == -1) {
		/*
		 * Only set hpet once, to avoid mixing hardware
		 * if the hpet becomes enabled later.
		 */
		hpet = is_hpet_enabled();
		schedule_delayed_work(&tsc_irqwork, HZ);
		tsc_start = tsc_read_refs(&ref_start, hpet);
		return;
	}

	tsc_stop = tsc_read_refs(&ref_stop, hpet);

	/* hpet or pmtimer available? */
	if (ref_start == ref_stop)
		goto out;

	/* Check whether the sampling was disturbed by an SMI */
	if (tsc_start == ULLONG_MAX || tsc_stop == ULLONG_MAX)
		goto out;

	delta = tsc_stop - tsc_start;
	delta *= 1000000LL;
	if (hpet)
		freq = calc_hpet_ref(delta, ref_start, ref_stop);
	else
		freq = calc_pmtimer_ref(delta, ref_start, ref_stop);

	/* Make sure we're within 1% */
	if (abs(tsc_khz - freq) > tsc_khz/100)
		goto out;

	tsc_khz = freq;
	pr_info("Refined TSC clocksource calibration: %lu.%03lu MHz\n",
		(unsigned long)tsc_khz / 1000,
		(unsigned long)tsc_khz % 1000);

	/* Inform the TSC deadline clockevent devices about the recalibration */
	lapic_update_tsc_freq();

out:
	if (boot_cpu_has(X86_FEATURE_ART))
		art_related_clocksource = &clocksource_tsc;
	clocksource_register_khz(&clocksource_tsc, tsc_khz);
}


static int __init init_tsc_clocksource(void)
{
	if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_disabled > 0 || !tsc_khz)
		return 0;

	if (tsc_clocksource_reliable)
		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
	/* lower the rating if we already know it's unstable: */
	if (check_tsc_unstable()) {
		clocksource_tsc.rating = 0;
		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
	}

	if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
		clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;

	/*
	 * Trust the results of the earlier calibration on systems
	 * exporting a reliable TSC.
	 */
	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
		clocksource_register_khz(&clocksource_tsc, tsc_khz);
		return 0;
	}

	schedule_delayed_work(&tsc_irqwork, 0);
	return 0;
}
/*
 * We use device_initcall here, to ensure we run after the hpet
 * is fully initialized, which may occur at fs_initcall time.
 */
device_initcall(init_tsc_clocksource);

void __init tsc_init(void)
{
	u64 lpj;
	int cpu;

	if (!boot_cpu_has(X86_FEATURE_TSC)) {
		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
		return;
	}

	cpu_khz = x86_platform.calibrate_cpu();
	tsc_khz = x86_platform.calibrate_tsc();

	/*
	 * Trust non-zero tsc_khz as authoritative,
	 * and use it to sanity check cpu_khz,
	 * which will be off if the system timer is off.
	 */
	if (tsc_khz == 0)
		tsc_khz = cpu_khz;
	else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
		cpu_khz = tsc_khz;

	if (!tsc_khz) {
		mark_tsc_unstable("could not calculate TSC khz");
		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
		return;
	}

	pr_info("Detected %lu.%03lu MHz processor\n",
		(unsigned long)cpu_khz / 1000,
		(unsigned long)cpu_khz % 1000);

	/*
	 * Secondary CPUs do not run through tsc_init(), so set up
	 * all the scale factors for all CPUs, assuming the same
	 * speed as the bootup CPU. (cpufreq notifiers will fix this
	 * up if their speed diverges)
	 */
	for_each_possible_cpu(cpu) {
		cyc2ns_init(cpu);
		set_cyc2ns_scale(tsc_khz, cpu);
	}

	if (tsc_disabled > 0)
		return;

	/* now allow native_sched_clock() to use rdtsc */

	tsc_disabled = 0;
	static_branch_enable(&__use_tsc);

	if (!no_sched_irq_time)
		enable_sched_clock_irqtime();

	lpj = ((u64)tsc_khz * 1000);
	do_div(lpj, HZ);
	lpj_fine = lpj;

	use_tsc_delay();

	if (unsynchronized_tsc())
		mark_tsc_unstable("TSCs unsynchronized");

	check_system_tsc_reliable();

	detect_art();
}

#ifdef CONFIG_SMP
/*
 * If we have a constant TSC and are using the TSC for the delay loop,
 * we can skip clock calibration if another cpu in the same socket has already
 * been calibrated. This assumes that CONSTANT_TSC applies to all
 * cpus in the socket - this should be a safe assumption.
 */
unsigned long calibrate_delay_is_known(void)
{
	int sibling, cpu = smp_processor_id();
	struct cpumask *mask = topology_core_cpumask(cpu);

	if (!tsc_disabled && !cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC))
		return 0;

	if (!mask)
		return 0;

	sibling = cpumask_any_but(mask, cpu);
	if (sibling < nr_cpu_ids)
		return cpu_data(sibling).loops_per_jiffy;
	return 0;
}
#endif