// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2012 The Chromium OS Authors.
 *
 * TSC calibration codes are adapted from Linux kernel
 * arch/x86/kernel/tsc_msr.c and arch/x86/kernel/tsc.c
 */
8 | ||
9 | #include <common.h> | |
52f24238 | 10 | #include <bootstage.h> |
4e51fc23 | 11 | #include <dm.h> |
f7ae49fc | 12 | #include <log.h> |
e761ecdb | 13 | #include <malloc.h> |
1045315d | 14 | #include <time.h> |
4e51fc23 | 15 | #include <timer.h> |
0b992e49 | 16 | #include <asm/cpu.h> |
401d1c4f | 17 | #include <asm/global_data.h> |
e761ecdb SG |
18 | #include <asm/io.h> |
19 | #include <asm/i8254.h> | |
20 | #include <asm/ibmpc.h> | |
21 | #include <asm/msr.h> | |
22 | #include <asm/u-boot-x86.h> | |
c05ed00a | 23 | #include <linux/delay.h> |
e761ecdb | 24 | |
/* Number of FSB-frequency slots in each freq_desc table entry */
#define MAX_NUM_FREQS	9

/* Intel family-6 model numbers with a known crystal frequency */
#define INTEL_FAM6_SKYLAKE_MOBILE	0x4E
#define INTEL_FAM6_ATOM_GOLDMONT	0x5C	/* Apollo Lake */
#define INTEL_FAM6_SKYLAKE_DESKTOP	0x5E
#define INTEL_FAM6_ATOM_GOLDMONT_X	0x5F	/* Denverton */
#define INTEL_FAM6_KABYLAKE_MOBILE	0x8E
#define INTEL_FAM6_KABYLAKE_DESKTOP	0x9E

DECLARE_GLOBAL_DATA_PTR;
/*
 * native_calibrate_tsc
 * Determine TSC frequency via CPUID, else return 0.
 */
static unsigned long native_calibrate_tsc(void)
{
	struct cpuid_result tsc_info;
	unsigned int crystal_freq;

	/* CPUID leaf 0x15 (TSC/crystal ratio) is Intel-specific */
	if (gd->arch.x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (cpuid_eax(0) < 0x15)
		return 0;

	tsc_info = cpuid(0x15);

	/* eax = denominator, ebx = numerator of the TSC/crystal ratio */
	if (tsc_info.ebx == 0 || tsc_info.eax == 0)
		return 0;

	/* ecx = nominal crystal frequency in Hz (may be 0); convert to kHz */
	crystal_freq = tsc_info.ecx / 1000;
	if (!CONFIG_IS_ENABLED(X86_TSC_TIMER_NATIVE) && !crystal_freq) {
		/*
		 * Some models report 0 in ecx: fall back to the known
		 * crystal frequency of that model, or give up
		 */
		switch (gd->arch.x86_model) {
		case INTEL_FAM6_SKYLAKE_MOBILE:
		case INTEL_FAM6_SKYLAKE_DESKTOP:
		case INTEL_FAM6_KABYLAKE_MOBILE:
		case INTEL_FAM6_KABYLAKE_DESKTOP:
			crystal_freq = 24000;	/* 24.0 MHz */
			break;
		case INTEL_FAM6_ATOM_GOLDMONT_X:
			crystal_freq = 25000;	/* 25.0 MHz */
			break;
		case INTEL_FAM6_ATOM_GOLDMONT:
			crystal_freq = 19200;	/* 19.2 MHz */
			break;
		default:
			return 0;
		}
	}

	/* TSC freq (MHz) = crystal kHz * (ebx / eax) / 1000 */
	return (crystal_freq * tsc_info.ebx / tsc_info.eax) / 1000;
}
78 | ||
acc2482f CG |
79 | static unsigned long cpu_mhz_from_cpuid(void) |
80 | { | |
81 | if (gd->arch.x86_vendor != X86_VENDOR_INTEL) | |
82 | return 0; | |
83 | ||
84 | if (cpuid_eax(0) < 0x16) | |
85 | return 0; | |
86 | ||
87 | return cpuid_eax(0x16); | |
88 | } | |
89 | ||
/*
 * According to Intel 64 and IA-32 System Programming Guide,
 * if MSR_PERF_STAT[31] is set, the maximum resolved bus ratio can be
 * read in MSR_PLATFORM_ID[12:8], otherwise in MSR_PERF_STAT[44:40].
 * Unfortunately some Intel Atom SoCs aren't quite compliant to this,
 * so we need manually differentiate SoC families. This is what the
 * field msr_plat does.
 */
struct freq_desc {
	u8 x86_family;	/* CPU family */
	u8 x86_model;	/* model */
	/* 2: use 100MHz, 1: use MSR_PLATFORM_INFO, 0: MSR_IA32_PERF_STATUS */
	u8 msr_plat;
	/* FSB reference frequencies in kHz, indexed by FSB FREQ ID */
	u32 freqs[MAX_NUM_FREQS];
};

/* Per-SoC tables mapping FSB FREQ ID to reference frequency */
static struct freq_desc freq_desc_tables[] = {
	/* PNW */
	{ 6, 0x27, 0, { 0, 0, 0, 0, 0, 99840, 0, 83200, 0 } },
	/* CLV+ */
	{ 6, 0x35, 0, { 0, 133200, 0, 0, 0, 99840, 0, 83200, 0 } },
	/* TNG - Intel Atom processor Z3400 series */
	{ 6, 0x4a, 1, { 0, 100000, 133300, 0, 0, 0, 0, 0, 0 } },
	/* VLV2 - Intel Atom processor E3000, Z3600, Z3700 series */
	{ 6, 0x37, 1, { 83300, 100000, 133300, 116700, 80000, 0, 0, 0, 0 } },
	/* ANN - Intel Atom processor Z3500 series */
	{ 6, 0x5a, 1, { 83300, 100000, 133300, 100000, 0, 0, 0, 0, 0 } },
	/* AMT - Intel Atom processor X7-Z8000 and X5-Z8000 series */
	{ 6, 0x4c, 1, { 83300, 100000, 133300, 116700,
			80000, 93300, 90000, 88900, 87500 } },
	/* Ivybridge */
	{ 6, 0x3a, 2, { 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
};

/*
 * Return the freq_desc_tables[] index matching this family/model,
 * or -1 if the CPU is not listed
 */
static int match_cpu(u8 family, u8 model)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(freq_desc_tables); i++) {
		if ((family == freq_desc_tables[i].x86_family) &&
		    (model == freq_desc_tables[i].x86_model))
			return i;
	}

	return -1;
}

/* Map CPU reference clock freq ID(0-7) to CPU reference clock freq(KHz) */
#define id_to_freq(cpu_index, freq_id) \
	(freq_desc_tables[cpu_index].freqs[freq_id])
140 | ||
/*
 * TSC on Intel Atom SoCs capable of determining TSC frequency by MSR is
 * reliable and the frequency is known (provided by HW).
 *
 * On these platforms PIT/HPET is generally not available so calibration won't
 * work at all and there is no other clocksource to act as a watchdog for the
 * TSC, so we have no other choice than to trust it.
 *
 * Returns the TSC frequency in MHz or 0 if HW does not provide it.
 */
static unsigned long __maybe_unused cpu_mhz_from_msr(void)
{
	u32 lo, hi, ratio, freq_id, freq;
	unsigned long res;
	int cpu_index;

	if (gd->arch.x86_vendor != X86_VENDOR_INTEL)
		return 0;

	cpu_index = match_cpu(gd->arch.x86, gd->arch.x86_model);
	if (cpu_index < 0)
		return 0;

	if (freq_desc_tables[cpu_index].msr_plat) {
		rdmsr(MSR_PLATFORM_INFO, lo, hi);
		/* ratio is in bits [15:8] of the low dword */
		ratio = (lo >> 8) & 0xff;
	} else {
		rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
		/* ratio is in bits [44:40], i.e. bits [12:8] of the high dword */
		ratio = (hi >> 8) & 0x1f;
	}
	debug("Maximum core-clock to bus-clock ratio: 0x%x\n", ratio);

	if (freq_desc_tables[cpu_index].msr_plat == 2) {
		/* TODO: Figure out how best to deal with this */
		freq = 100000;	/* assume a 100 MHz reference clock */
		debug("Using frequency: %u KHz\n", freq);
	} else {
		/* Get FSB FREQ ID */
		rdmsr(MSR_FSB_FREQ, lo, hi);
		freq_id = lo & 0x7;
		freq = id_to_freq(cpu_index, freq_id);
		debug("Resolved frequency ID: %u, frequency: %u KHz\n",
		      freq_id, freq);
	}

	/* TSC frequency = maximum resolved freq * maximum resolved bus ratio */
	res = freq * ratio / 1000;
	debug("TSC runs at %lu MHz\n", res);

	return res;
}
192 | ||
/*
 * This reads the current MSB of the PIT counter, and
 * checks if we are running on sufficiently fast and
 * non-virtualized hardware.
 *
 * Our expectations are:
 *
 *  - the PIT is running at roughly 1.19MHz
 *
 *  - each IO is going to take about 1us on real hardware,
 *    but we allow it to be much faster (by a factor of 10) or
 *    _slightly_ slower (ie we allow up to a 2us read+counter
 *    update - anything else implies a unacceptably slow CPU
 *    or PIT for the fast calibration to work.
 *
 *  - with 256 PIT ticks to read the value, we have 214us to
 *    see the same MSB (and overhead like doing a single TSC
 *    read per MSB value etc).
 *
 *  - We're doing 2 reads per loop (LSB, MSB), and we expect
 *    them each to take about a microsecond on real hardware.
 *    So we expect a count value of around 100. But we'll be
 *    generous, and accept anything over 50.
 *
 *  - if the PIT is stuck, and we see *many* more reads, we
 *    return early (and the next caller of pit_expect_msb()
 *    then consider it a failure when they don't see the
 *    next expected value).
 *
 * These expectations mean that we know that we have seen the
 * transition from one expected value to another with a fairly
 * high accuracy, and we didn't miss any events. We can thus
 * use the TSC value at the transitions to calculate a pretty
 * good value for the TSC frequency.
 */
static inline int pit_verify_msb(unsigned char val)
{
	/* Ignore LSB */
	inb(0x42);
	return inb(0x42) == val;
}
234 | ||
/*
 * Spin while the PIT counter MSB still reads 'val', recording the TSC on
 * every successful read. On return, *tscp holds the TSC value at the last
 * read of 'val' and *deltap the TSC delta covering the final read (the
 * per-read error bound). Returns non-zero if enough reads succeeded.
 */
static inline int pit_expect_msb(unsigned char val, u64 *tscp,
				 unsigned long *deltap)
{
	int count;
	u64 tsc = 0, prev_tsc = 0;

	for (count = 0; count < 50000; count++) {
		if (!pit_verify_msb(val))
			break;
		prev_tsc = tsc;
		tsc = rdtsc();
	}
	*deltap = rdtsc() - prev_tsc;
	*tscp = tsc;

	/*
	 * We require _some_ success, but the quality control
	 * will be based on the error terms on the TSC values.
	 */
	return count > 5;
}
256 | ||
/*
 * How many MSB values do we want to see? We aim for
 * a maximum error rate of 500ppm (in practice the
 * real error is much smaller), but refuse to spend
 * more than 50ms on it.
 */
#define MAX_QUICK_PIT_MS 50
#define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)

/*
 * Calibrate the TSC against PIT channel 2 (adapted from Linux
 * quick_pit_calibrate). Returns the TSC frequency in MHz, or 0 on failure.
 */
static unsigned long __maybe_unused quick_pit_calibrate(void)
{
	int i;
	u64 tsc, delta;
	unsigned long d1, d2;

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Counter 2, mode 0 (one-shot), binary count
	 *
	 * NOTE! Mode 2 decrements by two (and then the
	 * output is flipped each time, giving the same
	 * final output frequency as a decrement-by-one),
	 * so mode 0 is much better when looking at the
	 * individual counts.
	 */
	outb(0xb0, 0x43);

	/* Start at 0xffff */
	outb(0xff, 0x42);
	outb(0xff, 0x42);

	/*
	 * The PIT starts counting at the next edge, so we
	 * need to delay for a microsecond. The easiest way
	 * to do that is to just read back the 16-bit counter
	 * once from the PIT.
	 */
	pit_verify_msb(0);

	if (pit_expect_msb(0xff, &tsc, &d1)) {
		for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) {
			if (!pit_expect_msb(0xff-i, &delta, &d2))
				break;

			/*
			 * Iterate until the error is less than 500 ppm
			 */
			delta -= tsc;
			if (d1+d2 >= delta >> 11)
				continue;

			/*
			 * Check the PIT one more time to verify that
			 * all TSC reads were stable wrt the PIT.
			 *
			 * This also guarantees serialization of the
			 * last cycle read ('d2') in pit_expect_msb.
			 */
			if (!pit_verify_msb(0xfe - i))
				break;
			goto success;
		}
	}
	debug("Fast TSC calibration failed\n");
	return 0;

success:
	/*
	 * Ok, if we get here, then we've seen the
	 * MSB of the PIT decrement 'i' times, and the
	 * error has shrunk to less than 500 ppm.
	 *
	 * As a result, we can depend on there not being
	 * any odd delays anywhere, and the TSC reads are
	 * reliable (within the error).
	 *
	 * kHz = ticks / time-in-seconds / 1000;
	 * kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000
	 * kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000)
	 */
	delta *= PIT_TICK_RATE;
	delta /= (i*256*1000);
	debug("Fast TSC calibration using PIT\n");
	return delta / 1000;
}
344 | ||
e761ecdb | 345 | /* Get the speed of the TSC timer in MHz */ |
2f80fc50 | 346 | unsigned notrace long get_tbclk_mhz(void) |
e761ecdb | 347 | { |
4e51fc23 | 348 | return get_tbclk() / 1000000; |
e761ecdb SG |
349 | } |
350 | ||
e761ecdb SG |
351 | static ulong get_ms_timer(void) |
352 | { | |
353 | return (get_ticks() * 1000) / get_tbclk(); | |
354 | } | |
355 | ||
356 | ulong get_timer(ulong base) | |
357 | { | |
358 | return get_ms_timer() - base; | |
359 | } | |
360 | ||
2f80fc50 | 361 | ulong notrace timer_get_us(void) |
e761ecdb SG |
362 | { |
363 | return get_ticks() / get_tbclk_mhz(); | |
364 | } | |
365 | ||
/* Boot-time microsecond stamp (used by bootstage): same as timer_get_us() */
ulong timer_get_boot_us(void)
{
	return timer_get_us();
}
370 | ||
/* Busy-wait for 'usec' microseconds by polling the TSC */
void __udelay(unsigned long usec)
{
	u64 now = get_ticks();
	u64 stop;

	/* widen before multiplying so a long delay cannot overflow 32 bits */
	stop = now + (u64)usec * get_tbclk_mhz();

	/* signed compare tolerates the counter passing 'stop' between reads */
	while ((int64_t)(stop - get_ticks()) > 0)
#if defined(CONFIG_QEMU) && defined(CONFIG_SMP)
		/*
		 * Add a 'pause' instruction on qemu target,
		 * to give other VCPUs a chance to run.
		 */
		asm volatile("pause");
#else
		;
#endif
}
389 | ||
8af7bb91 | 390 | static u64 tsc_timer_get_count(struct udevice *dev) |
4e51fc23 BM |
391 | { |
392 | u64 now_tick = rdtsc(); | |
393 | ||
8af7bb91 | 394 | return now_tick - gd->arch.tsc_base; |
4e51fc23 BM |
395 | } |
396 | ||
/*
 * Calibrate the TSC once and cache the result in gd->arch.clock_rate (Hz).
 *
 * Hardware methods are tried in order: CPUID leaf 0x15, CPUID leaf 0x16,
 * model-specific MSRs, then fast PIT calibration. If all of them fail and
 * @early is true, fall back to CONFIG_X86_TSC_TIMER_EARLY_FREQ; otherwise
 * leave clock_rate at 0 so the caller can decide (e.g. use device tree).
 */
static void tsc_timer_ensure_setup(bool early)
{
	if (gd->arch.tsc_inited)
		return;
	if (IS_ENABLED(CONFIG_X86_TSC_READ_BASE))
		gd->arch.tsc_base = rdtsc();

	if (!gd->arch.clock_rate) {
		unsigned long fast_calibrate;	/* TSC frequency in MHz */

		fast_calibrate = native_calibrate_tsc();
		if (fast_calibrate)
			goto done;

		/* Reduce code size by dropping other methods */
		if (CONFIG_IS_ENABLED(X86_TSC_TIMER_NATIVE))
			panic("no timer");

		fast_calibrate = cpu_mhz_from_cpuid();
		if (fast_calibrate)
			goto done;

		fast_calibrate = cpu_mhz_from_msr();
		if (fast_calibrate)
			goto done;

		fast_calibrate = quick_pit_calibrate();
		if (fast_calibrate)
			goto done;

		if (early)
			fast_calibrate = CONFIG_X86_TSC_TIMER_EARLY_FREQ;
		else
			return;	/* leave tsc_inited unset; probe may retry */

done:
		gd->arch.clock_rate = fast_calibrate * 1000000;
	}
	gd->arch.tsc_inited = true;
}
437 | ||
/*
 * Driver-model probe: establish the timer clock rate, preferring the
 * hardware-calibrated value over the device-tree one.
 */
static int tsc_timer_probe(struct udevice *dev)
{
	struct timer_dev_priv *uc_priv = dev_get_uclass_priv(dev);

	/* Try hardware calibration first */
	tsc_timer_ensure_setup(false);
	if (!gd->arch.clock_rate) {
		/*
		 * Use the clock frequency specified in the
		 * device tree as last resort
		 */
		if (!uc_priv->clock_rate)
			panic("TSC frequency is ZERO");
	} else {
		uc_priv->clock_rate = gd->arch.clock_rate;
	}

	return 0;
}
457 | ||
2ff50f5f SG |
458 | unsigned long notrace timer_early_get_rate(void) |
459 | { | |
94e72a6b BM |
460 | /* |
461 | * When TSC timer is used as the early timer, be warned that the timer | |
462 | * clock rate can only be calibrated via some hardware ways. Specifying | |
463 | * it in the device tree won't work for the early timer. | |
464 | */ | |
165db7c4 | 465 | tsc_timer_ensure_setup(true); |
2ff50f5f SG |
466 | |
467 | return gd->arch.clock_rate; | |
468 | } | |
469 | ||
470 | u64 notrace timer_early_get_count(void) | |
471 | { | |
096c71e3 SG |
472 | tsc_timer_ensure_setup(true); |
473 | ||
2ff50f5f SG |
474 | return rdtsc() - gd->arch.tsc_base; |
475 | } | |
476 | ||
4e51fc23 BM |
477 | static const struct timer_ops tsc_timer_ops = { |
478 | .get_count = tsc_timer_get_count, | |
479 | }; | |
480 | ||
8b842be1 | 481 | #if !CONFIG_IS_ENABLED(OF_PLATDATA) |
4e51fc23 BM |
482 | static const struct udevice_id tsc_timer_ids[] = { |
483 | { .compatible = "x86,tsc-timer", }, | |
484 | { } | |
485 | }; | |
8b842be1 | 486 | #endif |
4e51fc23 | 487 | |
9d20db04 SG |
488 | U_BOOT_DRIVER(x86_tsc_timer) = { |
489 | .name = "x86_tsc_timer", | |
4e51fc23 | 490 | .id = UCLASS_TIMER, |
8b842be1 | 491 | .of_match = of_match_ptr(tsc_timer_ids), |
4e51fc23 BM |
492 | .probe = tsc_timer_probe, |
493 | .ops = &tsc_timer_ops, | |
4e51fc23 | 494 | }; |