/*
 * check TSC synchronization.
 *
 * Copyright (C) 2006, Red Hat, Inc., Ingo Molnar
 *
 * We check whether all boot CPUs have their TSCs synchronized,
 * print a warning if not and turn off the TSC clock-source.
 *
 * The warp-check is point-to-point between two CPUs, the CPU
 * initiating the bootup is the 'source CPU', the freshly booting
 * CPU is the 'target CPU'.
 *
 * Only two CPUs may participate - they can enter in any order.
 * ( The serial nature of the boot logic and the CPU hotplug lock
 *   protects against more than 2 CPUs entering this code. )
 */
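/*
 * check_tsc_sync_source() and check_tsc_sync_target() below are the
 * two ends of that handshake: they rendezvous via the 'start_count'
 * and 'stop_count' counters and then run check_tsc_warp()
 * concurrently on both CPUs.
 */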
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/nmi.h>
#include <asm/tsc.h>

/*
 * Entry/exit counters that make sure that both CPUs
 * run the measurement code at once:
 */
static atomic_t start_count;
static atomic_t stop_count;

/*
 * We use a raw spinlock in this exceptional case, because
 * we want to have the fastest, inlined, non-debug version
 * of a critical section, to be able to prove TSC time-warps:
 */
static arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;

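/* shared measurement state, updated under 'sync_lock' by both CPUs: */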
static cycles_t last_tsc;
static cycles_t max_warp;
static int nr_warps;

/*
 * TSC-warp measurement loop running on both CPUs:
 */
static void check_tsc_warp(unsigned int timeout)
{
	cycles_t start, now, prev, end;
	int i;

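	/*
	 * get_cycles() is a plain RDTSC read; rdtsc_barrier() keeps the
	 * CPU from reordering it relative to the surrounding code, so
	 * 'start' is taken exactly here:
	 */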
	rdtsc_barrier();
	start = get_cycles();
	rdtsc_barrier();
	/*
	 * The measurement runs for 'timeout' msecs; tsc_khz is the
	 * number of cycles per msec, so this converts the timeout
	 * into cycles:
	 */
	end = start + (cycles_t) tsc_khz * timeout;
	now = start;

	for (i = 0; ; i++) {
		/*
		 * We take the global lock, measure TSC, save the
		 * previous TSC that was measured (possibly on
		 * another CPU) and update the previous TSC timestamp.
		 * Since both CPUs read and update 'last_tsc' under the
		 * same lock, the timestamps form one serialized
		 * sequence across both CPUs.
		 */
		arch_spin_lock(&sync_lock);
		prev = last_tsc;
		rdtsc_barrier();
		now = get_cycles();
		rdtsc_barrier();
		last_tsc = now;
		arch_spin_unlock(&sync_lock);

		/*
		 * Be nice every now and then - once every 8 iterations -
		 * (and also check whether the measurement is done [we
		 * also insert a 10 million loops safety exit, so we
		 * don't lock up in case the TSC readout is totally
		 * broken]):
		 */
		if (unlikely(!(i & 7))) {
			if (now > end || i > 10000000)
				break;
			cpu_relax();
			touch_nmi_watchdog();
		}
		/*
		 * Outside the critical section we can now see whether
		 * we saw a time-warp of the TSC going backwards:
		 */
		if (unlikely(prev > now)) {
			arch_spin_lock(&sync_lock);
			max_warp = max(max_warp, prev - now);
			nr_warps++;
			arch_spin_unlock(&sync_lock);
		}
	}
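
	/*
	 * A zero delta means the TSC did not advance at all during the
	 * measurement - the readout is broken:
	 */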
	WARN(!(now-start),
		"Warning: zero tsc calibration delta: %Ld [max: %Ld]\n",
			now-start, end-start);
}

/*
 * If the target CPU coming online doesn't have any of its core-siblings
 * online, a timeout of 20msec will be used for the TSC-warp measurement
 * loop. Otherwise a smaller timeout of 2msec will be used, as we have some
 * information about this socket already (and this information grows as we
 * have more and more logical-siblings in that socket).
 *
 * Ideally we should be able to skip the TSC sync check on the other
 * core-siblings, if the first logical CPU in a socket passed the sync test.
 * But as the TSC is per-logical CPU and can potentially be modified wrongly
 * by the BIOS, a TSC sync test of shorter duration should still be able
 * to catch such errors. It also catches the condition where all the
 * cores in the socket don't get reset at the same time.
 */
static inline unsigned int loop_timeout(int cpu)
{
	return (cpumask_weight(topology_core_cpumask(cpu)) > 1) ? 2 : 20;
}

/*
 * Source CPU calls into this - it waits for the freshly booted
 * target CPU to arrive and then starts the measurement:
 */
void check_tsc_sync_source(int cpu)
{
	int cpus = 2;

	/*
	 * No need to check if we already know that the TSC is not
	 * synchronized:
	 */
	if (unsynchronized_tsc())
		return;

	if (tsc_clocksource_reliable) {
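		/*
		 * During boot, print the notice only when the last
		 * possible CPU (nr_cpu_ids-1) comes up; after boot,
		 * print it on every hotplug:
		 */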
		if (cpu == (nr_cpu_ids-1) || system_state != SYSTEM_BOOTING)
			pr_info(
			"Skipped synchronization checks as TSC is reliable.\n");
		return;
	}

	/*
	 * Reset it - in case this is a second bootup:
	 */
	atomic_set(&stop_count, 0);

	/*
	 * Wait for the target to arrive:
	 */
	while (atomic_read(&start_count) != cpus-1)
		cpu_relax();
	/*
	 * Trigger the target to continue into the measurement too:
	 */
	atomic_inc(&start_count);

	check_tsc_warp(loop_timeout(cpu));

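	/*
	 * Wait for the target to finish the measurement, so that
	 * nr_warps and max_warp below are stable when we read them:
	 */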
	while (atomic_read(&stop_count) != cpus-1)
		cpu_relax();

	if (nr_warps) {
		pr_warning("TSC synchronization [CPU#%d -> CPU#%d]:\n",
			smp_processor_id(), cpu);
		pr_warning("Measured %Ld cycles TSC warp between CPUs, "
			   "turning off TSC clock.\n", max_warp);
		mark_tsc_unstable("check_tsc_sync_source failed");
	} else {
		pr_debug("TSC synchronization [CPU#%d -> CPU#%d]: passed\n",
			smp_processor_id(), cpu);
	}

	/*
	 * Reset it - just in case we boot another CPU later:
	 */
	atomic_set(&start_count, 0);
	nr_warps = 0;
	max_warp = 0;
	last_tsc = 0;

	/*
	 * Let the target continue with the bootup:
	 */
	atomic_inc(&stop_count);
}

/*
 * Freshly booted CPUs call into this:
 */
void check_tsc_sync_target(void)
{
	int cpus = 2;

	if (unsynchronized_tsc() || tsc_clocksource_reliable)
		return;

	/*
	 * Register this CPU's participation and wait for the
	 * source CPU to start the measurement:
	 */
	atomic_inc(&start_count);
	while (atomic_read(&start_count) != cpus)
		cpu_relax();

	check_tsc_warp(loop_timeout(smp_processor_id()));

	/*
	 * Ok, we are done:
	 */
	atomic_inc(&stop_count);

	/*
	 * Wait for the source CPU to print stuff:
	 */
	while (atomic_read(&stop_count) != cpus)
		cpu_relax();
}
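
/*
 * Both entry points are called from the CPU-bringup path in
 * arch/x86/kernel/smpboot.c: the already-running boot CPU calls
 * check_tsc_sync_source(), the freshly booted CPU calls
 * check_tsc_sync_target().
 */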