/*
 *	x86 SMP booting functions
 *
 *	(c) 1995 Alan Cox, Building #3 <[email protected]>
 *	(c) 1998, 1999, 2000, 2009 Ingo Molnar <[email protected]>
 *	Copyright 2001 Andi Kleen, SuSE Labs.
 *
 *	Much of the core SMP work is based on previous work by Thomas Radke, to
 *	whom a great many thanks are extended.
 *
 *	Thanks to Intel for making available several different Pentium,
 *	Pentium Pro and Pentium-II/Xeon MP machines.
 *	Original development of Linux SMP code supported by Caldera.
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 *
 *	Fixes
 *	Felix Koop	:	NR_CPUS used properly
 *	Jose Renau	:	Handle single CPU case.
 *	Alan Cox	:	By repeated request 8) - Total BogoMIPS report.
 *	Greg Wright	:	Fix for kernel stacks panic.
 *	Erich Boleyn	:	MP v1.4 and additional changes.
 *	Matthias Sattler	:	Changes for 2.1 kernel map.
 *	Michel Lespinasse	:	Changes for 2.1 kernel map.
 *	Michael Chastain	:	Change trampoline.S to gnu as.
 *	Alan Cox	:	Dumb bug: 'B' step PPro's are fine
 *	Ingo Molnar	:	Added APIC timers, based on code
 *				from Jose Renau
 *	Ingo Molnar	:	various cleanups and rewrites
 *	Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs
 *	Andi Kleen	:	Changed for SMP boot into long mode.
 *	Martin J. Bligh	:	Added support for multi-quad systems
 *	Dave Jones	:	Report invalid combinations of Athlon CPUs.
 *	Rusty Russell	:	Hacked into shape for new "hotplug" boot process.
 *	Andi Kleen	:	Converted to new state machine.
 *	Ashok Raj	:	CPU hotplug support
 *	Glauber Costa	:	i386 and x86_64 integration
 */

#include <linux/init.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/nmi.h>
#include <linux/tboot.h>
#include <linux/stackprotector.h>
#include <linux/gfp.h>

#include <asm/acpi.h>
#include <asm/desc.h>
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/trampoline.h>
#include <asm/cpu.h>
#include <asm/numa.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mtrr.h>
#include <asm/vmi.h>
#include <asm/apic.h>
#include <asm/setup.h>
#include <asm/uv/uv.h>
#include <linux/mc146818rtc.h>

#include <asm/smpboot_hooks.h>
#include <asm/i8259.h>

#ifdef CONFIG_X86_32
u8 apicid_2_node[MAX_APICID];
static int low_mappings;
#endif

/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

/*
 * Store all idle threads; these can be reused instead of creating
 * a new thread, and this also avoids complicated thread-destroy
 * functionality for idle threads.
 */
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
 * removed after init for !CONFIG_HOTPLUG_CPU.
 */
static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
#define get_idle_for_cpu(x)      (per_cpu(idle_thread_array, x))
#define set_idle_for_cpu(x, p)   (per_cpu(idle_thread_array, x) = (p))
#else
static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata;
#define get_idle_for_cpu(x)      (idle_thread_array[(x)])
#define set_idle_for_cpu(x, p)   (idle_thread_array[(x)] = (p))
#endif

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;

/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

atomic_t init_deasserted;

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)
/* which node each logical CPU is on */
int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
EXPORT_SYMBOL(cpu_to_node_map);

/* set up a mapping between cpu and node. */
static void map_cpu_to_node(int cpu, int node)
{
        printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node);
        cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
        cpu_to_node_map[cpu] = node;
}

/* undo a mapping between cpu and node. */
static void unmap_cpu_to_node(int cpu)
{
        int node;

        printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu);
        for (node = 0; node < MAX_NUMNODES; node++)
                cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
        cpu_to_node_map[cpu] = 0;
}
#else /* !(CONFIG_NUMA && CONFIG_X86_32) */
#define map_cpu_to_node(cpu, node)      ({})
#define unmap_cpu_to_node(cpu)  ({})
#endif

#ifdef CONFIG_X86_32
static int boot_cpu_logical_apicid;

u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly =
                                        { [0 ... NR_CPUS-1] = BAD_APICID };

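/*
 * Record this CPU's logical APIC ID and map the CPU onto its home node
 * (falling back to the first online node if that node is offline).
 */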
static void map_cpu_to_logical_apicid(void)
{
        int cpu = smp_processor_id();
        int apicid = logical_smp_processor_id();
        int node = apic->apicid_to_node(apicid);

        if (!node_online(node))
                node = first_online_node;

        cpu_2_logical_apicid[cpu] = apicid;
        map_cpu_to_node(cpu, node);
}

void numa_remove_cpu(int cpu)
{
        cpu_2_logical_apicid[cpu] = BAD_APICID;
        unmap_cpu_to_node(cpu);
}
#else
#define map_cpu_to_logical_apicid()     do {} while (0)
#endif

/*
 * Report back to the Boot Processor.
 * Running on AP.
 */
static void __cpuinit smp_callin(void)
{
        int cpuid, phys_id;
        unsigned long timeout;

        /*
         * If woken up by an INIT in an 82489DX configuration
         * we may get here before an INIT-deassert IPI reaches
         * our local APIC.  We have to wait for the IPI or we'll
         * lock up on an APIC access.
         */
        if (apic->wait_for_init_deassert)
                apic->wait_for_init_deassert(&init_deasserted);

        /*
         * (This works even if the APIC is not enabled.)
         */
        phys_id = read_apic_id();
        cpuid = smp_processor_id();
        if (cpumask_test_cpu(cpuid, cpu_callin_mask)) {
                panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
                                        phys_id, cpuid);
        }
        pr_debug("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);

        /*
         * STARTUP IPIs are fragile beasts as they might sometimes
         * trigger some glue motherboard logic. Complete APIC bus
         * silence for 1 second; this overestimates the time the
         * boot CPU is spending to send the up to 2 STARTUP IPIs
         * by a factor of two. This should be enough.
         */

        /*
         * Waiting 2s total for startup (udelay is not yet working)
         */
        timeout = jiffies + 2*HZ;
        while (time_before(jiffies, timeout)) {
                /*
                 * Has the boot CPU finished its STARTUP sequence?
                 */
                if (cpumask_test_cpu(cpuid, cpu_callout_mask))
                        break;
                cpu_relax();
        }

        if (!time_before(jiffies, timeout)) {
                panic("%s: CPU%d started up but did not get a callout!\n",
                      __func__, cpuid);
        }

        /*
         * the boot CPU has finished the init stage and is spinning
         * on callin_map until we finish. We are free to set up this
         * CPU, first the APIC. (this is probably redundant on most
         * boards)
         */

        pr_debug("CALLIN, before setup_local_APIC().\n");
        if (apic->smp_callin_clear_local_apic)
                apic->smp_callin_clear_local_apic();
        setup_local_APIC();
        end_local_APIC_setup();
        map_cpu_to_logical_apicid();

        /*
         * Need to setup vector mappings before we enable interrupts.
         */
        setup_vector_irq(smp_processor_id());
        /*
         * Get our bogomips.
         *
         * Need to enable IRQs because it can take longer and then
         * the NMI watchdog might kill us.
         */
        local_irq_enable();
        calibrate_delay();
        local_irq_disable();
        pr_debug("Stack at about %p\n", &cpuid);

        /*
         * Save our processor parameters
         */
        smp_store_cpu_info(cpuid);

        notify_cpu_starting(cpuid);

        /*
         * Allow the master to continue.
         */
        cpumask_set_cpu(cpuid, cpu_callin_mask);
}

/*
 * Activate a secondary processor.
 */
notrace static void __cpuinit start_secondary(void *unused)
{
        /*
         * Don't put *anything* before cpu_init(); SMP booting is so
         * fragile that we want to limit the things done here to the
         * most necessary things.
         */
        vmi_bringup();
        cpu_init();
        preempt_disable();
        smp_callin();

        /* otherwise gcc will move up smp_processor_id before the cpu_init */
        barrier();
        /*
         * Check TSC synchronization with the BP:
         */
        check_tsc_sync_target();

        if (nmi_watchdog == NMI_IO_APIC) {
                legacy_pic->chip->mask(0);
                enable_NMI_through_LVT0();
                legacy_pic->chip->unmask(0);
        }

#ifdef CONFIG_X86_32
        while (low_mappings)
                cpu_relax();
        __flush_tlb_all();
#endif

        /* This must be done before setting cpu_online_mask */
        set_cpu_sibling_map(raw_smp_processor_id());
        wmb();

        /*
         * We need to hold call_lock, so there is no inconsistency
         * between the time smp_call_function() determines number of
         * IPI recipients, and the time when the determination is made
         * for which cpus receive the IPI. Holding this
         * lock helps us to not include this cpu in a currently in progress
         * smp_call_function().
         *
         * We need to hold vector_lock so the set of online cpus
         * does not change while we are assigning vectors to cpus.  Holding
         * this lock ensures we don't half assign or remove an irq from a cpu.
         */
        ipi_call_lock();
        lock_vector_lock();
        set_cpu_online(smp_processor_id(), true);
        unlock_vector_lock();
        ipi_call_unlock();
        per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
        x86_platform.nmi_init();

        /* enable local interrupts */
        local_irq_enable();

        /* to prevent fake stack check failure in clock setup */
        boot_init_stack_canary();

        x86_cpuinit.setup_percpu_clockev();

        wmb();
        cpu_idle();
}

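/*
 * Copy one cpuinfo_x86 into another. With CONFIG_CPUMASK_OFFSTACK,
 * llc_shared_map is a pointer that must survive the structure copy.
 */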
#ifdef CONFIG_CPUMASK_OFFSTACK
/* In this case, llc_shared_map is a pointer to a cpumask. */
static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst,
                                    const struct cpuinfo_x86 *src)
{
        struct cpumask *llc = dst->llc_shared_map;
        *dst = *src;
        dst->llc_shared_map = llc;
}
#else
static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst,
                                    const struct cpuinfo_x86 *src)
{
        *dst = *src;
}
#endif /* CONFIG_CPUMASK_OFFSTACK */

/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU
 */

void __cpuinit smp_store_cpu_info(int id)
{
        struct cpuinfo_x86 *c = &cpu_data(id);

        copy_cpuinfo_x86(c, &boot_cpu_data);
        c->cpu_index = id;
        if (id != 0)
                identify_secondary_cpu(c);
}

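/*
 * Link the new CPU into the HT-sibling and core-sibling maps of every
 * CPU set up so far, and keep the booted_cores accounting up to date.
 */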
void __cpuinit set_cpu_sibling_map(int cpu)
{
        int i;
        struct cpuinfo_x86 *c = &cpu_data(cpu);

        cpumask_set_cpu(cpu, cpu_sibling_setup_mask);

        if (smp_num_siblings > 1) {
                for_each_cpu(i, cpu_sibling_setup_mask) {
                        struct cpuinfo_x86 *o = &cpu_data(i);

                        if (c->phys_proc_id == o->phys_proc_id &&
                            c->cpu_core_id == o->cpu_core_id) {
                                cpumask_set_cpu(i, cpu_sibling_mask(cpu));
                                cpumask_set_cpu(cpu, cpu_sibling_mask(i));
                                cpumask_set_cpu(i, cpu_core_mask(cpu));
                                cpumask_set_cpu(cpu, cpu_core_mask(i));
                                cpumask_set_cpu(i, c->llc_shared_map);
                                cpumask_set_cpu(cpu, o->llc_shared_map);
                        }
                }
        } else {
                cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
        }

        cpumask_set_cpu(cpu, c->llc_shared_map);

        if (current_cpu_data.x86_max_cores == 1) {
                cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu));
                c->booted_cores = 1;
                return;
        }

        for_each_cpu(i, cpu_sibling_setup_mask) {
                if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
                    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
                        cpumask_set_cpu(i, c->llc_shared_map);
                        cpumask_set_cpu(cpu, cpu_data(i).llc_shared_map);
                }
                if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
                        cpumask_set_cpu(i, cpu_core_mask(cpu));
                        cpumask_set_cpu(cpu, cpu_core_mask(i));
                        /*
                         * Does this new cpu bringup a new core?
                         */
                        if (cpumask_weight(cpu_sibling_mask(cpu)) == 1) {
                                /*
                                 * for each core in package, increment
                                 * the booted_cores for this new cpu
                                 */
                                if (cpumask_first(cpu_sibling_mask(i)) == i)
                                        c->booted_cores++;
                                /*
                                 * increment the core count for all
                                 * the other cpus in this package
                                 */
                                if (i != cpu)
                                        cpu_data(i).booted_cores++;
                        } else if (i != cpu && !c->booted_cores)
                                c->booted_cores = cpu_data(i).booted_cores;
                }
        }
}

/* maps the cpu to the sched domain representing multi-core */
const struct cpumask *cpu_coregroup_mask(int cpu)
{
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        /*
         * For perf, we return last level cache shared map.
         * And for power savings, we return cpu_core_map
         */
        if ((sched_mc_power_savings || sched_smt_power_savings) &&
            !(cpu_has(c, X86_FEATURE_AMD_DCM)))
                return cpu_core_mask(cpu);
        else
                return c->llc_shared_map;
}

static void impress_friends(void)
{
        int cpu;
        unsigned long bogosum = 0;
        /*
         * Allow the user to impress friends.
         */
        pr_debug("Before bogomips.\n");
        for_each_possible_cpu(cpu)
                if (cpumask_test_cpu(cpu, cpu_callout_mask))
                        bogosum += cpu_data(cpu).loops_per_jiffy;
        printk(KERN_INFO
                "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
                num_online_cpus(),
                bogosum/(500000/HZ),
                (bogosum/(5000/HZ))%100);

        pr_debug("Before bogocount - setting activated=1.\n");
}

void __inquire_remote_apic(int apicid)
{
        unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
        char *names[] = { "ID", "VERSION", "SPIV" };
        int timeout;
        u32 status;

        printk(KERN_INFO "Inquiring remote APIC 0x%x...\n", apicid);

        for (i = 0; i < ARRAY_SIZE(regs); i++) {
                printk(KERN_INFO "... APIC 0x%x %s: ", apicid, names[i]);

                /*
                 * Wait for idle.
                 */
                status = safe_apic_wait_icr_idle();
                if (status)
                        printk(KERN_CONT
                               "a previous APIC delivery may have failed\n");

                apic_icr_write(APIC_DM_REMRD | regs[i], apicid);

                timeout = 0;
                do {
                        udelay(100);
                        status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
                } while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

                switch (status) {
                case APIC_ICR_RR_VALID:
                        status = apic_read(APIC_RRR);
                        printk(KERN_CONT "%08x\n", status);
                        break;
                default:
                        printk(KERN_CONT "failed\n");
                }
        }
}

/*
 * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal
 * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
 * won't ... remember to clear down the APIC, etc later.
 */
int __cpuinit
wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip)
{
        unsigned long send_status, accept_status = 0;
        int maxlvt;

        /* Target chip */
        /* Boot on the stack */
        /* Kick the second */
        apic_icr_write(APIC_DM_NMI | apic->dest_logical, logical_apicid);

        pr_debug("Waiting for send to finish...\n");
        send_status = safe_apic_wait_icr_idle();

        /*
         * Give the other CPU some time to accept the IPI.
         */
        udelay(200);
        if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
                maxlvt = lapic_get_maxlvt();
                if (maxlvt > 3)         /* Due to the Pentium erratum 3AP. */
                        apic_write(APIC_ESR, 0);
                accept_status = (apic_read(APIC_ESR) & 0xEF);
        }
        pr_debug("NMI sent.\n");

        if (send_status)
                printk(KERN_ERR "APIC never delivered???\n");
        if (accept_status)
                printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);

        return (send_status | accept_status);
}

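/*
 * Kick a secondary CPU with the canonical INIT/STARTUP sequence:
 * assert and deassert INIT, then (on integrated APICs) send up to
 * two STARTUP IPIs pointing at the trampoline in start_eip.
 */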
static int __cpuinit
wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
{
        unsigned long send_status, accept_status = 0;
        int maxlvt, num_starts, j;

        maxlvt = lapic_get_maxlvt();

        /*
         * Be paranoid about clearing APIC errors.
         */
        if (APIC_INTEGRATED(apic_version[phys_apicid])) {
                if (maxlvt > 3)         /* Due to the Pentium erratum 3AP. */
                        apic_write(APIC_ESR, 0);
                apic_read(APIC_ESR);
        }

        pr_debug("Asserting INIT.\n");

        /*
         * Turn INIT on target chip
         */
        /*
         * Send IPI
         */
        apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT,
                       phys_apicid);

        pr_debug("Waiting for send to finish...\n");
        send_status = safe_apic_wait_icr_idle();

        mdelay(10);

        pr_debug("Deasserting INIT.\n");

        /* Target chip */
        /* Send IPI */
        apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid);

        pr_debug("Waiting for send to finish...\n");
        send_status = safe_apic_wait_icr_idle();

        mb();
        atomic_set(&init_deasserted, 1);

        /*
         * Should we send STARTUP IPIs ?
         *
         * Determine this based on the APIC version.
         * If we don't have an integrated APIC, don't send the STARTUP IPIs.
         */
        if (APIC_INTEGRATED(apic_version[phys_apicid]))
                num_starts = 2;
        else
                num_starts = 0;

        /*
         * Paravirt / VMI wants a startup IPI hook here to set up the
         * target processor state.
         */
        startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
                         (unsigned long)stack_start.sp);

        /*
         * Run STARTUP IPI loop.
         */
        pr_debug("#startup loops: %d.\n", num_starts);

        for (j = 1; j <= num_starts; j++) {
                pr_debug("Sending STARTUP #%d.\n", j);
                if (maxlvt > 3)         /* Due to the Pentium erratum 3AP. */
                        apic_write(APIC_ESR, 0);
                apic_read(APIC_ESR);
                pr_debug("After apic_write.\n");

                /*
                 * STARTUP IPI
                 */

                /* Target chip */
                /* Boot on the stack */
                /* Kick the second */
                apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12),
                               phys_apicid);

                /*
                 * Give the other CPU some time to accept the IPI.
                 */
                udelay(300);

                pr_debug("Startup point 1.\n");

                pr_debug("Waiting for send to finish...\n");
                send_status = safe_apic_wait_icr_idle();

                /*
                 * Give the other CPU some time to accept the IPI.
                 */
                udelay(200);
                if (maxlvt > 3)         /* Due to the Pentium erratum 3AP. */
                        apic_write(APIC_ESR, 0);
                accept_status = (apic_read(APIC_ESR) & 0xEF);
                if (send_status || accept_status)
                        break;
        }
        pr_debug("After Startup.\n");

        if (send_status)
                printk(KERN_ERR "APIC never delivered???\n");
        if (accept_status)
                printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);

        return (send_status | accept_status);
}

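/*
 * Helpers for forking the idle task of a new CPU: do_boot_cpu() either
 * runs do_fork_idle() directly or schedules it on keventd, then waits
 * on the completion for the forked idle task.
 */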
struct create_idle {
        struct work_struct work;
        struct task_struct *idle;
        struct completion done;
        int cpu;
};

static void __cpuinit do_fork_idle(struct work_struct *work)
{
        struct create_idle *c_idle =
                container_of(work, struct create_idle, work);

        c_idle->idle = fork_idle(c_idle->cpu);
        complete(&c_idle->done);
}

/* reduce the number of lines printed when booting a large cpu count system */
static void __cpuinit announce_cpu(int cpu, int apicid)
{
        static int current_node = -1;
        int node = cpu_to_node(cpu);

        if (system_state == SYSTEM_BOOTING) {
                if (node != current_node) {
                        if (current_node > (-1))
                                pr_cont(" Ok.\n");
                        current_node = node;
                        pr_info("Booting Node %3d, Processors ", node);
                }
                pr_cont(" #%d%s", cpu, cpu == (nr_cpu_ids - 1) ? " Ok.\n" : "");
                return;
        } else
                pr_info("Booting Node %d Processor %d APIC 0x%x\n",
                        node, cpu, apicid);
}

/*
 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
 * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
 * Returns zero if CPU booted OK, else error code from
 * ->wakeup_secondary_cpu.
 */
static int __cpuinit do_boot_cpu(int apicid, int cpu)
{
        unsigned long boot_error = 0;
        unsigned long start_ip;
        int timeout;
        struct create_idle c_idle = {
                .cpu    = cpu,
                .done   = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
        };

        INIT_WORK_ON_STACK(&c_idle.work, do_fork_idle);

        alternatives_smp_switch(1);

        c_idle.idle = get_idle_for_cpu(cpu);

        /*
         * We can't use kernel_thread since we must avoid to
         * reschedule the child.
         */
        if (c_idle.idle) {
                c_idle.idle->thread.sp = (unsigned long) (((struct pt_regs *)
                        (THREAD_SIZE + task_stack_page(c_idle.idle))) - 1);
                init_idle(c_idle.idle, cpu);
                goto do_rest;
        }

        if (!keventd_up() || current_is_keventd())
                c_idle.work.func(&c_idle.work);
        else {
                schedule_work(&c_idle.work);
                wait_for_completion(&c_idle.done);
        }

        if (IS_ERR(c_idle.idle)) {
                printk("failed fork for CPU %d\n", cpu);
                destroy_work_on_stack(&c_idle.work);
                return PTR_ERR(c_idle.idle);
        }

        set_idle_for_cpu(cpu, c_idle.idle);
do_rest:
        per_cpu(current_task, cpu) = c_idle.idle;
#ifdef CONFIG_X86_32
        /* Stack for startup_32 can be just as for start_secondary onwards */
        irq_ctx_init(cpu);
#else
        clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
        initial_gs = per_cpu_offset(cpu);
        per_cpu(kernel_stack, cpu) =
                (unsigned long)task_stack_page(c_idle.idle) -
                KERNEL_STACK_OFFSET + THREAD_SIZE;
#endif
        early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
        initial_code = (unsigned long)start_secondary;
        stack_start.sp = (void *) c_idle.idle->thread.sp;

        /* start_ip had better be page-aligned! */
        start_ip = setup_trampoline();

        /* So we see what's up */
        announce_cpu(cpu, apicid);

        /*
         * This grunge runs the startup process for
         * the targeted processor.
         */

        atomic_set(&init_deasserted, 0);

        if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {

                pr_debug("Setting warm reset code and vector.\n");

                smpboot_setup_warm_reset_vector(start_ip);
                /*
                 * Be paranoid about clearing APIC errors.
                 */
                if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
                        apic_write(APIC_ESR, 0);
                        apic_read(APIC_ESR);
                }
        }

        /*
         * Kick the secondary CPU. Use the method in the APIC driver
         * if it's defined - or use an INIT boot APIC message otherwise:
         */
        if (apic->wakeup_secondary_cpu)
                boot_error = apic->wakeup_secondary_cpu(apicid, start_ip);
        else
                boot_error = wakeup_secondary_cpu_via_init(apicid, start_ip);

        if (!boot_error) {
                /*
                 * allow APs to start initializing.
                 */
                pr_debug("Before Callout %d.\n", cpu);
                cpumask_set_cpu(cpu, cpu_callout_mask);
                pr_debug("After Callout %d.\n", cpu);

                /*
                 * Wait 5s total for a response
                 */
                for (timeout = 0; timeout < 50000; timeout++) {
                        if (cpumask_test_cpu(cpu, cpu_callin_mask))
                                break;  /* It has booted */
                        udelay(100);
                }

                if (cpumask_test_cpu(cpu, cpu_callin_mask))
                        pr_debug("CPU%d: has booted.\n", cpu);
                else {
                        boot_error = 1;
                        if (*((volatile unsigned char *)trampoline_base)
                                        == 0xA5)
                                /* trampoline started but...? */
                                pr_err("CPU%d: Stuck ??\n", cpu);
                        else
                                /* trampoline code not run */
                                pr_err("CPU%d: Not responding.\n", cpu);
                        if (apic->inquire_remote_apic)
                                apic->inquire_remote_apic(apicid);
                }
        }

        if (boot_error) {
                /* Try to put things back the way they were before ... */
                numa_remove_cpu(cpu); /* was set by numa_add_cpu */

                /* was set by do_boot_cpu() */
                cpumask_clear_cpu(cpu, cpu_callout_mask);

                /* was set by cpu_init() */
                cpumask_clear_cpu(cpu, cpu_initialized_mask);

                set_cpu_present(cpu, false);
                per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
        }

        /* mark "stuck" area as not stuck */
        *((volatile unsigned long *)trampoline_base) = 0;

        if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {
                /*
                 * Cleanup possible dangling ends...
                 */
                smpboot_restore_warm_reset_vector();
        }

        destroy_work_on_stack(&c_idle.work);
        return boot_error;
}

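/*
 * Top-level entry point for onlining a CPU: validate the APIC ID, boot
 * the AP via do_boot_cpu() and wait until it appears in the online mask.
 */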
int __cpuinit native_cpu_up(unsigned int cpu)
{
        int apicid = apic->cpu_present_to_apicid(cpu);
        unsigned long flags;
        int err;

        WARN_ON(irqs_disabled());

        pr_debug("++++++++++++++++++++=_---CPU UP  %u\n", cpu);

        if (apicid == BAD_APICID || apicid == boot_cpu_physical_apicid ||
            !physid_isset(apicid, phys_cpu_present_map)) {
                printk(KERN_ERR "%s: bad cpu %d\n", __func__, cpu);
                return -EINVAL;
        }

        /*
         * Already booted CPU?
         */
        if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
                pr_debug("do_boot_cpu %d Already started\n", cpu);
                return -ENOSYS;
        }

        /*
         * Save current MTRR state in case it was changed since early boot
         * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
         */
        mtrr_save_state();

        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

#ifdef CONFIG_X86_32
        /* init low mem mapping */
        clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
                min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
        flush_tlb_all();
        low_mappings = 1;

        err = do_boot_cpu(apicid, cpu);

        zap_low_mappings(false);
        low_mappings = 0;
#else
        err = do_boot_cpu(apicid, cpu);
#endif
        if (err) {
                pr_debug("do_boot_cpu failed %d\n", err);
                return -EIO;
        }

        /*
         * Check TSC synchronization with the AP (keep irqs disabled
         * while doing so):
         */
        local_irq_save(flags);
        check_tsc_sync_source(cpu);
        local_irq_restore(flags);

        while (!cpu_online(cpu)) {
                cpu_relax();
                touch_nmi_watchdog();
        }

        return 0;
}

/*
 * Fall back to non SMP mode after errors.
 *
 * RED-PEN audit/test this more. I bet there is more state messed up here.
 */
static __init void disable_smp(void)
{
        init_cpu_present(cpumask_of(0));
        init_cpu_possible(cpumask_of(0));
        smpboot_clear_io_apic_irqs();

        if (smp_found_config)
                physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
        else
                physid_set_mask_of_physid(0, &phys_cpu_present_map);
        map_cpu_to_logical_apicid();
        cpumask_set_cpu(0, cpu_sibling_mask(0));
        cpumask_set_cpu(0, cpu_core_mask(0));
}

/*
 * Various sanity checks.
 */
static int __init smp_sanity_check(unsigned max_cpus)
{
        preempt_disable();

#if !defined(CONFIG_X86_BIGSMP) && defined(CONFIG_X86_32)
        if (def_to_bigsmp && nr_cpu_ids > 8) {
                unsigned int cpu;
                unsigned nr;

                printk(KERN_WARNING
                       "More than 8 CPUs detected - skipping them.\n"
                       "Use CONFIG_X86_BIGSMP.\n");

                nr = 0;
                for_each_present_cpu(cpu) {
                        if (nr >= 8)
                                set_cpu_present(cpu, false);
                        nr++;
                }

                nr = 0;
                for_each_possible_cpu(cpu) {
                        if (nr >= 8)
                                set_cpu_possible(cpu, false);
                        nr++;
                }

                nr_cpu_ids = 8;
        }
#endif

        if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
                printk(KERN_WARNING
                        "weird, boot CPU (#%d) not listed by the BIOS.\n",
                        hard_smp_processor_id());

                physid_set(hard_smp_processor_id(), phys_cpu_present_map);
        }

        /*
         * If we couldn't find an SMP configuration at boot time,
         * get out of here now!
         */
        if (!smp_found_config && !acpi_lapic) {
                preempt_enable();
                printk(KERN_NOTICE "SMP motherboard not detected.\n");
                disable_smp();
                if (APIC_init_uniprocessor())
                        printk(KERN_NOTICE "Local APIC not detected."
                                           " Using dummy APIC emulation.\n");
                return -1;
        }

        /*
         * Should not be necessary because the MP table should list the boot
         * CPU too, but we do it for the sake of robustness anyway.
         */
        if (!apic->check_phys_apicid_present(boot_cpu_physical_apicid)) {
                printk(KERN_NOTICE
                        "weird, boot CPU (#%d) not listed by the BIOS.\n",
                        boot_cpu_physical_apicid);
                physid_set(hard_smp_processor_id(), phys_cpu_present_map);
        }
        preempt_enable();

        /*
         * If we couldn't find a local APIC, then get out of here now!
         */
        if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) &&
            !cpu_has_apic) {
                if (!disable_apic) {
                        pr_err("BIOS bug, local APIC #%d not detected!...\n",
                                boot_cpu_physical_apicid);
                        pr_err("... forcing use of dummy APIC emulation."
                                "(tell your hw vendor)\n");
                }
                smpboot_clear_io_apic();
                arch_disable_smp_support();
                return -1;
        }

        verify_local_APIC();

        /*
         * If SMP should be disabled, then really disable it!
         */
        if (!max_cpus) {
                printk(KERN_INFO "SMP mode deactivated.\n");
                smpboot_clear_io_apic();

                localise_nmi_watchdog();

                connect_bsp_APIC();
                setup_local_APIC();
                end_local_APIC_setup();
                return -1;
        }

        return 0;
}

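/*
 * Initialize cpu_index of every possible CPU to nr_cpu_ids ("not set
 * up yet"); smp_store_cpu_info() fills in the real index at bringup.
 */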
static void __init smp_cpu_index_default(void)
{
        int i;
        struct cpuinfo_x86 *c;

        for_each_possible_cpu(i) {
                c = &cpu_data(i);
                /* mark all to hotplug */
                c->cpu_index = nr_cpu_ids;
        }
}

/*
 * Prepare for SMP bootup. The MP table or ACPI has been read
 * earlier. Just do some sanity checking here and enable APIC mode.
 */
void __init native_smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int i;

        preempt_disable();
        smp_cpu_index_default();
        current_cpu_data = boot_cpu_data;
        cpumask_copy(cpu_callin_mask, cpumask_of(0));
        mb();
        /*
         * Setup boot CPU information
         */
        smp_store_cpu_info(0); /* Final full version of the data */
#ifdef CONFIG_X86_32
        boot_cpu_logical_apicid = logical_smp_processor_id();
#endif
        current_thread_info()->cpu = 0;  /* needed? */
        for_each_possible_cpu(i) {
                zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
                zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
                zalloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
        }
        set_cpu_sibling_map(0);

        enable_IR_x2apic();
        default_setup_apic_routing();

        if (smp_sanity_check(max_cpus) < 0) {
                printk(KERN_INFO "SMP disabled\n");
                disable_smp();
                goto out;
        }

        preempt_disable();
        if (read_apic_id() != boot_cpu_physical_apicid) {
                panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
                     read_apic_id(), boot_cpu_physical_apicid);
                /* Or can we switch back to PIC here? */
        }
        preempt_enable();

        connect_bsp_APIC();

        /*
         * Switch from PIC to APIC mode.
         */
        setup_local_APIC();

        /*
         * Enable IO APIC before setting up error vector
         */
        if (!skip_ioapic_setup && nr_ioapics)
                enable_IO_APIC();

        end_local_APIC_setup();

        map_cpu_to_logical_apicid();

        if (apic->setup_portio_remap)
                apic->setup_portio_remap();

        smpboot_setup_io_apic();
        /*
         * Set up local APIC timer on boot CPU.
         */

        printk(KERN_INFO "CPU%d: ", 0);
        print_cpu_info(&cpu_data(0));
        x86_init.timers.setup_percpu_clockev();

        if (is_uv_system())
                uv_system_init();

        set_mtrr_aps_delayed_init();
out:
        preempt_enable();
}

void arch_enable_nonboot_cpus_begin(void)
{
        set_mtrr_aps_delayed_init();
}

void arch_enable_nonboot_cpus_end(void)
{
        mtrr_aps_init();
}

/*
 * Early setup to make printk work.
 */
void __init native_smp_prepare_boot_cpu(void)
{
        int me = smp_processor_id();
        switch_to_new_gdt(me);
        /* already set me in cpu_online_mask in boot_cpu_init() */
        cpumask_set_cpu(me, cpu_callout_mask);
        per_cpu(cpu_state, me) = CPU_ONLINE;
}

void __init native_smp_cpus_done(unsigned int max_cpus)
{
        pr_debug("Boot done.\n");

        impress_friends();
#ifdef CONFIG_X86_IO_APIC
        setup_ioapic_dest();
#endif
        check_nmi_watchdog();
        mtrr_aps_init();
}

static int __initdata setup_possible_cpus = -1;
static int __init _setup_possible_cpus(char *str)
{
        get_option(&str, &setup_possible_cpus);
        return 0;
}
early_param("possible_cpus", _setup_possible_cpus);

/*
 * cpu_possible_mask should be static: it cannot change as cpus
 * are onlined or offlined. The reason is that per-cpu data-structures
 * are allocated by some modules at init time, and they don't expect to
 * do this dynamically on cpu arrival/departure.
 * cpu_present_mask on the other hand can change dynamically.
 * If cpu_hotplug is not compiled in, we resort to the current
 * behaviour, which is cpu_possible == cpu_present.
 * - Ashok Raj
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
 * - The user can overwrite it with possible_cpus=NUM
 * - Otherwise don't reserve additional CPUs.
 * We do this because additional CPUs waste a lot of memory.
 * -AK
 */
__init void prefill_possible_map(void)
{
        int i, possible;

        /* no processor from mptable or madt */
        if (!num_processors)
                num_processors = 1;

        i = setup_max_cpus ?: 1;
        if (setup_possible_cpus == -1) {
                possible = num_processors;
#ifdef CONFIG_HOTPLUG_CPU
                if (setup_max_cpus)
                        possible += disabled_cpus;
#else
                if (possible > i)
                        possible = i;
#endif
        } else
                possible = setup_possible_cpus;

        total_cpus = max_t(int, possible, num_processors + disabled_cpus);

        /* nr_cpu_ids could be reduced via nr_cpus= */
        if (possible > nr_cpu_ids) {
                printk(KERN_WARNING
                        "%d Processors exceeds NR_CPUS limit of %d\n",
                        possible, nr_cpu_ids);
                possible = nr_cpu_ids;
        }

#ifdef CONFIG_HOTPLUG_CPU
        if (!setup_max_cpus)
#endif
        if (possible > i) {
                printk(KERN_WARNING
                        "%d Processors exceeds max_cpus limit of %u\n",
                        possible, setup_max_cpus);
                possible = i;
        }

        printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
                possible, max_t(int, possible - num_processors, 0));

        for (i = 0; i < possible; i++)
                set_cpu_possible(i, true);
        for (; i < NR_CPUS; i++)
                set_cpu_possible(i, false);

        nr_cpu_ids = possible;
}

#ifdef CONFIG_HOTPLUG_CPU

static void remove_siblinginfo(int cpu)
{
        int sibling;
        struct cpuinfo_x86 *c = &cpu_data(cpu);

        for_each_cpu(sibling, cpu_core_mask(cpu)) {
                cpumask_clear_cpu(cpu, cpu_core_mask(sibling));
                /*
                 * last thread sibling in this cpu core going down
                 */
                if (cpumask_weight(cpu_sibling_mask(cpu)) == 1)
                        cpu_data(sibling).booted_cores--;
        }

        for_each_cpu(sibling, cpu_sibling_mask(cpu))
                cpumask_clear_cpu(cpu, cpu_sibling_mask(sibling));
        cpumask_clear(cpu_sibling_mask(cpu));
        cpumask_clear(cpu_core_mask(cpu));
        c->phys_proc_id = 0;
        c->cpu_core_id = 0;
        cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
}

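/*
 * Drop a dying CPU from all bookkeeping masks. Called from
 * cpu_disable_common() with vector_lock held, so the set of online
 * cpus cannot change under vector assignment.
 */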
static void __ref remove_cpu_from_maps(int cpu)
{
        set_cpu_online(cpu, false);
        cpumask_clear_cpu(cpu, cpu_callout_mask);
        cpumask_clear_cpu(cpu, cpu_callin_mask);
        /* was set by cpu_init() */
        cpumask_clear_cpu(cpu, cpu_initialized_mask);
        numa_remove_cpu(cpu);
}

void cpu_disable_common(void)
{
        int cpu = smp_processor_id();

        remove_siblinginfo(cpu);

        /* It's now safe to remove this processor from the online map */
        lock_vector_lock();
        remove_cpu_from_maps(cpu);
        unlock_vector_lock();
        fixup_irqs();
}

int native_cpu_disable(void)
{
        int cpu = smp_processor_id();

        /*
         * Perhaps use cpufreq to drop frequency, but that could go
         * into generic code.
         *
         * We won't take down the boot processor on i386 due to some
         * interrupts only being able to be serviced by the BSP.
         * Especially so if we're not using an IOAPIC   -zwane
         */
        if (cpu == 0)
                return -EBUSY;

        if (nmi_watchdog == NMI_LOCAL_APIC)
                stop_apic_nmi_watchdog(NULL);
        clear_local_APIC();

        cpu_disable_common();
        return 0;
}

void native_cpu_die(unsigned int cpu)
{
        /* We don't do anything here: idle task is faking death itself. */
        unsigned int i;

        for (i = 0; i < 10; i++) {
                /* They ack this in play_dead by setting CPU_DEAD */
                if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
                        if (system_state == SYSTEM_RUNNING)
                                pr_info("CPU %u is now offline\n", cpu);

                        if (1 == num_online_cpus())
                                alternatives_smp_switch(0);
                        return;
                }
                msleep(100);
        }
        pr_err("CPU %u didn't die...\n", cpu);
}

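/*
 * Common teardown for a CPU going offline: detach the idle task, flush
 * lazy TLB state and acknowledge CPU_DEAD so native_cpu_die() can return.
 */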
void play_dead_common(void)
{
        idle_task_exit();
        reset_lazy_tlbstate();
        irq_ctx_exit(raw_smp_processor_id());
        c1e_remove_cpu(raw_smp_processor_id());

        mb();
        /* Ack it */
        __get_cpu_var(cpu_state) = CPU_DEAD;

        /*
         * With physical CPU hotplug, we should halt the cpu
         */
        local_irq_disable();
}

void native_play_dead(void)
{
        play_dead_common();
        tboot_shutdown(TB_SHUTDOWN_WFS);
        wbinvd_halt();
}

#else /* ... !CONFIG_HOTPLUG_CPU */
int native_cpu_disable(void)
{
        return -ENOSYS;
}

void native_cpu_die(unsigned int cpu)
{
        /* We said "no" in __cpu_disable */
        BUG();
}

void native_play_dead(void)
{
        BUG();
}

#endif