// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015 Google, Inc
 *
 * Based on code from the coreboot file of the same name
 */

#include <common.h>
#include <cpu.h>
#include <dm.h>
#include <errno.h>
#include <log.h>
#include <malloc.h>
#include <qfw.h>
#include <asm/atomic.h>
#include <asm/cpu.h>
#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/lapic.h>
#include <asm/microcode.h>
#include <asm/mp.h>
#include <asm/msr.h>
#include <asm/mtrr.h>
#include <asm/processor.h>
#include <asm/sipi.h>
#include <dm/device-internal.h>
#include <dm/uclass-internal.h>
#include <dm/lists.h>
#include <dm/root.h>
#include <linux/delay.h>
#include <linux/linkage.h>

DECLARE_GLOBAL_DATA_PTR;

/* This also needs to match the sipi.S assembly code for saved MSR encoding */
struct saved_msr {
	uint32_t index;
	uint32_t lo;
	uint32_t hi;
} __packed;

struct mp_flight_plan {
	int num_records;
	struct mp_flight_record *records;
};

/**
 * struct mp_callback - Callback information for APs
 *
 * @func: Function to run
 * @arg: Argument to pass to the function
 * @logical_cpu_number: Either a CPU number (i.e. dev->req_seq) or a special
 *	value like MP_SELECT_BSP. It tells the AP whether it should process this
 *	callback
 */
struct mp_callback {
	/**
	 * func() - Function to call on the AP
	 *
	 * @arg: Argument to pass
	 */
	void (*func)(void *arg);
	void *arg;
	int logical_cpu_number;
};

static struct mp_flight_plan mp_info;

/*
 * ap_callbacks - Callback mailbox array
 *
 * Array of callbacks, one entry for each available CPU, indexed by the CPU
 * number, which is dev->req_seq. The entry for the main CPU is never used.
 * When this is NULL, there is no pending work for the CPU to run. When
 * non-NULL it points to the mp_callback structure. This is shared between all
 * CPUs, so should only be written by the main CPU.
 */
static struct mp_callback **ap_callbacks;

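/*
 * Note: only the AP side of this mailbox lives in this file; the APs poll
 * their slot in ap_wait_for_instruction() below. Purely as an illustrative
 * sketch (my_func, my_arg and seq are placeholders, not code in this file),
 * the main CPU would post work to AP 'seq' along these lines and then spin
 * until the AP clears the slot to signal completion:
 *
 *	struct mp_callback cb = { my_func, my_arg, seq };
 *
 *	store_callback(&ap_callbacks[seq], &cb);
 *	while (read_callback(&ap_callbacks[seq]))
 *		udelay(10);
 */
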
static inline void barrier_wait(atomic_t *b)
{
	while (atomic_read(b) == 0)
		asm("pause");
	mfence();
}

static inline void release_barrier(atomic_t *b)
{
	mfence();
	atomic_set(b, 1);
}

static inline void stop_this_cpu(void)
{
	/* Called by an AP when it is ready to halt and wait for a new task */
	for (;;)
		cpu_hlt();
}

/* Returns 1 if timeout waiting for APs. 0 if target APs found */
static int wait_for_aps(atomic_t *val, int target, int total_delay,
			int delay_step)
{
	int timeout = 0;
	int delayed = 0;

	while (atomic_read(val) != target) {
		udelay(delay_step);
		delayed += delay_step;
		if (delayed >= total_delay) {
			timeout = 1;
			break;
		}
	}

	return timeout;
}

static void ap_do_flight_plan(struct udevice *cpu)
{
	int i;

	for (i = 0; i < mp_info.num_records; i++) {
		struct mp_flight_record *rec = &mp_info.records[i];

		atomic_inc(&rec->cpus_entered);
		barrier_wait(&rec->barrier);

		if (rec->ap_call != NULL)
			rec->ap_call(cpu, rec->ap_arg);
	}
}

static int find_cpu_by_apic_id(int apic_id, struct udevice **devp)
{
	struct udevice *dev;

	*devp = NULL;
	for (uclass_find_first_device(UCLASS_CPU, &dev);
	     dev;
	     uclass_find_next_device(&dev)) {
		struct cpu_platdata *plat = dev_get_parent_platdata(dev);

		if (plat->cpu_id == apic_id) {
			*devp = dev;
			return 0;
		}
	}

	return -ENOENT;
}

/*
 * By the time APs call ap_init() caching has been set up, and microcode has
 * been loaded
 */
static void ap_init(unsigned int cpu_index)
{
	struct udevice *dev;
	int apic_id;
	int ret;

	/* Ensure the local apic is enabled */
	enable_lapic();

	apic_id = lapicid();
	ret = find_cpu_by_apic_id(apic_id, &dev);
	if (ret) {
		debug("Unknown CPU apic_id %x\n", apic_id);
		goto done;
	}

	debug("AP: slot %d apic_id %x, dev %s\n", cpu_index, apic_id,
	      dev ? dev->name : "(apic_id not found)");

	/*
	 * Walk the flight plan, which only returns if CONFIG_SMP_AP_WORK is
	 * not enabled
	 */
	ap_do_flight_plan(dev);

done:
	stop_this_cpu();
}

static const unsigned int fixed_mtrrs[NUM_FIXED_MTRRS] = {
	MTRR_FIX_64K_00000_MSR, MTRR_FIX_16K_80000_MSR, MTRR_FIX_16K_A0000_MSR,
	MTRR_FIX_4K_C0000_MSR, MTRR_FIX_4K_C8000_MSR, MTRR_FIX_4K_D0000_MSR,
	MTRR_FIX_4K_D8000_MSR, MTRR_FIX_4K_E0000_MSR, MTRR_FIX_4K_E8000_MSR,
	MTRR_FIX_4K_F0000_MSR, MTRR_FIX_4K_F8000_MSR,
};

static inline struct saved_msr *save_msr(int index, struct saved_msr *entry)
{
	msr_t msr;

	msr = msr_read(index);
	entry->index = index;
	entry->lo = msr.lo;
	entry->hi = msr.hi;

	/* Return the next entry */
	entry++;
	return entry;
}

static int save_bsp_msrs(char *start, int size)
{
	int msr_count;
	int num_var_mtrrs;
	struct saved_msr *msr_entry;
	int i;
	msr_t msr;

	/* Determine the number of MTRRs that need to be saved */
	msr = msr_read(MTRR_CAP_MSR);
	num_var_mtrrs = msr.lo & 0xff;

	/* 2 * num_var_mtrrs for base and mask. +1 for IA32_MTRR_DEF_TYPE */
	msr_count = 2 * num_var_mtrrs + NUM_FIXED_MTRRS + 1;

	if ((msr_count * sizeof(struct saved_msr)) > size) {
		printf("Cannot mirror all %d msrs\n", msr_count);
		return -ENOSPC;
	}

	msr_entry = (void *)start;
	for (i = 0; i < NUM_FIXED_MTRRS; i++)
		msr_entry = save_msr(fixed_mtrrs[i], msr_entry);

	for (i = 0; i < num_var_mtrrs; i++) {
		msr_entry = save_msr(MTRR_PHYS_BASE_MSR(i), msr_entry);
		msr_entry = save_msr(MTRR_PHYS_MASK_MSR(i), msr_entry);
	}

	msr_entry = save_msr(MTRR_DEF_TYPE_MSR, msr_entry);

	return msr_count;
}

static int load_sipi_vector(atomic_t **ap_countp, int num_cpus)
{
	struct sipi_params_16bit *params16;
	struct sipi_params *params;
	static char msr_save[512];
	char *stack;
	ulong addr;
	int code_len;
	int size;
	int ret;

	/* Copy in the code */
	code_len = ap_start16_code_end - ap_start16;
	debug("Copying SIPI code to %x: %d bytes\n", AP_DEFAULT_BASE,
	      code_len);
	memcpy((void *)AP_DEFAULT_BASE, ap_start16, code_len);

	addr = AP_DEFAULT_BASE + (ulong)sipi_params_16bit - (ulong)ap_start16;
	params16 = (struct sipi_params_16bit *)addr;
	params16->ap_start = (uint32_t)ap_start;
	params16->gdt = (uint32_t)gd->arch.gdt;
	params16->gdt_limit = X86_GDT_SIZE - 1;
	debug("gdt = %x, gdt_limit = %x\n", params16->gdt, params16->gdt_limit);

	params = (struct sipi_params *)sipi_params;
	debug("SIPI 32-bit params at %p\n", params);
	params->idt_ptr = (uint32_t)x86_get_idt();

	params->stack_size = CONFIG_AP_STACK_SIZE;
	size = params->stack_size * num_cpus;
	stack = memalign(4096, size);
	if (!stack)
		return -ENOMEM;
	params->stack_top = (u32)(stack + size);
#if !defined(CONFIG_QEMU) && !defined(CONFIG_HAVE_FSP) && \
	!defined(CONFIG_INTEL_MID)
	params->microcode_ptr = ucode_base;
	debug("Microcode at %x\n", params->microcode_ptr);
#endif
	params->msr_table_ptr = (u32)msr_save;
	ret = save_bsp_msrs(msr_save, sizeof(msr_save));
	if (ret < 0)
		return ret;
	params->msr_count = ret;

	params->c_handler = (uint32_t)&ap_init;

	*ap_countp = &params->ap_count;
	atomic_set(*ap_countp, 0);
	debug("SIPI vector is ready\n");

	return 0;
}

static int check_cpu_devices(int expected_cpus)
{
	int i;

	for (i = 0; i < expected_cpus; i++) {
		struct udevice *dev;
		int ret;

		ret = uclass_find_device(UCLASS_CPU, i, &dev);
		if (ret) {
			debug("Cannot find CPU %d in device tree\n", i);
			return ret;
		}
	}

	return 0;
}

/* Returns 0 once the LAPIC ICR stops being busy, -ETIMEDOUT if it does not */
static int apic_wait_timeout(int total_delay, const char *msg)
{
	int total = 0;

	if (!(lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY))
		return 0;

	debug("Waiting for %s...", msg);
	while (lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY) {
		udelay(50);
		total += 50;
		if (total >= total_delay) {
			debug("timed out: aborting\n");
			return -ETIMEDOUT;
		}
	}
	debug("done\n");

	return 0;
}

/**
 * start_aps() - Start up the APs and count how many we find
 *
 * This is called on the boot processor to start up all the other processors
 * (here called APs).
 *
 * @num_aps: Number of APs we expect to find
 * @ap_count: Initially zero. Incremented by this function for each AP found
 * @return 0 if all APs were set up correctly or there are none to set up,
 *	-ENOSPC if the SIPI vector is too high in memory,
 *	-ETIMEDOUT if the ICR is busy or the second SIPI fails to complete,
 *	-EIO if not all APs check in correctly
 */
static int start_aps(int num_aps, atomic_t *ap_count)
{
	int sipi_vector;
	/* Max location is 4KiB below 1MiB */
	const int max_vector_loc = ((1 << 20) - (1 << 12)) >> 12;

	if (num_aps == 0)
		return 0;

	/* The vector is sent as a 4k aligned address in one byte */
	sipi_vector = AP_DEFAULT_BASE >> 12;

	if (sipi_vector > max_vector_loc) {
		printf("SIPI vector too large! 0x%08x\n",
		       sipi_vector);
		return -ENOSPC;
	}

	debug("Attempting to start %d APs\n", num_aps);

	if (apic_wait_timeout(1000, "ICR not to be busy"))
		return -ETIMEDOUT;

	/* Send INIT IPI to all but self */
	lapic_write(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(0));
	lapic_write(LAPIC_ICR, LAPIC_DEST_ALLBUT | LAPIC_INT_ASSERT |
		    LAPIC_DM_INIT);
	debug("Waiting for 10ms after sending INIT\n");
	mdelay(10);

	/* Send 1st SIPI */
	if (apic_wait_timeout(1000, "ICR not to be busy"))
		return -ETIMEDOUT;

	lapic_write(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(0));
	lapic_write(LAPIC_ICR, LAPIC_DEST_ALLBUT | LAPIC_INT_ASSERT |
		    LAPIC_DM_STARTUP | sipi_vector);
	if (apic_wait_timeout(10000, "first SIPI to complete"))
		return -ETIMEDOUT;

	/* Wait for CPUs to check in up to 200 us */
	wait_for_aps(ap_count, num_aps, 200, 15);

	/* Send 2nd SIPI */
	if (apic_wait_timeout(1000, "ICR not to be busy"))
		return -ETIMEDOUT;

	lapic_write(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(0));
	lapic_write(LAPIC_ICR, LAPIC_DEST_ALLBUT | LAPIC_INT_ASSERT |
		    LAPIC_DM_STARTUP | sipi_vector);
	if (apic_wait_timeout(10000, "second SIPI to complete"))
		return -ETIMEDOUT;

	/* Wait for CPUs to check in */
	if (wait_for_aps(ap_count, num_aps, 10000, 50)) {
		debug("Not all APs checked in: %d/%d\n",
		      atomic_read(ap_count), num_aps);
		return -EIO;
	}

	return 0;
}

/**
 * bsp_do_flight_plan() - Do the flight plan on the BSP
 *
 * This runs the flight plan on the main CPU used to boot U-Boot
 *
 * @cpu: Device for the main CPU
 * @plan: Flight plan to run
 * @num_aps: Number of APs (CPUs other than the BSP)
 * @return 0 on success, -ETIMEDOUT if an AP failed to come up
 */
static int bsp_do_flight_plan(struct udevice *cpu, struct mp_flight_plan *plan,
			      int num_aps)
{
	int i;
	int ret = 0;
	const int timeout_us = 100000;
	const int step_us = 100;

	for (i = 0; i < plan->num_records; i++) {
		struct mp_flight_record *rec = &plan->records[i];

		/* Wait for APs if the record is not released */
		if (atomic_read(&rec->barrier) == 0) {
			/* Wait for the APs to check in */
			if (wait_for_aps(&rec->cpus_entered, num_aps,
					 timeout_us, step_us)) {
				debug("MP record %d timeout\n", i);
				ret = -ETIMEDOUT;
			}
		}

		if (rec->bsp_call != NULL)
			rec->bsp_call(cpu, rec->bsp_arg);

		release_barrier(&rec->barrier);
	}

	return ret;
}

/**
 * get_bsp() - Get information about the bootstrap processor
 *
 * @devp: If non-NULL, returns CPU device corresponding to the BSP
 * @cpu_countp: If non-NULL, returns the total number of CPUs
 * @return CPU number of the BSP, or -ve on error. If multiprocessing is not
 *	enabled, returns 0
 */
static int get_bsp(struct udevice **devp, int *cpu_countp)
{
	char processor_name[CPU_MAX_NAME_LEN];
	struct udevice *dev;
	int apic_id;
	int ret;

	cpu_get_name(processor_name);
	debug("CPU: %s\n", processor_name);

	apic_id = lapicid();
	ret = find_cpu_by_apic_id(apic_id, &dev);
	if (ret < 0) {
		printf("Cannot find boot CPU, APIC ID %d\n", apic_id);
		return ret;
	}
	ret = cpu_get_count(dev);
	if (ret < 0)
		return log_msg_ret("count", ret);
	if (devp)
		*devp = dev;
	if (cpu_countp)
		*cpu_countp = ret;

	return dev->req_seq >= 0 ? dev->req_seq : 0;
}

/**
 * read_callback() - Read the pointer in a callback slot
 *
 * This is called by APs to read their callback slot to see if there is a
 * pointer to new instructions
 *
 * @slot: Pointer to the AP's callback slot
 * @return value of that pointer
 */
static struct mp_callback *read_callback(struct mp_callback **slot)
{
	dmb();

	return *slot;
}

/**
 * store_callback() - Store a pointer to the callback slot
 *
 * This is called by APs to write NULL into the callback slot when they have
 * finished the work requested by the BSP.
 *
 * @slot: Pointer to the AP's callback slot
 * @val: Value to write (e.g. NULL)
 */
static void store_callback(struct mp_callback **slot, struct mp_callback *val)
{
	*slot = val;
	dmb();
}

/**
 * ap_wait_for_instruction() - Wait for and process requests from the main CPU
 *
 * This is called by APs (here, everything other than the main boot CPU) to
 * await instructions. They arrive in the form of a function call and argument,
 * which is then called. This uses a simple mailbox with atomic read/set
 *
 * @cpu: CPU that is waiting
 * @unused: Optional argument provided by struct mp_flight_record, not used here
 * @return 0 if CONFIG_SMP_AP_WORK is not enabled; otherwise does not return
 */
static int ap_wait_for_instruction(struct udevice *cpu, void *unused)
{
	struct mp_callback lcb;
	struct mp_callback **per_cpu_slot;

	if (!IS_ENABLED(CONFIG_SMP_AP_WORK))
		return 0;

	per_cpu_slot = &ap_callbacks[cpu->req_seq];

	while (1) {
		struct mp_callback *cb = read_callback(per_cpu_slot);

		if (!cb) {
			asm ("pause");
			continue;
		}

		/* Copy to local variable before using the value */
		memcpy(&lcb, cb, sizeof(lcb));
		mfence();
		if (lcb.logical_cpu_number == MP_SELECT_ALL ||
		    lcb.logical_cpu_number == MP_SELECT_APS ||
		    cpu->req_seq == lcb.logical_cpu_number)
			lcb.func(lcb.arg);

		/* Indicate we are finished */
		store_callback(per_cpu_slot, NULL);
	}

	return 0;
}

static int mp_init_cpu(struct udevice *cpu, void *unused)
{
	struct cpu_platdata *plat = dev_get_parent_platdata(cpu);

	plat->ucode_version = microcode_read_rev();
	plat->device_id = gd->arch.x86_device;

	return device_probe(cpu);
}

static struct mp_flight_record mp_steps[] = {
	MP_FR_BLOCK_APS(mp_init_cpu, NULL, mp_init_cpu, NULL),
	MP_FR_BLOCK_APS(ap_wait_for_instruction, NULL, NULL, NULL),
};

int mp_init(void)
{
	int num_aps, num_cpus;
	atomic_t *ap_count;
	struct udevice *cpu;
	struct uclass *uc;
	int ret;

	if (IS_ENABLED(CONFIG_QFW)) {
		ret = qemu_cpu_fixup();
		if (ret)
			return ret;
	}

	/*
	 * Multiple APs are brought up simultaneously and they may get the same
	 * seq num in uclass_resolve_seq() during device_probe(). To avoid
	 * this, set req_seq to the reg number in the device tree in advance.
	 */
	uclass_id_foreach_dev(UCLASS_CPU, cpu, uc)
		cpu->req_seq = dev_read_u32_default(cpu, "reg", -1);

	ret = get_bsp(&cpu, &num_cpus);
	if (ret < 0) {
		debug("Cannot init boot CPU: err=%d\n", ret);
		return ret;
	}

	if (num_cpus < 2)
		debug("Warning: Only 1 CPU is detected\n");

	ret = check_cpu_devices(num_cpus);
	if (ret)
		log_warning("Warning: Device tree does not describe all CPUs. Extra ones will not be started correctly\n");

	ap_callbacks = calloc(num_cpus, sizeof(struct mp_callback *));
	if (!ap_callbacks)
		return -ENOMEM;

	/* Copy needed parameters so that APs have a reference to the plan */
	mp_info.num_records = ARRAY_SIZE(mp_steps);
	mp_info.records = mp_steps;

	/* Load the SIPI vector */
	ret = load_sipi_vector(&ap_count, num_cpus);
	if (ret)
		return ret;
	if (!ap_count)
		return -ENOENT;

	/*
	 * Make sure SIPI data hits RAM so the APs that come up will see
	 * the startup code even if the caches are disabled
	 */
	wbinvd();

	/* Start the APs, providing the number of APs and the cpus_entered field */
	num_aps = num_cpus - 1;
	ret = start_aps(num_aps, ap_count);
	if (ret) {
		mdelay(1000);
		debug("%d/%d eventually checked in?\n", atomic_read(ap_count),
		      num_aps);
		return ret;
	}

	/* Walk the flight plan for the BSP */
	ret = bsp_do_flight_plan(cpu, &mp_info, num_aps);
	if (ret) {
		debug("CPU init failed: err=%d\n", ret);
		return ret;
	}

	return 0;
}