/*
 *
 * Common boot and setup code.
 *
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/export.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/utsname.h>
#include <linux/tty.h>
#include <linux/root_dev.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/nmi.h>

#include <asm/debugfs.h>
#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
#include <asm/paca.h>
#include <asm/time.h>
#include <asm/cputable.h>
#include <asm/dt_cpu_ftrs.h>
#include <asm/sections.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/xmon.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/code-patching.h>
#include <asm/livepatch.h>
#include <asm/opal.h>
#include <asm/cputhreads.h>
#include <asm/hw_irq.h>

#include "setup.h"

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

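/* Secondary CPUs still sitting in the early spin loop; see smp_release_cpus(). */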
int spinning_secondaries;
u64 ppc64_pft_size;

struct ppc64_caches ppc64_caches = {
	.l1d = {
		.block_size = 0x40,
		.log_block_size = 6,
	},
	.l1i = {
		.block_size = 0x40,
		.log_block_size = 6
	},
};
EXPORT_SYMBOL_GPL(ppc64_caches);

#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP)
void __init setup_tlb_core_data(void)
{
	int cpu;

	BUILD_BUG_ON(offsetof(struct tlb_core_data, lock) != 0);

	for_each_possible_cpu(cpu) {
		int first = cpu_first_thread_sibling(cpu);

		/*
		 * If we boot via kdump on a non-primary thread,
		 * make sure we point at the thread that actually
		 * set up this TLB.
		 */
		if (cpu_first_thread_sibling(boot_cpuid) == first)
			first = boot_cpuid;

		paca_ptrs[cpu]->tcd_ptr = &paca_ptrs[first]->tcd;

		/*
		 * If we have threads, we need either tlbsrx.
		 * or e6500 tablewalk mode, or else TLB handlers
		 * will be racy and could produce duplicate entries.
		 * Should we panic instead?
		 */
		WARN_ONCE(smt_enabled_at_boot >= 2 &&
			  !mmu_has_feature(MMU_FTR_USE_TLBRSRV) &&
			  book3e_htw_mode != PPC_HTW_E6500,
			  "%s: unsupported MMU configuration\n", __func__);
	}
}
#endif

#ifdef CONFIG_SMP

static char *smt_enabled_cmdline;

/* Look for ibm,smt-enabled OF option */
void __init check_smt_enabled(void)
{
	struct device_node *dn;
	const char *smt_option;

	/* Default to enabling all threads */
	smt_enabled_at_boot = threads_per_core;

	/* Allow the command line to overrule the OF option */
	if (smt_enabled_cmdline) {
		if (!strcmp(smt_enabled_cmdline, "on"))
			smt_enabled_at_boot = threads_per_core;
		else if (!strcmp(smt_enabled_cmdline, "off"))
			smt_enabled_at_boot = 0;
		else {
			int smt;
			int rc;

			rc = kstrtoint(smt_enabled_cmdline, 10, &smt);
			if (!rc)
				smt_enabled_at_boot =
					min(threads_per_core, smt);
		}
	} else {
		dn = of_find_node_by_path("/options");
		if (dn) {
			smt_option = of_get_property(dn, "ibm,smt-enabled",
						     NULL);

			if (smt_option) {
				if (!strcmp(smt_option, "on"))
					smt_enabled_at_boot = threads_per_core;
				else if (!strcmp(smt_option, "off"))
					smt_enabled_at_boot = 0;
			}

			of_node_put(dn);
		}
	}
}

/* Look for smt-enabled= cmdline option */
static int __init early_smt_enabled(char *p)
{
	smt_enabled_cmdline = p;
	return 0;
}
early_param("smt-enabled", early_smt_enabled);

#endif /* CONFIG_SMP */

/** Fix up paca fields required for the boot cpu */
static void __init fixup_boot_paca(void)
{
	/* The boot cpu is started */
	get_paca()->cpu_start = 1;
	/* Allow percpu accesses to work until we set up percpu data */
	get_paca()->data_offset = 0;
	/* Mark interrupts disabled in PACA */
	irq_soft_mask_set(IRQS_DISABLED);
}

static void __init configure_exceptions(void)
{
	/*
	 * Setup the trampolines from the lowmem exception vectors
	 * to the kdump kernel when not using a relocatable kernel.
	 */
	setup_kdump_trampoline();

	/* Under a PAPR hypervisor, we need hypercalls */
	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		/* Enable AIL if possible */
		pseries_enable_reloc_on_exc();

		/*
		 * Tell the hypervisor that we want our exceptions to
		 * be taken in little endian mode.
		 *
		 * We don't call this for big endian as our calling convention
		 * makes us always enter in BE, and the call may fail under
		 * some circumstances with kdump.
		 */
#ifdef __LITTLE_ENDIAN__
		pseries_little_endian_exceptions();
#endif
	} else {
		/* Set endian mode using OPAL */
		if (firmware_has_feature(FW_FEATURE_OPAL))
			opal_configure_cores();

		/* AIL on native is done in cpu_ready_for_interrupts() */
	}
}

static void cpu_ready_for_interrupts(void)
{
	/*
	 * Enable AIL if supported, and we are in hypervisor mode. This
	 * is called once for every processor.
	 *
	 * If we are not in hypervisor mode the job is done once for
	 * the whole partition in configure_exceptions().
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S)) {
		unsigned long lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
	}

	/*
	 * Fixup HFSCR:TM based on CPU features. The bit is set by our
	 * early asm init because at that point we haven't updated our
	 * CPU features from firmware and device-tree. Here we have,
	 * so let's do it.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE) && !cpu_has_feature(CPU_FTR_TM_COMP))
		mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);

	/* Set IR and DR in PACA MSR */
	get_paca()->kernel_msr = MSR_KERNEL;
}

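/*
 * Boot-time default value of the DSCR, noted after firmware and early
 * platform setup (see the record_spr_defaults() call in early_setup()).
 */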
unsigned long spr_default_dscr = 0;

void __init record_spr_defaults(void)
{
	if (early_cpu_has_feature(CPU_FTR_DSCR))
		spr_default_dscr = mfspr(SPRN_DSCR);
}

/*
 * Early initialization entry point. This is called by head.S
 * with MMU translation disabled. We rely on the "feature" of
 * the CPU that ignores the top 2 bits of the address in real
 * mode so we can access kernel globals normally provided we
 * only toy with things in the RMO region. From here, we do
 * some early parsing of the device-tree to set up our MEMBLOCK
 * data structures, and allocate & initialize the hash table
 * and segment tables so we can start running with translation
 * enabled.
 *
 * It is this function which will call the probe() callback of
 * the various platform types and copy the matching one to the
 * global ppc_md structure. Your platform can eventually do
 * some very early initializations from the probe() routine, but
 * this is not recommended, be very careful as, for example, the
 * device-tree is not accessible via normal means at this point.
 */

void __init early_setup(unsigned long dt_ptr)
{
	static __initdata struct paca_struct boot_paca;

	/* -------- printk is _NOT_ safe to use here ! ------- */

	/* Try new device tree based feature discovery ... */
	if (!dt_cpu_ftrs_init(__va(dt_ptr)))
		/* Otherwise use the old style CPU table */
		identify_cpu(0, mfspr(SPRN_PVR));

	/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
	initialise_paca(&boot_paca, 0);
	setup_paca(&boot_paca);
	fixup_boot_paca();

	/* -------- printk is now safe to use ------- */

	/* Enable early debugging if any specified (see udbg.h) */
	udbg_early_init();

	DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);

	/*
	 * Do early initialization using the flattened device
	 * tree, such as retrieving the physical memory map or
	 * calculating/retrieving the hash table size.
	 */
	early_init_devtree(__va(dt_ptr));

	/* Now we know the logical id of our boot cpu, setup the paca. */
	setup_paca(paca_ptrs[boot_cpuid]);
	fixup_boot_paca();

	/*
	 * Configure exception handlers. This includes setting up trampolines
	 * if needed, setting exception endian mode, etc...
	 */
	configure_exceptions();

	/* Apply all the dynamic patching */
	apply_feature_fixups();
	setup_feature_keys();

	/* Initialize the hash table or TLB handling */
	early_init_mmu();

	/*
	 * After firmware and early platform setup code has set things up,
	 * we note the SPR values for configurable control/performance
	 * registers, and use those as initial defaults.
	 */
	record_spr_defaults();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been set up), so adjust the MSR in the PACA to
	 * have IR and DR set and enable AIL if it exists
	 */
	cpu_ready_for_interrupts();

	DBG(" <- early_setup()\n");

#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
	/*
	 * This needs to be done *last* (after the above DBG() even)
	 *
	 * Right after we return from this function, we turn on the MMU
	 * which means the real-mode access trick that btext does will
	 * no longer work, it needs to switch to using a real MMU
	 * mapping. This call will ensure that it does
	 */
	btext_map();
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
}

#ifdef CONFIG_SMP
void early_setup_secondary(void)
{
	/* Mark interrupts disabled in PACA */
	irq_soft_mask_set(IRQS_DISABLED);

	/* Initialize the hash table or TLB handling */
	early_init_mmu_secondary();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been set up), so adjust the MSR in the PACA to
	 * have IR and DR set.
	 */
	cpu_ready_for_interrupts();
}

#endif /* CONFIG_SMP */

#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
static bool use_spinloop(void)
{
	if (IS_ENABLED(CONFIG_PPC_BOOK3S)) {
		/*
		 * See comments in head_64.S -- not all platforms insert
		 * secondaries at __secondary_hold and wait at the spin
		 * loop.
		 */
		if (firmware_has_feature(FW_FEATURE_OPAL))
			return false;
		return true;
	}

	/*
	 * When book3e boots from kexec, the ePAPR spin table does
	 * not get used.
	 */
	return of_property_read_bool(of_chosen, "linux,booted-from-kexec");
}

void smp_release_cpus(void)
{
	unsigned long *ptr;
	int i;

	if (!use_spinloop())
		return;

	DBG(" -> smp_release_cpus()\n");

	/* All secondary cpus are spinning on a common spinloop, release them
	 * all now so they can start to spin on their individual paca
	 * spinloops. For non SMP kernels, the secondary cpus never get out
	 * of the common spinloop.
	 */

	ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
			- PHYSICAL_START);
	*ptr = ppc_function_entry(generic_secondary_smp_init);

	/* And wait a bit for them to catch up */
	for (i = 0; i < 100000; i++) {
		mb();
		HMT_low();
		if (spinning_secondaries == 0)
			break;
		udelay(1);
	}
	DBG("spinning_secondaries = %d\n", spinning_secondaries);

	DBG(" <- smp_release_cpus()\n");
}
#endif /* CONFIG_SMP || CONFIG_KEXEC_CORE */

/*
 * Initialize some remaining members of the ppc64_caches and systemcfg
 * structures
 * (at least until we get rid of them completely). This is mostly some
 * cache information about the CPU that will be used by cache flush
 * routines and/or provided to userland
 */

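/*
 * Fill in a ppc_cache_info from the raw geometry: derive the log2 block
 * size, blocks per page, and associativity (sets == 0 denotes a fully
 * associative cache, reported as 0xffff).
 */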
static void init_cache_info(struct ppc_cache_info *info, u32 size, u32 lsize,
			    u32 bsize, u32 sets)
{
	info->size = size;
	info->sets = sets;
	info->line_size = lsize;
	info->block_size = bsize;
	info->log_block_size = __ilog2(bsize);
	if (bsize)
		info->blocks_per_page = PAGE_SIZE / bsize;
	else
		info->blocks_per_page = 0;

	if (sets == 0)
		info->assoc = 0xffff;
	else
		info->assoc = size / (sets * lsize);
}

static bool __init parse_cache_info(struct device_node *np,
				    bool icache,
				    struct ppc_cache_info *info)
{
	static const char *ipropnames[] __initdata = {
		"i-cache-size",
		"i-cache-sets",
		"i-cache-block-size",
		"i-cache-line-size",
	};
	static const char *dpropnames[] __initdata = {
		"d-cache-size",
		"d-cache-sets",
		"d-cache-block-size",
		"d-cache-line-size",
	};
	const char **propnames = icache ? ipropnames : dpropnames;
	const __be32 *sizep, *lsizep, *bsizep, *setsp;
	u32 size, lsize, bsize, sets;
	bool success = true;

	size = 0;
	sets = -1u;
	lsize = bsize = cur_cpu_spec->dcache_bsize;
	sizep = of_get_property(np, propnames[0], NULL);
	if (sizep != NULL)
		size = be32_to_cpu(*sizep);
	setsp = of_get_property(np, propnames[1], NULL);
	if (setsp != NULL)
		sets = be32_to_cpu(*setsp);
	bsizep = of_get_property(np, propnames[2], NULL);
	lsizep = of_get_property(np, propnames[3], NULL);
	if (bsizep == NULL)
		bsizep = lsizep;
	if (lsizep != NULL)
		lsize = be32_to_cpu(*lsizep);
	if (bsizep != NULL)
		bsize = be32_to_cpu(*bsizep);
	if (sizep == NULL || bsizep == NULL || lsizep == NULL)
		success = false;

	/*
	 * OF is weird .. it represents fully associative caches
	 * as "1 way" which doesn't make much sense and doesn't
	 * leave room for direct mapped. We'll assume that 0
	 * in OF means direct mapped for that reason.
	 */
	if (sets == 1)
		sets = 0;
	else if (sets == 0)
		sets = 1;

	init_cache_info(info, size, lsize, bsize, sets);

	return success;
}

void __init initialize_cache_info(void)
{
	struct device_node *cpu = NULL, *l2, *l3 = NULL;
	u32 pvr;

	DBG(" -> initialize_cache_info()\n");

	/*
	 * All shipping POWER8 machines have a firmware bug that
	 * puts incorrect information in the device-tree. This will
	 * be (hopefully) fixed for future chips but for now hard
	 * code the values if we are running on one of these
	 */
	pvr = PVR_VER(mfspr(SPRN_PVR));
	if (pvr == PVR_POWER8 || pvr == PVR_POWER8E ||
	    pvr == PVR_POWER8NVL) {
						/* size    lsize  blk  sets */
		init_cache_info(&ppc64_caches.l1i, 0x8000, 128, 128, 32);
		init_cache_info(&ppc64_caches.l1d, 0x10000, 128, 128, 64);
		init_cache_info(&ppc64_caches.l2, 0x80000, 128, 0, 512);
		init_cache_info(&ppc64_caches.l3, 0x800000, 128, 0, 8192);
	} else
		cpu = of_find_node_by_type(NULL, "cpu");

	/*
	 * We're assuming *all* of the CPUs have the same
	 * d-cache and i-cache sizes... -Peter
	 */
	if (cpu) {
		if (!parse_cache_info(cpu, false, &ppc64_caches.l1d))
			DBG("Argh, can't find dcache properties !\n");

		if (!parse_cache_info(cpu, true, &ppc64_caches.l1i))
			DBG("Argh, can't find icache properties !\n");

		/*
		 * Try to find the L2 and L3 if any. Assume they are
		 * unified and use the D-side properties.
		 */
		l2 = of_find_next_cache_node(cpu);
		of_node_put(cpu);
		if (l2) {
			parse_cache_info(l2, false, &ppc64_caches.l2);
			l3 = of_find_next_cache_node(l2);
			of_node_put(l2);
		}
		if (l3) {
			parse_cache_info(l3, false, &ppc64_caches.l3);
			of_node_put(l3);
		}
	}

	/* For use by binfmt_elf */
	dcache_bsize = ppc64_caches.l1d.block_size;
	icache_bsize = ppc64_caches.l1i.block_size;

	cur_cpu_spec->dcache_bsize = dcache_bsize;
	cur_cpu_spec->icache_bsize = icache_bsize;

	DBG(" <- initialize_cache_info()\n");
}

/*
 * This returns the limit below which memory accesses to the linear
 * mapping are guaranteed not to cause an architectural exception (e.g.,
 * TLB or SLB miss fault).
 *
 * This is used to allocate PACAs and various interrupt stacks that
 * are accessed early in interrupt handlers that must not cause
 * re-entrant interrupts.
 */
__init u64 ppc64_bolted_size(void)
{
#ifdef CONFIG_PPC_BOOK3E
	/* Freescale BookE bolts the entire linear mapping */
	/* XXX: BookE ppc64_rma_limit setup seems to disagree? */
	if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E))
		return linear_map_top;
	/* Other BookE, we assume the first GB is bolted */
	return 1ul << 30;
#else
	/* BookS radix, does not take faults on linear mapping */
	if (early_radix_enabled())
		return ULONG_MAX;

	/* BookS hash, the first segment is bolted */
	if (early_mmu_has_feature(MMU_FTR_1T_SEGMENT))
		return 1UL << SID_SHIFT_1T;
	return 1UL << SID_SHIFT;
#endif
}

void __init irqstack_early_init(void)
{
	u64 limit = ppc64_bolted_size();
	unsigned int i;

	/*
	 * Interrupt stacks must be in the first segment since we
	 * cannot afford to take SLB misses on them. They are not
	 * accessed in realmode.
	 */
	for_each_possible_cpu(i) {
		softirq_ctx[i] = (struct thread_info *)
			__va(memblock_alloc_base(THREAD_SIZE,
						 THREAD_SIZE, limit));
		hardirq_ctx[i] = (struct thread_info *)
			__va(memblock_alloc_base(THREAD_SIZE,
						 THREAD_SIZE, limit));
	}
}

#ifdef CONFIG_PPC_BOOK3E
void __init exc_lvl_early_init(void)
{
	unsigned int i;
	unsigned long sp;

	for_each_possible_cpu(i) {
		sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
		critirq_ctx[i] = (struct thread_info *)__va(sp);
		paca_ptrs[i]->crit_kstack = __va(sp + THREAD_SIZE);

		sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
		dbgirq_ctx[i] = (struct thread_info *)__va(sp);
		paca_ptrs[i]->dbg_kstack = __va(sp + THREAD_SIZE);

		sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
		mcheckirq_ctx[i] = (struct thread_info *)__va(sp);
		paca_ptrs[i]->mc_kstack = __va(sp + THREAD_SIZE);
	}

	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
		patch_exception(0x040, exc_debug_debug_book3e);
}
#endif

/*
 * Emergency stacks are used for a range of things, from asynchronous
 * NMIs (system reset, machine check) to synchronous, process context.
 * We set preempt_count to zero, even though that isn't necessarily correct. To
 * get the right value we'd need to copy it from the previous thread_info, but
 * doing that might fault causing more problems.
 * TODO: what to do with accounting?
 */
static void emerg_stack_init_thread_info(struct thread_info *ti, int cpu)
{
	ti->task = NULL;
	ti->cpu = cpu;
	ti->preempt_count = 0;
	ti->local_flags = 0;
	ti->flags = 0;
	klp_init_thread_info(ti);
}

/*
 * Stack space used when we detect a bad kernel stack pointer, and
 * early in SMP boots before relocation is enabled. Exclusive emergency
 * stack for machine checks.
 */
void __init emergency_stack_init(void)
{
	u64 limit;
	unsigned int i;

	/*
	 * Emergency stacks must be under 256MB, we cannot afford to take
	 * SLB misses on them. The ABI also requires them to be 128-byte
	 * aligned.
	 *
	 * Since we use these as temporary stacks during secondary CPU
	 * bringup, machine check, system reset, and HMI, we need to get
	 * at them in real mode. This means they must also be within the RMO
	 * region.
	 *
	 * The IRQ stacks allocated elsewhere in this file are zeroed and
	 * initialized in kernel/irq.c. These are initialized here in order
	 * to have emergency stacks available as early as possible.
	 */
	limit = min(ppc64_bolted_size(), ppc64_rma_size);

	for_each_possible_cpu(i) {
		struct thread_info *ti;
		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
		memset(ti, 0, THREAD_SIZE);
		emerg_stack_init_thread_info(ti, i);
		paca_ptrs[i]->emergency_sp = (void *)ti + THREAD_SIZE;

#ifdef CONFIG_PPC_BOOK3S_64
		/* emergency stack for NMI exception handling. */
		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
		memset(ti, 0, THREAD_SIZE);
		emerg_stack_init_thread_info(ti, i);
		paca_ptrs[i]->nmi_emergency_sp = (void *)ti + THREAD_SIZE;

		/* emergency stack for machine check exception handling. */
		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
		memset(ti, 0, THREAD_SIZE);
		emerg_stack_init_thread_info(ti, i);
		paca_ptrs[i]->mc_emergency_sp = (void *)ti + THREAD_SIZE;
#endif
	}
}

#ifdef CONFIG_SMP
#define PCPU_DYN_SIZE		()

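/*
 * Helpers for pcpu_embed_first_chunk(): bootmem-backed allocation on the
 * cpu's node, a matching free, and a node-aware cpu distance so percpu
 * units can be grouped by locality.
 */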
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	return __alloc_bootmem_node(NODE_DATA(early_cpu_to_node(cpu)), size, align,
				    __pa(MAX_DMA_ADDRESS));
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

static int pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	if (early_cpu_to_node(from) == early_cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

void __init setup_per_cpu_areas(void)
{
	const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
	size_t atom_size;
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Linear mapping is one of 4K, 1M and 16M. For 4K, no need
	 * to group units. For larger mappings, use 1M atom which
	 * should be large enough to contain a number of units.
	 */
	if (mmu_linear_psize == MMU_PAGE_4K)
		atom_size = PAGE_SIZE;
	else
		atom_size = 1 << 20;

	rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
				    pcpu_fc_alloc, pcpu_fc_free);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
		paca_ptrs[cpu]->data_offset = __per_cpu_offset[cpu];
	}
}
#endif

4cb3cee0 | 777 | |
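/* Memory hotplug block size, taken from the platform if it provides one. */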
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
unsigned long memory_block_size_bytes(void)
{
	if (ppc_md.memory_block_size)
		return ppc_md.memory_block_size();

	return MIN_MEMORY_BLOCK_SIZE;
}
#endif

#if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO)
struct ppc_pci_io ppc_pci_io;
EXPORT_SYMBOL(ppc_pci_io);
#endif

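/*
 * Sample period for the perf-based hardlockup detector: the watchdog
 * threshold (in seconds) scaled to processor cycles via ppc_proc_freq.
 */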
#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
	return ppc_proc_freq * watchdog_thresh;
}
#endif

/*
 * The perf based hardlockup detector breaks PMU event based branches, so
 * disable it by default. Book3S has a soft-nmi hardlockup detector based
 * on the decrementer interrupt, so it does not suffer from this problem.
 *
 * It is likely to get false positives in VM guests, so disable it there
 * by default too.
 */
static int __init disable_hardlockup_detector(void)
{
#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
	hardlockup_detector_disable();
#else
	if (firmware_has_feature(FW_FEATURE_LPAR))
		hardlockup_detector_disable();
#endif

	return 0;
}
early_initcall(disable_hardlockup_detector);

#ifdef CONFIG_PPC_BOOK3S_64
static enum l1d_flush_type enabled_flush_types;
static void *l1d_flush_fallback_area;
static bool no_rfi_flush;
bool rfi_flush;

static int __init handle_no_rfi_flush(char *p)
{
	pr_info("rfi-flush: disabled on command line.\n");
	no_rfi_flush = true;
	return 0;
}
early_param("no_rfi_flush", handle_no_rfi_flush);

/*
 * The RFI flush is not KPTI, but because users will see doco that says to use
 * nopti we hijack that option here to also disable the RFI flush.
 */
static int __init handle_no_pti(char *p)
{
	pr_info("rfi-flush: disabling due to 'nopti' on command line.\n");
	handle_no_rfi_flush(NULL);
	return 0;
}
early_param("nopti", handle_no_pti);

static void do_nothing(void *unused)
{
	/*
	 * We don't need to do the flush explicitly, just enter+exit kernel is
	 * sufficient, the RFI exit handlers will do the right thing.
	 */
}

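/*
 * Patch the RFI flush call sites on or off. When enabling, bounce every
 * CPU through the kernel via do_nothing() so each one performs a flush
 * on its next kernel exit.
 */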
void rfi_flush_enable(bool enable)
{
	if (rfi_flush == enable)
		return;

	if (enable) {
		do_rfi_flush_fixups(enabled_flush_types);
		on_each_cpu(do_nothing, NULL, 1);
	} else
		do_rfi_flush_fixups(L1D_FLUSH_NONE);

	rfi_flush = enable;
}

static void init_fallback_flush(void)
{
	u64 l1d_size, limit;
	int cpu;

	l1d_size = ppc64_caches.l1d.size;
	limit = min(ppc64_bolted_size(), ppc64_rma_size);

	/*
	 * Align to L1d size, and size it at 2x L1d size, to catch possible
	 * hardware prefetch runoff. We don't have a recipe for load patterns to
	 * reliably avoid the prefetcher.
	 */
	l1d_flush_fallback_area = __va(memblock_alloc_base(l1d_size * 2, l1d_size, limit));
	memset(l1d_flush_fallback_area, 0, l1d_size * 2);

	for_each_possible_cpu(cpu) {
		struct paca_struct *paca = paca_ptrs[cpu];
		paca->rfi_flush_fallback_area = l1d_flush_fallback_area;
		paca->l1d_flush_size = l1d_size;
	}
}

void __init setup_rfi_flush(enum l1d_flush_type types, bool enable)
{
	if (types & L1D_FLUSH_FALLBACK) {
		pr_info("rfi-flush: Using fallback displacement flush\n");
		init_fallback_flush();
	}

	if (types & L1D_FLUSH_ORI)
		pr_info("rfi-flush: Using ori type flush\n");

	if (types & L1D_FLUSH_MTTRIG)
		pr_info("rfi-flush: Using mttrig type flush\n");

	enabled_flush_types = types;

	if (!no_rfi_flush)
		rfi_flush_enable(enable);
}

#ifdef CONFIG_DEBUG_FS
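/*
 * Runtime toggle for the RFI flush, exposed in debugfs (powerpc/rfi_flush):
 * write 1 to enable, 0 to disable.
 */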
static int rfi_flush_set(void *data, u64 val)
{
	if (val == 1)
		rfi_flush_enable(true);
	else if (val == 0)
		rfi_flush_enable(false);
	else
		return -EINVAL;

	return 0;
}

static int rfi_flush_get(void *data, u64 *val)
{
	*val = rfi_flush ? 1 : 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");

static __init int rfi_flush_debugfs_init(void)
{
	debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush);
	return 0;
}
device_initcall(rfi_flush_debugfs_init);
#endif

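/* Report Meltdown mitigation status through the sysfs cpu vulnerabilities interface. */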
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
	if (rfi_flush)
		return sprintf(buf, "Mitigation: RFI Flush\n");

	return sprintf(buf, "Vulnerable\n");
}
#endif /* CONFIG_PPC_BOOK3S_64 */