/*
 *
 * Common boot and setup code.
 *
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/export.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/utsname.h>
#include <linux/tty.h>
#include <linux/root_dev.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/memblock.h>
#include <linux/pci.h>
#include <linux/lockdep.h>
#include <linux/memory.h>
#include <linux/nmi.h>

#include <asm/debugfs.h>
#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
#include <asm/paca.h>
#include <asm/time.h>
#include <asm/cputable.h>
#include <asm/dt_cpu_ftrs.h>
#include <asm/sections.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/xmon.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/code-patching.h>
#include <asm/livepatch.h>
#include <asm/opal.h>
#include <asm/cputhreads.h>
#include <asm/hw_irq.h>
#include <asm/feature-fixups.h>

#include "setup.h"

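/*
 * DBG() output goes through udbg and so is visible before the console is
 * up; define DEBUG above this block to enable it.
 */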
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

int spinning_secondaries;
u64 ppc64_pft_size;

struct ppc64_caches ppc64_caches = {
	.l1d = {
		.block_size = 0x40,
		.log_block_size = 6,
	},
	.l1i = {
		.block_size = 0x40,
		.log_block_size = 6
	},
};
EXPORT_SYMBOL_GPL(ppc64_caches);

#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP)
void __init setup_tlb_core_data(void)
{
	int cpu;

	BUILD_BUG_ON(offsetof(struct tlb_core_data, lock) != 0);

	for_each_possible_cpu(cpu) {
		int first = cpu_first_thread_sibling(cpu);

		/*
		 * If we boot via kdump on a non-primary thread,
		 * make sure we point at the thread that actually
		 * set up this TLB.
		 */
		if (cpu_first_thread_sibling(boot_cpuid) == first)
			first = boot_cpuid;

		paca_ptrs[cpu]->tcd_ptr = &paca_ptrs[first]->tcd;

		/*
		 * If we have threads, we need either tlbsrx.
		 * or e6500 tablewalk mode, or else TLB handlers
		 * will be racy and could produce duplicate entries.
		 * Should we panic instead?
		 */
		WARN_ONCE(smt_enabled_at_boot >= 2 &&
			  !mmu_has_feature(MMU_FTR_USE_TLBRSRV) &&
			  book3e_htw_mode != PPC_HTW_E6500,
			  "%s: unsupported MMU configuration\n", __func__);
	}
}
#endif

#ifdef CONFIG_SMP

static char *smt_enabled_cmdline;

/* Look for ibm,smt-enabled OF option */
void __init check_smt_enabled(void)
{
	struct device_node *dn;
	const char *smt_option;

	/* Default to enabling all threads */
	smt_enabled_at_boot = threads_per_core;

	/* Allow the command line to overrule the OF option */
	if (smt_enabled_cmdline) {
		if (!strcmp(smt_enabled_cmdline, "on"))
			smt_enabled_at_boot = threads_per_core;
		else if (!strcmp(smt_enabled_cmdline, "off"))
			smt_enabled_at_boot = 0;
		else {
			int smt;
			int rc;

			rc = kstrtoint(smt_enabled_cmdline, 10, &smt);
			if (!rc)
				smt_enabled_at_boot =
					min(threads_per_core, smt);
		}
	} else {
		dn = of_find_node_by_path("/options");
		if (dn) {
			smt_option = of_get_property(dn, "ibm,smt-enabled",
						     NULL);

			if (smt_option) {
				if (!strcmp(smt_option, "on"))
					smt_enabled_at_boot = threads_per_core;
				else if (!strcmp(smt_option, "off"))
					smt_enabled_at_boot = 0;
			}

			of_node_put(dn);
		}
	}
}

/* Look for smt-enabled= cmdline option */
static int __init early_smt_enabled(char *p)
{
	smt_enabled_cmdline = p;
	return 0;
}
early_param("smt-enabled", early_smt_enabled);

#endif /* CONFIG_SMP */

/** Fix up paca fields required for the boot cpu */
static void __init fixup_boot_paca(void)
{
	/* The boot cpu is started */
	get_paca()->cpu_start = 1;
	/* Allow percpu accesses to work until we set up percpu data */
	get_paca()->data_offset = 0;
	/* Mark interrupts disabled in PACA */
	irq_soft_mask_set(IRQS_DISABLED);
}

static void __init configure_exceptions(void)
{
	/*
	 * Set up the trampolines from the lowmem exception vectors
	 * to the kdump kernel when not using a relocatable kernel.
	 */
	setup_kdump_trampoline();

	/* Under a PAPR hypervisor, we need hypercalls */
	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		/* Enable AIL if possible */
		pseries_enable_reloc_on_exc();

		/*
		 * Tell the hypervisor that we want our exceptions to
		 * be taken in little endian mode.
		 *
		 * We don't call this for big endian as our calling convention
		 * makes us always enter in BE, and the call may fail under
		 * some circumstances with kdump.
		 */
#ifdef __LITTLE_ENDIAN__
		pseries_little_endian_exceptions();
#endif
	} else {
		/* Set endian mode using OPAL */
		if (firmware_has_feature(FW_FEATURE_OPAL))
			opal_configure_cores();

		/* AIL on native is done in cpu_ready_for_interrupts() */
	}
}

static void cpu_ready_for_interrupts(void)
{
	/*
	 * Enable AIL if supported, and we are in hypervisor mode. This
	 * is called once for every processor.
	 *
	 * If we are not in hypervisor mode the job is done once for
	 * the whole partition in configure_exceptions().
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S)) {
		unsigned long lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
	}

	/*
	 * Set HFSCR:TM based on CPU features:
	 * In the special case of TM no suspend (P9N DD2.1), Linux is
	 * told TM is off via the dt-ftrs but told to (partially) use
	 * it via OPAL_REINIT_CPUS_TM_SUSPEND_DISABLED. So HFSCR[TM]
	 * will be off from dt-ftrs but we need to turn it on for the
	 * no suspend case.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		if (cpu_has_feature(CPU_FTR_TM_COMP))
			mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) | HFSCR_TM);
		else
			mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
	}

	/* Set IR and DR in PACA MSR */
	get_paca()->kernel_msr = MSR_KERNEL;
}

unsigned long spr_default_dscr = 0;

void __init record_spr_defaults(void)
{
	if (early_cpu_has_feature(CPU_FTR_DSCR))
		spr_default_dscr = mfspr(SPRN_DSCR);
}

/*
 * Early initialization entry point. This is called by head.S
 * with MMU translation disabled. We rely on the "feature" of
 * the CPU that ignores the top 2 bits of the address in real
 * mode so we can access kernel globals normally provided we
 * only toy with things in the RMO region. From here, we do
 * some early parsing of the device-tree to set up our MEMBLOCK
 * data structures, and allocate & initialize the hash table
 * and segment tables so we can start running with translation
 * enabled.
 *
 * It is this function which will call the probe() callback of
 * the various platform types and copy the matching one to the
 * global ppc_md structure. Your platform can eventually do
 * some very early initializations from the probe() routine, but
 * this is not recommended, be very careful as, for example, the
 * device-tree is not accessible via normal means at this point.
 */

void __init early_setup(unsigned long dt_ptr)
{
	static __initdata struct paca_struct boot_paca;

	/* -------- printk is _NOT_ safe to use here ! ------- */

	/* Try new device tree based feature discovery ... */
	if (!dt_cpu_ftrs_init(__va(dt_ptr)))
		/* Otherwise use the old style CPU table */
		identify_cpu(0, mfspr(SPRN_PVR));

	/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
	initialise_paca(&boot_paca, 0);
	setup_paca(&boot_paca);
	fixup_boot_paca();

	/* -------- printk is now safe to use ------- */

	/* Enable early debugging if any specified (see udbg.h) */
	udbg_early_init();

	DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);

	/*
	 * Do early initialization using the flattened device
	 * tree, such as retrieving the physical memory map or
	 * calculating/retrieving the hash table size.
	 */
	early_init_devtree(__va(dt_ptr));

	/* Now we know the logical id of our boot cpu, setup the paca. */
	if (boot_cpuid != 0) {
		/* Poison paca_ptrs[0] again if it's not the boot cpu */
		memset(&paca_ptrs[0], 0x88, sizeof(paca_ptrs[0]));
	}
	setup_paca(paca_ptrs[boot_cpuid]);
	fixup_boot_paca();

	/*
	 * Configure exception handlers. This includes setting up trampolines
	 * if needed, setting exception endian mode, etc...
	 */
	configure_exceptions();

	/* Apply all the dynamic patching */
	apply_feature_fixups();
	setup_feature_keys();

	/* Initialize the hash table or TLB handling */
	early_init_mmu();

	/*
	 * After firmware and early platform setup code has set things up,
	 * we note the SPR values for configurable control/performance
	 * registers, and use those as initial defaults.
	 */
	record_spr_defaults();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been setup), so adjust the MSR in the PACA to
	 * have IR and DR set and enable AIL if it exists.
	 */
	cpu_ready_for_interrupts();

	/*
	 * We enable ftrace here, but since we only support DYNAMIC_FTRACE, it
	 * will only actually get enabled on the boot cpu much later once
	 * ftrace itself has been initialized.
	 */
	this_cpu_enable_ftrace();

	DBG(" <- early_setup()\n");

#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
	/*
	 * This needs to be done *last* (after the above DBG() even).
	 *
	 * Right after we return from this function, we turn on the MMU
	 * which means the real-mode access trick that btext does will
	 * no longer work, it needs to switch to using a real MMU
	 * mapping. This call will ensure that it does.
	 */
	btext_map();
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
}

#ifdef CONFIG_SMP
void early_setup_secondary(void)
{
	/* Mark interrupts disabled in PACA */
	irq_soft_mask_set(IRQS_DISABLED);

	/* Initialize the hash table or TLB handling */
	early_init_mmu_secondary();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been setup), so adjust the MSR in the PACA to
	 * have IR and DR set.
	 */
	cpu_ready_for_interrupts();
}

#endif /* CONFIG_SMP */

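/*
 * Called on CPUs told to stop during a panic: hard-disable interrupts and
 * spin forever at low SMT priority so we don't steal cycles from sibling
 * threads doing useful work.
 */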
void panic_smp_self_stop(void)
{
	hard_irq_disable();
	spin_begin();
	while (1)
		spin_cpu_relax();
}

#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
static bool use_spinloop(void)
{
	if (IS_ENABLED(CONFIG_PPC_BOOK3S)) {
		/*
		 * See comments in head_64.S -- not all platforms insert
		 * secondaries at __secondary_hold and wait at the spin
		 * loop.
		 */
		if (firmware_has_feature(FW_FEATURE_OPAL))
			return false;
		return true;
	}

	/*
	 * When book3e boots from kexec, the ePAPR spin table does
	 * not get used.
	 */
	return of_property_read_bool(of_chosen, "linux,booted-from-kexec");
}

void smp_release_cpus(void)
{
	unsigned long *ptr;
	int i;

	if (!use_spinloop())
		return;

	DBG(" -> smp_release_cpus()\n");

	/*
	 * All secondary cpus are spinning on a common spinloop, release them
	 * all now so they can start to spin on their individual paca
	 * spinloops. For non SMP kernels, the secondary cpus never get out
	 * of the common spinloop.
	 */

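	/*
	 * The secondaries are held in the copy of the kernel that sits at
	 * physical address zero. When this kernel is linked to run at a
	 * non-zero PHYSICAL_START (e.g. a kdump kernel), subtracting
	 * PHYSICAL_START from the symbol's address points us at the
	 * spinloop word inside that low-memory copy.
	 */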
	ptr  = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
			- PHYSICAL_START);
	*ptr = ppc_function_entry(generic_secondary_smp_init);

	/* And wait a bit for them to catch up */
	for (i = 0; i < 100000; i++) {
		mb();
		HMT_low();
		if (spinning_secondaries == 0)
			break;
		udelay(1);
	}
	DBG("spinning_secondaries = %d\n", spinning_secondaries);

	DBG(" <- smp_release_cpus()\n");
}
#endif /* CONFIG_SMP || CONFIG_KEXEC_CORE */

/*
 * Initialize some remaining members of the ppc64_caches and systemcfg
 * structures (at least until we get rid of them completely). This is
 * mostly some cache information about the CPU that will be used by cache
 * flush routines and/or provided to userland.
 */

static void init_cache_info(struct ppc_cache_info *info, u32 size, u32 lsize,
			    u32 bsize, u32 sets)
{
	info->size = size;
	info->sets = sets;
	info->line_size = lsize;
	info->block_size = bsize;
	info->log_block_size = __ilog2(bsize);
	if (bsize)
		info->blocks_per_page = PAGE_SIZE / bsize;
	else
		info->blocks_per_page = 0;

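	/*
	 * sets == 0 encodes a fully associative cache here (see the
	 * remapping in parse_cache_info()), reported as assoc 0xffff.
	 */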
	if (sets == 0)
		info->assoc = 0xffff;
	else
		info->assoc = size / (sets * lsize);
}

static bool __init parse_cache_info(struct device_node *np,
				    bool icache,
				    struct ppc_cache_info *info)
{
	static const char *ipropnames[] __initdata = {
		"i-cache-size",
		"i-cache-sets",
		"i-cache-block-size",
		"i-cache-line-size",
	};
	static const char *dpropnames[] __initdata = {
		"d-cache-size",
		"d-cache-sets",
		"d-cache-block-size",
		"d-cache-line-size",
	};
	const char **propnames = icache ? ipropnames : dpropnames;
	const __be32 *sizep, *lsizep, *bsizep, *setsp;
	u32 size, lsize, bsize, sets;
	bool success = true;

	size = 0;
	sets = -1u;
	lsize = bsize = cur_cpu_spec->dcache_bsize;
	sizep = of_get_property(np, propnames[0], NULL);
	if (sizep != NULL)
		size = be32_to_cpu(*sizep);
	setsp = of_get_property(np, propnames[1], NULL);
	if (setsp != NULL)
		sets = be32_to_cpu(*setsp);
	bsizep = of_get_property(np, propnames[2], NULL);
	lsizep = of_get_property(np, propnames[3], NULL);
	if (bsizep == NULL)
		bsizep = lsizep;
	if (lsizep != NULL)
		lsize = be32_to_cpu(*lsizep);
	if (bsizep != NULL)
		bsize = be32_to_cpu(*bsizep);
	if (sizep == NULL || bsizep == NULL || lsizep == NULL)
		success = false;

	/*
	 * OF is weird .. it represents fully associative caches
	 * as "1 way" which doesn't make much sense and doesn't
	 * leave room for direct mapped. We'll assume that 0
	 * in OF means direct mapped for that reason.
	 */
	if (sets == 1)
		sets = 0;
	else if (sets == 0)
		sets = 1;

	init_cache_info(info, size, lsize, bsize, sets);

	return success;
}

void __init initialize_cache_info(void)
{
	struct device_node *cpu = NULL, *l2, *l3 = NULL;
	u32 pvr;

	DBG(" -> initialize_cache_info()\n");

	/*
	 * All shipping POWER8 machines have a firmware bug that
	 * puts incorrect information in the device-tree. This will
	 * be (hopefully) fixed for future chips but for now hard
	 * code the values if we are running on one of these.
	 */
	pvr = PVR_VER(mfspr(SPRN_PVR));
	if (pvr == PVR_POWER8 || pvr == PVR_POWER8E ||
	    pvr == PVR_POWER8NVL) {
						/* size    lsize  blk  sets */
		init_cache_info(&ppc64_caches.l1i, 0x8000,   128, 128,   32);
		init_cache_info(&ppc64_caches.l1d, 0x10000,  128, 128,   64);
		init_cache_info(&ppc64_caches.l2,  0x80000,  128,   0,  512);
		init_cache_info(&ppc64_caches.l3,  0x800000, 128,   0, 8192);
	} else
		cpu = of_find_node_by_type(NULL, "cpu");

	/*
	 * We're assuming *all* of the CPUs have the same
	 * d-cache and i-cache sizes... -Peter
	 */
	if (cpu) {
		if (!parse_cache_info(cpu, false, &ppc64_caches.l1d))
			DBG("Argh, can't find dcache properties !\n");

		if (!parse_cache_info(cpu, true, &ppc64_caches.l1i))
			DBG("Argh, can't find icache properties !\n");

		/*
		 * Try to find the L2 and L3 if any. Assume they are
		 * unified and use the D-side properties.
		 */
		l2 = of_find_next_cache_node(cpu);
		of_node_put(cpu);
		if (l2) {
			parse_cache_info(l2, false, &ppc64_caches.l2);
			l3 = of_find_next_cache_node(l2);
			of_node_put(l2);
		}
		if (l3) {
			parse_cache_info(l3, false, &ppc64_caches.l3);
			of_node_put(l3);
		}
	}

	/* For use by binfmt_elf */
	dcache_bsize = ppc64_caches.l1d.block_size;
	icache_bsize = ppc64_caches.l1i.block_size;

	cur_cpu_spec->dcache_bsize = dcache_bsize;
	cur_cpu_spec->icache_bsize = icache_bsize;

	DBG(" <- initialize_cache_info()\n");
}

/*
 * This returns the limit below which memory accesses to the linear
 * mapping are guaranteed not to cause an architectural exception (e.g.,
 * TLB or SLB miss fault).
 *
 * This is used to allocate PACAs and various interrupt stacks that
 * are accessed early in interrupt handlers that must not cause
 * re-entrant interrupts.
 */
__init u64 ppc64_bolted_size(void)
{
#ifdef CONFIG_PPC_BOOK3E
	/* Freescale BookE bolts the entire linear mapping */
	/* XXX: BookE ppc64_rma_limit setup seems to disagree? */
	if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E))
		return linear_map_top;
	/* Other BookE, we assume the first GB is bolted */
	return 1ul << 30;
#else
	/* BookS radix, does not take faults on linear mapping */
	if (early_radix_enabled())
		return ULONG_MAX;

	/* BookS hash, the first segment is bolted */
	if (early_mmu_has_feature(MMU_FTR_1T_SEGMENT))
		return 1UL << SID_SHIFT_1T;
	return 1UL << SID_SHIFT;
#endif
}

static void *__init alloc_stack(unsigned long limit, int cpu)
{
	unsigned long pa;

	BUILD_BUG_ON(STACK_INT_FRAME_SIZE % 16);

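	/* Prefer a node-local allocation, then fall back to any node below limit */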
	pa = memblock_alloc_base_nid(THREAD_SIZE, THREAD_SIZE, limit,
				     early_cpu_to_node(cpu), MEMBLOCK_NONE);
	if (!pa) {
		pa = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
		if (!pa)
			panic("cannot allocate stacks");
	}

	return __va(pa);
}

void __init irqstack_early_init(void)
{
	u64 limit = ppc64_bolted_size();
	unsigned int i;

	/*
	 * Interrupt stacks must be in the first segment since we
	 * cannot afford to take SLB misses on them. They are not
	 * accessed in realmode.
	 */
	for_each_possible_cpu(i) {
		softirq_ctx[i] = alloc_stack(limit, i);
		hardirq_ctx[i] = alloc_stack(limit, i);
	}
}

#ifdef CONFIG_PPC_BOOK3E
void __init exc_lvl_early_init(void)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		void *sp;

		sp = alloc_stack(ULONG_MAX, i);
		critirq_ctx[i] = sp;
		paca_ptrs[i]->crit_kstack = sp + THREAD_SIZE;

		sp = alloc_stack(ULONG_MAX, i);
		dbgirq_ctx[i] = sp;
		paca_ptrs[i]->dbg_kstack = sp + THREAD_SIZE;

		sp = alloc_stack(ULONG_MAX, i);
		mcheckirq_ctx[i] = sp;
		paca_ptrs[i]->mc_kstack = sp + THREAD_SIZE;
	}

	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
		patch_exception(0x040, exc_debug_debug_book3e);
}
#endif

/*
 * Emergency stacks are used for a range of things, from asynchronous
 * NMIs (system reset, machine check) to synchronous, process context.
 * We set preempt_count to zero, even though that isn't necessarily correct. To
 * get the right value we'd need to copy it from the previous thread_info, but
 * doing that might fault causing more problems.
 * TODO: what to do with accounting?
 */
static void emerg_stack_init_thread_info(struct thread_info *ti, int cpu)
{
	ti->task = NULL;
	ti->cpu = cpu;
	ti->preempt_count = 0;
	ti->local_flags = 0;
	ti->flags = 0;
	klp_init_thread_info(ti);
}

/*
 * Stack space used when we detect a bad kernel stack pointer, and
 * early in SMP boots before relocation is enabled. Exclusive emergency
 * stack for machine checks.
 */
void __init emergency_stack_init(void)
{
	u64 limit;
	unsigned int i;

	/*
	 * Emergency stacks must be under 256MB, we cannot afford to take
	 * SLB misses on them. The ABI also requires them to be 128-byte
	 * aligned.
	 *
	 * Since we use these as temporary stacks during secondary CPU
	 * bringup, machine check, system reset, and HMI, we need to get
	 * at them in real mode. This means they must also be within the RMO
	 * region.
	 *
	 * The IRQ stacks allocated elsewhere in this file are zeroed and
	 * initialized in kernel/irq.c. These are initialized here in order
	 * to have emergency stacks available as early as possible.
	 */
	limit = min(ppc64_bolted_size(), ppc64_rma_size);

	for_each_possible_cpu(i) {
		struct thread_info *ti;

		ti = alloc_stack(limit, i);
		memset(ti, 0, THREAD_SIZE);
		emerg_stack_init_thread_info(ti, i);
		paca_ptrs[i]->emergency_sp = (void *)ti + THREAD_SIZE;

#ifdef CONFIG_PPC_BOOK3S_64
		/* emergency stack for NMI exception handling. */
		ti = alloc_stack(limit, i);
		memset(ti, 0, THREAD_SIZE);
		emerg_stack_init_thread_info(ti, i);
		paca_ptrs[i]->nmi_emergency_sp = (void *)ti + THREAD_SIZE;

		/* emergency stack for machine check exception handling. */
		ti = alloc_stack(limit, i);
		memset(ti, 0, THREAD_SIZE);
		emerg_stack_init_thread_info(ti, i);
		paca_ptrs[i]->mc_emergency_sp = (void *)ti + THREAD_SIZE;
#endif
	}
}

#ifdef CONFIG_SMP
#define PCPU_DYN_SIZE		()

static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
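	/*
	 * Node-local allocation for this cpu, placed above the DMA zone
	 * (min_addr = __pa(MAX_DMA_ADDRESS)) so we don't eat into memory
	 * that DMA-limited devices may need.
	 */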
	return memblock_alloc_try_nid(size, align, __pa(MAX_DMA_ADDRESS),
				      MEMBLOCK_ALLOC_ACCESSIBLE,
				      early_cpu_to_node(cpu));
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
	memblock_free(__pa(ptr), size);
}

static int pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	if (early_cpu_to_node(from) == early_cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

void __init setup_per_cpu_areas(void)
{
	const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
	size_t atom_size;
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Linear mapping is one of 4K, 1M and 16M. For 4K, no need
	 * to group units. For larger mappings, use 1M atom which
	 * should be large enough to contain a number of units.
	 */
	if (mmu_linear_psize == MMU_PAGE_4K)
		atom_size = PAGE_SIZE;
	else
		atom_size = 1 << 20;

	rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
				    pcpu_fc_alloc, pcpu_fc_free);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
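		/*
		 * Mirror the offset into the paca: on ppc64 __my_cpu_offset
		 * reads local_paca->data_offset (via r13) rather than
		 * __per_cpu_offset[].
		 */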
		paca_ptrs[cpu]->data_offset = __per_cpu_offset[cpu];
	}
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
unsigned long memory_block_size_bytes(void)
{
	if (ppc_md.memory_block_size)
		return ppc_md.memory_block_size();

	return MIN_MEMORY_BLOCK_SIZE;
}
#endif

#if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO)
struct ppc_pci_io ppc_pci_io;
EXPORT_SYMBOL(ppc_pci_io);
#endif

#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
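	/* The sample period: watchdog_thresh seconds worth of processor cycles */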
	return ppc_proc_freq * watchdog_thresh;
}
#endif

/*
 * The perf based hardlockup detector breaks PMU event based branches, so
 * disable it by default. Book3S has a soft-nmi hardlockup detector based
 * on the decrementer interrupt, so it does not suffer from this problem.
 *
 * It is likely to get false positives in VM guests, so disable it there
 * by default too.
 */
static int __init disable_hardlockup_detector(void)
{
#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
	hardlockup_detector_disable();
#else
	if (firmware_has_feature(FW_FEATURE_LPAR))
		hardlockup_detector_disable();
#endif

	return 0;
}
early_initcall(disable_hardlockup_detector);

#ifdef CONFIG_PPC_BOOK3S_64
static enum l1d_flush_type enabled_flush_types;
static void *l1d_flush_fallback_area;
static bool no_rfi_flush;
bool rfi_flush;

static int __init handle_no_rfi_flush(char *p)
{
	pr_info("rfi-flush: disabled on command line.\n");
	no_rfi_flush = true;
	return 0;
}
early_param("no_rfi_flush", handle_no_rfi_flush);

/*
 * The RFI flush is not KPTI, but because users will see doco that says to use
 * nopti we hijack that option here to also disable the RFI flush.
 */
static int __init handle_no_pti(char *p)
{
	pr_info("rfi-flush: disabling due to 'nopti' on command line.\n");
	handle_no_rfi_flush(NULL);
	return 0;
}
early_param("nopti", handle_no_pti);

static void do_nothing(void *unused)
{
	/*
	 * We don't need to do the flush explicitly, just enter+exit kernel is
	 * sufficient, the RFI exit handlers will do the right thing.
	 */
}

void rfi_flush_enable(bool enable)
{
	if (enable) {
		do_rfi_flush_fixups(enabled_flush_types);
		on_each_cpu(do_nothing, NULL, 1);
	} else
		do_rfi_flush_fixups(L1D_FLUSH_NONE);

	rfi_flush = enable;
}

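/*
 * Marked __ref because it calls __init memblock routines; that should be
 * safe because any later call (setup_rfi_flush() can run again, e.g. after
 * partition migration) returns early once the fallback area exists.
 */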
static void __ref init_fallback_flush(void)
{
	u64 l1d_size, limit;
	int cpu;

	/* Only allocate the fallback flush area once (at boot time). */
	if (l1d_flush_fallback_area)
		return;

	l1d_size = ppc64_caches.l1d.size;

	/*
	 * If there is no d-cache-size property in the device tree, l1d_size
	 * could be zero. That leads to the loop in the asm wrapping around to
	 * 2^64-1, and then walking off the end of the fallback area and
	 * eventually causing a page fault which is fatal. Just default to
	 * something vaguely sane.
	 */
	if (!l1d_size)
		l1d_size = (64 * 1024);

	limit = min(ppc64_bolted_size(), ppc64_rma_size);

	/*
	 * Align to L1d size, and size it at 2x L1d size, to catch possible
	 * hardware prefetch runoff. We don't have a recipe for load patterns to
	 * reliably avoid the prefetcher.
	 */
	l1d_flush_fallback_area = __va(memblock_alloc_base(l1d_size * 2, l1d_size, limit));
	memset(l1d_flush_fallback_area, 0, l1d_size * 2);

	for_each_possible_cpu(cpu) {
		struct paca_struct *paca = paca_ptrs[cpu];
		paca->rfi_flush_fallback_area = l1d_flush_fallback_area;
		paca->l1d_flush_size = l1d_size;
	}
}

void setup_rfi_flush(enum l1d_flush_type types, bool enable)
{
	if (types & L1D_FLUSH_FALLBACK) {
		pr_info("rfi-flush: fallback displacement flush available\n");
		init_fallback_flush();
	}

	if (types & L1D_FLUSH_ORI)
		pr_info("rfi-flush: ori type flush available\n");

	if (types & L1D_FLUSH_MTTRIG)
		pr_info("rfi-flush: mttrig type flush available\n");

	enabled_flush_types = types;

	if (!no_rfi_flush)
		rfi_flush_enable(enable);
}

#ifdef CONFIG_DEBUG_FS
static int rfi_flush_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	/* Only do anything if we're changing state */
	if (enable != rfi_flush)
		rfi_flush_enable(enable);

	return 0;
}

static int rfi_flush_get(void *data, u64 *val)
{
	*val = rfi_flush ? 1 : 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");

static __init int rfi_flush_debugfs_init(void)
{
	debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush);
	return 0;
}
device_initcall(rfi_flush_debugfs_init);
#endif
#endif /* CONFIG_PPC_BOOK3S_64 */