/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * This file contains low level CPU setup functions.
 * Copyright 2009 Freescale Semiconductor, Inc.
 *
 * Based on cpu_setup_6xx code.
 */

#include <linux/linkage.h>

#include <asm/page.h>		/* KERNELBASE, used by flush_dcache_L1 */
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/nohash/mmu-e500.h>
#include <asm/asm-offsets.h>
#include <asm/mpc85xx.h>

_GLOBAL(__e500_icache_setup)
	mfspr	r0, SPRN_L1CSR1
	andi.	r3, r0, L1CSR1_ICE
	bnelr				/* Already enabled */
	oris	r0, r0, L1CSR1_CPE@h
	ori	r0, r0, (L1CSR1_ICFI | L1CSR1_ICLFR | L1CSR1_ICE)
	mtspr	SPRN_L1CSR1, r0		/* Enable I-Cache */
	isync
	blr
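
/*
 * Note: the D-cache setup below must invalidate the cache and clear any
 * lock bits before re-enabling it; L1CSR0[CLFC] is polled until the
 * hardware reports the flash clear of the lock bits as complete.
 */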
_GLOBAL(__e500_dcache_setup)
	mfspr	r0, SPRN_L1CSR0
	andi.	r3, r0, L1CSR0_DCE
	bnelr				/* Already enabled */
	msync
	isync
	li	r0, 0
	mtspr	SPRN_L1CSR0, r0		/* Disable */
	msync
	isync
	li	r0, (L1CSR0_DCFI | L1CSR0_CLFC)
	mtspr	SPRN_L1CSR0, r0		/* Invalidate */
	isync
1:	mfspr	r0, SPRN_L1CSR0
	andi.	r3, r0, L1CSR0_CLFC
	bne+	1b			/* Wait for lock bits reset */
	oris	r0, r0, L1CSR0_CPE@h
	ori	r0, r0, L1CSR0_DCE
	msync
	isync
	mtspr	SPRN_L1CSR0, r0		/* Enable */
	isync
	blr

/*
 * FIXME - we haven't yet done testing to determine a reasonable default
 * value for PW20_WAIT_IDLE_BIT.
 */
#define PW20_WAIT_IDLE_BIT		50 /* 1ms, TB frequency is 41.66MHZ */
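
/*
 * PW20 is an e6500 core low-power state: once a core has been idle for the
 * programmed number of timebase ticks it drops into PW20 automatically.
 * setup_pw20_idle below enables the state and programs the entry count
 * into PWRMGTCR0.
 */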
_GLOBAL(setup_pw20_idle)
	mfspr	r3, SPRN_PWRMGTCR0

	/* Set PW20_WAIT bit, enable pw20 state */
	ori	r3, r3, PWRMGTCR0_PW20_WAIT
	li	r11, PW20_WAIT_IDLE_BIT

	/* Set Automatic PW20 Core Idle Count */
	rlwimi	r3, r11, PWRMGTCR0_PW20_ENT_SHIFT, PWRMGTCR0_PW20_ENT

	mtspr	SPRN_PWRMGTCR0, r3

	blr

/*
 * FIXME - we haven't yet done testing to determine a reasonable default
 * value for AV_WAIT_IDLE_BIT.
 */
#define AV_WAIT_IDLE_BIT		50 /* 1ms, TB frequency is 41.66MHZ */
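
/*
 * AltiVec idle power-down is analogous: after the unit has been idle for
 * the programmed count it is powered down until AltiVec is used again.
 * setup_altivec_idle below enables it and sets the count in PWRMGTCR0.
 */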
_GLOBAL(setup_altivec_idle)
	mfspr	r3, SPRN_PWRMGTCR0

	/* Enable Altivec Idle */
	oris	r3, r3, PWRMGTCR0_AV_IDLE_PD_EN@h
	li	r11, AV_WAIT_IDLE_BIT

	/* Set Automatic AltiVec Idle Count */
	rlwimi	r3, r11, PWRMGTCR0_AV_IDLE_CNT_SHIFT, PWRMGTCR0_AV_IDLE_CNT

	mtspr	SPRN_PWRMGTCR0, r3

	blr

#ifdef CONFIG_PPC_E500MC
_GLOBAL(__setup_cpu_e6500)
	mflr	r6
	bl	setup_altivec_ivors
	/* Touch IVOR42 only if the CPU supports E.HV category */
	mfspr	r10,SPRN_MMUCFG
	rlwinm.	r10,r10,0,MMUCFG_LPIDSIZE
	beq	1f
	bl	setup_lrat_ivor
1:
	bl	setup_pw20_idle
	bl	setup_altivec_idle
	bl	__setup_cpu_e5500
	mtlr	r6
	blr
#endif /* CONFIG_PPC_E500MC */

#ifdef CONFIG_PPC32
#ifdef CONFIG_PPC_E500
#ifndef CONFIG_PPC_E500MC
_GLOBAL(__setup_cpu_e500v1)
_GLOBAL(__setup_cpu_e500v2)
	mflr	r4
	bl	__e500_icache_setup
	bl	__e500_dcache_setup
	bl	__setup_e500_ivors
#if defined(CONFIG_FSL_RIO) || defined(CONFIG_FSL_PCI)
	/* Ensure that RFXE is set */
	mfspr	r3,SPRN_HID1
	oris	r3,r3,HID1_RFXE@h
	mtspr	SPRN_HID1,r3
#endif
	mtlr	r4
	blr
#else /* CONFIG_PPC_E500MC */
_GLOBAL(__setup_cpu_e500mc)
_GLOBAL(__setup_cpu_e5500)
	mflr	r5
	bl	__e500_icache_setup
	bl	__e500_dcache_setup
	bl	__setup_e500mc_ivors
	/*
	 * We only want to touch IVOR38-41 if we're running on hardware
	 * that supports category E.HV. The architectural way to determine
	 * this is MMUCFG[LPIDSIZE].
	 */
	mfspr	r3, SPRN_MMUCFG
	rlwinm.	r3, r3, 0, MMUCFG_LPIDSIZE
	beq	1f
	bl	__setup_ehv_ivors
	b	2f
1:
	lwz	r3, CPU_SPEC_FEATURES(r4)
	/* We need this check as cpu_setup is also called for
	 * the secondary cores. So, if we have already cleared
	 * the feature on the primary core, avoid doing it on the
	 * secondary core.
	 */
	andi.	r6, r3, CPU_FTR_EMB_HV
	beq	2f
	rlwinm	r3, r3, 0, ~CPU_FTR_EMB_HV
	stw	r3, CPU_SPEC_FEATURES(r4)
2:
	mtlr	r5
	blr
#endif /* CONFIG_PPC_E500MC */
#endif /* CONFIG_PPC_E500 */
#endif /* CONFIG_PPC32 */
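
/*
 * The CONFIG_PPC_BOOK3E_64 variants below mirror the 32-bit setup paths;
 * the __restore_cpu_* entry points repeat the same cache and IVOR setup so
 * that a core (re)started at runtime ends up configured the same way as at
 * boot.
 */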
#ifdef CONFIG_PPC_BOOK3E_64
_GLOBAL(__restore_cpu_e6500)
	mflr	r5
	bl	setup_altivec_ivors
	/* Touch IVOR42 only if the CPU supports E.HV category */
	mfspr	r10,SPRN_MMUCFG
	rlwinm.	r10,r10,0,MMUCFG_LPIDSIZE
	beq	1f
	bl	setup_lrat_ivor
1:
	bl	setup_pw20_idle
	bl	setup_altivec_idle
	bl	__restore_cpu_e5500
	mtlr	r5
	blr

_GLOBAL(__restore_cpu_e5500)
	mflr	r4
	bl	__e500_icache_setup
	bl	__e500_dcache_setup
	bl	__setup_base_ivors
	bl	setup_perfmon_ivor
	bl	setup_doorbell_ivors
	/*
	 * We only want to touch IVOR38-41 if we're running on hardware
	 * that supports category E.HV. The architectural way to determine
	 * this is MMUCFG[LPIDSIZE].
	 */
	mfspr	r10,SPRN_MMUCFG
	rlwinm.	r10,r10,0,MMUCFG_LPIDSIZE
	beq	1f
	bl	setup_ehv_ivors
1:
	mtlr	r4
	blr

_GLOBAL(__setup_cpu_e5500)
	mflr	r5
	bl	__e500_icache_setup
	bl	__e500_dcache_setup
	bl	__setup_base_ivors
	bl	setup_perfmon_ivor
	bl	setup_doorbell_ivors
	/*
	 * We only want to touch IVOR38-41 if we're running on hardware
	 * that supports category E.HV. The architectural way to determine
	 * this is MMUCFG[LPIDSIZE].
	 */
	mfspr	r10,SPRN_MMUCFG
	rlwinm.	r10,r10,0,MMUCFG_LPIDSIZE
	beq	1f
	bl	setup_ehv_ivors
	b	2f
1:
	ld	r10,CPU_SPEC_FEATURES(r4)
	LOAD_REG_IMMEDIATE(r9,CPU_FTR_EMB_HV)
	andc	r10,r10,r9
	std	r10,CPU_SPEC_FEATURES(r4)
2:
	mtlr	r5
	blr
#endif /* CONFIG_PPC_BOOK3E_64 */

/* flush L1 data cache, it can apply to e500v2, e500mc and e5500 */
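/*
 * There is no flash-invalidate for the whole L1 D-cache on these cores, so
 * the flush is done by software: HID0[DCFA] (data cache flush assist) is
 * set, a KERNELBASE-based region large enough to displace every line is
 * loaded, and the same region is then swept with dcbf.
 */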
_GLOBAL(flush_dcache_L1)
	mfmsr	r10
	wrteei	0

	mfspr	r3,SPRN_L1CFG0
	rlwinm	r5,r3,9,3	/* Extract cache block size */
	twlgti	r5,1		/* Only 32 and 64 byte cache blocks
				 * are currently defined.
				 */
	li	r4,32
	subfic	r6,r5,2		/* r6 = log2(1KiB / cache block size) -
				 *      log2(number of ways)
				 */
	slw	r5,r4,r5	/* r5 = cache block size */

	rlwinm	r7,r3,0,0xff	/* Extract number of KiB in the cache */
	mulli	r7,r7,13	/* An 8-way cache will require 13
				 * loads per set.
				 */
	slw	r7,r7,r6	/* r7 = number of loads to cover the cache */

	/* save off HID0 and set DCFA */
	mfspr	r8,SPRN_HID0
	ori	r9,r8,HID0_DCFA@l
	mtspr	SPRN_HID0,r9
	isync

	LOAD_REG_IMMEDIATE(r6, KERNELBASE)
	mr	r4,r6
	mtctr	r7

1:	lwz	r3,0(r4)	/* Load... */
	add	r4,r4,r5
	bdnz	1b

	msync
	mr	r4,r6
	mtctr	r7

1:	dcbf	0,r4		/* ...and flush. */
	add	r4,r4,r5
	bdnz	1b

	/* restore HID0 */
	mtspr	SPRN_HID0,r8
	isync

	wrtee	r10
	blr
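
/*
 * has_L2_cache returns 1 in r3 when the SoC has a backside L2 and 0 when it
 * does not; the SVR compare masks off the E (security-enabled) bit so that
 * P2040 and P2040E are treated alike.
 */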
SYM_FUNC_START_LOCAL(has_L2_cache)
	/* skip L2 cache on P2040/P2040E as they have no L2 cache */
	mfspr	r3, SPRN_SVR
	/* shift right by 8 bits and clear E bit of SVR */
	rlwinm	r4, r3, 24, ~0x800

	lis	r3, SVR_P2040@h
	ori	r3, r3, SVR_P2040@l
	cmpw	r4, r3
	beq	1f

	li	r3, 1
	blr
1:
	li	r3, 0
	blr
SYM_FUNC_END(has_L2_cache)

/* flush backside L2 cache */
SYM_FUNC_START_LOCAL(flush_backside_L2_cache)
	mflr	r10
	bl	has_L2_cache
	mtlr	r10
	cmpwi	r3, 0
	beq	2f

	/* Flush the L2 cache */
	mfspr	r3, SPRN_L2CSR0
	ori	r3, r3, L2CSR0_L2FL@l
	msync
	isync
	mtspr	SPRN_L2CSR0,r3
	isync

	/* check if it is complete */
1:	mfspr	r3,SPRN_L2CSR0
	andi.	r3, r3, L2CSR0_L2FL@l
	bne	1b
2:
	blr
SYM_FUNC_END(flush_backside_L2_cache)
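
/*
 * The cpu_down_flush_* entry points are used when a core is taken offline:
 * its private L1 (and backside L2 where present) must not be left holding
 * modified data once the core stops participating in coherency.
 */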
_GLOBAL(cpu_down_flush_e500v2)
	mflr	r0
	bl	flush_dcache_L1
	mtlr	r0
	blr

_GLOBAL(cpu_down_flush_e500mc)
_GLOBAL(cpu_down_flush_e5500)
	mflr	r0
	bl	flush_dcache_L1
	bl	flush_backside_L2_cache
	mtlr	r0
	blr

/* L1 Data Cache of e6500 contains no modified data, no flush is required */
_GLOBAL(cpu_down_flush_e6500)
	blr