/*
 * Copyright 2014 Freescale Semiconductor, Inc.
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>
#include <asm/arch-fsl-lsch3/immap_lsch3.h>
#include <fsl-mc/fsl_mc.h>
#include "cpu.h"
#include "mp.h"
#include "speed.h"

DECLARE_GLOBAL_DATA_PTR;

#ifndef CONFIG_SYS_DCACHE_OFF
/*
 * To start the MMU before DDR is available, we create an MMU table in
 * SRAM. The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use
 * three levels of translation tables here to cover 40-bit address space.
 * We use 4KB granule size, with 40 bits physical address, T0SZ=24
 *   Level 0 IA[39], table address @0
 *   Level 1 IA[38:30], table address @0x1000, 0x2000
 *   Level 2 IA[29:21], table address @0x3000
 */
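
/*
 * For reference, with the 4KB granule a 40-bit VA decomposes into table
 * indices as follows (illustrative arithmetic only; the code below uses
 * these shifts directly):
 *   level 0 index = (va >> 39) & 0x1     (2 entries, 512GB each)
 *   level 1 index = (va >> 30) & 0x1ff   (512 entries, 1GB each)
 *   level 2 index = (va >> 21) & 0x1ff   (512 entries, 2MB each)
 */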

#define SECTION_SHIFT_L0	39UL
#define SECTION_SHIFT_L1	30UL
#define SECTION_SHIFT_L2	21UL
#define BLOCK_SIZE_L0		0x8000000000UL
#define BLOCK_SIZE_L1		(1 << SECTION_SHIFT_L1)
#define BLOCK_SIZE_L2		(1 << SECTION_SHIFT_L2)
#define CONFIG_SYS_IFC_BASE	0x30000000
#define CONFIG_SYS_IFC_SIZE	0x10000000
#define CONFIG_SYS_IFC_BASE2	0x500000000
#define CONFIG_SYS_IFC_SIZE2	0x100000000
#define TCR_EL2_PS_40BIT	(2 << 16)
#define LSCH3_VA_BITS		(40)
#define LSCH3_TCR	(TCR_TG0_4K		| \
			TCR_EL2_PS_40BIT	| \
			TCR_SHARED_NON		| \
			TCR_ORGN_NC		| \
			TCR_IRGN_NC		| \
			TCR_T0SZ(LSCH3_VA_BITS))
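
/*
 * For reference, LSCH3_TCR decodes as: TCR_T0SZ(40) programs T0SZ to
 * 64 - 40 = 24 (a 2^40-byte region for TTBR0), TCR_TG0_4K selects the
 * 4KB granule, TCR_EL2_PS_40BIT sets a 40-bit physical address range,
 * and TCR_ORGN_NC/TCR_IRGN_NC make table walks non-cacheable, which
 * keeps the walker coherent while the tables sit in OCRAM and the
 * D-cache may still be off.
 */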

/*
 * Final MMU
 * Let's start from the same layout as the early MMU and modify it as
 * needed. IFC regions will be cache-inhibited.
 */
#define FINAL_QBMAN_CACHED_MEM	0x818000000UL
#define FINAL_QBMAN_CACHED_SIZE	0x4000000
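
/*
 * Note: FINAL_QBMAN_CACHED_MEM is not 1GB-aligned; it sits 0x18000000
 * into the 1GB block at 0x800000000, and the cached window is
 * 0x4000000 (64MB), i.e. 32 level-2 blocks of 2MB each.
 */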

static inline void early_mmu_setup(void)
{
	int el;
	u64 i;
	u64 section_l1t0, section_l1t1, section_l2;
	u64 *level0_table = (u64 *)CONFIG_SYS_FSL_OCRAM_BASE;
	u64 *level1_table_0 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x1000);
	u64 *level1_table_1 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x2000);
	u64 *level2_table = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x3000);

	level0_table[0] =
		(u64)level1_table_0 | PMD_TYPE_TABLE;
	level0_table[1] =
		(u64)level1_table_1 | PMD_TYPE_TABLE;

	/*
	 * set level 1 table 0 to cache-inhibit, covering 0 to 512GB
	 * set level 1 table 1 to cache-enabled, covering 512GB to 1TB
	 * set level 2 table to cache-inhibit, covering 0 to 1GB
	 */
	section_l1t0 = 0;
	section_l1t1 = BLOCK_SIZE_L0;
	section_l2 = 0;
	for (i = 0; i < 512; i++) {
		set_pgtable_section(level1_table_0, i, section_l1t0,
				    MT_DEVICE_NGNRNE);
		set_pgtable_section(level1_table_1, i, section_l1t1,
				    MT_NORMAL);
		set_pgtable_section(level2_table, i, section_l2,
				    MT_DEVICE_NGNRNE);
		section_l1t0 += BLOCK_SIZE_L1;
		section_l1t1 += BLOCK_SIZE_L1;
		section_l2 += BLOCK_SIZE_L2;
	}

	level1_table_0[0] =
		(u64)level2_table | PMD_TYPE_TABLE;
	level1_table_0[1] =
		0x40000000 | PMD_SECT_AF | PMD_TYPE_SECT |
		PMD_ATTRINDX(MT_DEVICE_NGNRNE);
	level1_table_0[2] =
		0x80000000 | PMD_SECT_AF | PMD_TYPE_SECT |
		PMD_ATTRINDX(MT_NORMAL);
	level1_table_0[3] =
		0xc0000000 | PMD_SECT_AF | PMD_TYPE_SECT |
		PMD_ATTRINDX(MT_NORMAL);
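
	/*
	 * In a level 1 table, entry n maps the 1GB block at (n << 30):
	 * entry 0 points to the level 2 sub-table for finer granularity
	 * over 0-1GB, entry 1 maps 0x40000000 as device memory, and
	 * entries 2-3 map 0x80000000-0xffffffff (the low DDR region on
	 * this SoC) as normal memory.
	 */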

	/* Rewrite table to enable cache */
	set_pgtable_section(level2_table,
			    CONFIG_SYS_FSL_OCRAM_BASE >> SECTION_SHIFT_L2,
			    CONFIG_SYS_FSL_OCRAM_BASE,
			    MT_NORMAL);
	for (i = CONFIG_SYS_IFC_BASE >> SECTION_SHIFT_L2;
	     i < (CONFIG_SYS_IFC_BASE + CONFIG_SYS_IFC_SIZE)
	     >> SECTION_SHIFT_L2; i++) {
		section_l2 = i << SECTION_SHIFT_L2;
		set_pgtable_section(level2_table, i,
				    section_l2, MT_NORMAL);
	}
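
	/*
	 * The loop above covers level 2 indices 0x30000000 >> 21 = 384
	 * through 511, i.e. 128 x 2MB = 256MB of IFC space, presumably
	 * so that early execution from flash on IFC is cacheable.
	 */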

	el = current_el();
	set_ttbr_tcr_mair(el, (u64)level0_table, LSCH3_TCR, MEMORY_ATTRIBUTES);
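	/*
	 * CR_M only turns the MMU on; the D-cache is enabled separately
	 * with CR_C in arch_cpu_init() after this function returns.
	 */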
	set_sctlr(get_sctlr() | CR_M);
}

/*
 * This final table looks similar to the early table, but differs in
 * detail. These tables are in regular memory. Cache on IFC is disabled.
 * One sub-table is added to enable cache for QBMan.
 */
static inline void final_mmu_setup(void)
{
	int el;
	u64 i, tbl_base, tbl_limit, section_base;
	u64 section_l1t0, section_l1t1, section_l2;
	u64 *level0_table = (u64 *)gd->arch.tlb_addr;
	u64 *level1_table_0 = (u64 *)(gd->arch.tlb_addr + 0x1000);
	u64 *level1_table_1 = (u64 *)(gd->arch.tlb_addr + 0x2000);
	u64 *level2_table_0 = (u64 *)(gd->arch.tlb_addr + 0x3000);
	u64 *level2_table_1 = (u64 *)(gd->arch.tlb_addr + 0x4000);

	level0_table[0] =
		(u64)level1_table_0 | PMD_TYPE_TABLE;
	level0_table[1] =
		(u64)level1_table_1 | PMD_TYPE_TABLE;

	/*
	 * set level 1 table 0 to cache-inhibit, covering 0 to 512GB
	 * set level 1 table 1 to cache-enabled, covering 512GB to 1TB
	 * set level 2 table 0 to cache-inhibit, covering 0 to 1GB
	 */
	section_l1t0 = 0;
	section_l1t1 = BLOCK_SIZE_L0 | PMD_SECT_OUTER_SHARE;
	section_l2 = 0;
	for (i = 0; i < 512; i++) {
		set_pgtable_section(level1_table_0, i, section_l1t0,
				    MT_DEVICE_NGNRNE);
		set_pgtable_section(level1_table_1, i, section_l1t1,
				    MT_NORMAL);
		set_pgtable_section(level2_table_0, i, section_l2,
				    MT_DEVICE_NGNRNE);
		section_l1t0 += BLOCK_SIZE_L1;
		section_l1t1 += BLOCK_SIZE_L1;
		section_l2 += BLOCK_SIZE_L2;
	}

	level1_table_0[0] =
		(u64)level2_table_0 | PMD_TYPE_TABLE;
	level1_table_0[2] =
		0x80000000 | PMD_SECT_AF | PMD_TYPE_SECT |
		PMD_SECT_OUTER_SHARE | PMD_ATTRINDX(MT_NORMAL);
	level1_table_0[3] =
		0xc0000000 | PMD_SECT_AF | PMD_TYPE_SECT |
		PMD_SECT_OUTER_SHARE | PMD_ATTRINDX(MT_NORMAL);

	/* Rewrite table to enable cache */
	set_pgtable_section(level2_table_0,
			    CONFIG_SYS_FSL_OCRAM_BASE >> SECTION_SHIFT_L2,
			    CONFIG_SYS_FSL_OCRAM_BASE,
			    MT_NORMAL);

	/*
	 * Fill in other parts of the tables if cache is needed.
	 * If finer granularity than 1GB is needed, a sub-table
	 * should be created.
	 */
	section_base = FINAL_QBMAN_CACHED_MEM & ~(BLOCK_SIZE_L1 - 1);
	i = section_base >> SECTION_SHIFT_L1;
	level1_table_0[i] = (u64)level2_table_1 | PMD_TYPE_TABLE;
	section_l2 = section_base;
	for (i = 0; i < 512; i++) {
		set_pgtable_section(level2_table_1, i, section_l2,
				    MT_DEVICE_NGNRNE);
		section_l2 += BLOCK_SIZE_L2;
	}
	tbl_base = FINAL_QBMAN_CACHED_MEM & (BLOCK_SIZE_L1 - 1);
	tbl_limit = (FINAL_QBMAN_CACHED_MEM + FINAL_QBMAN_CACHED_SIZE) &
		    (BLOCK_SIZE_L1 - 1);
	for (i = tbl_base >> SECTION_SHIFT_L2;
	     i < tbl_limit >> SECTION_SHIFT_L2; i++) {
		section_l2 = section_base + (i << SECTION_SHIFT_L2);
		set_pgtable_section(level2_table_1, i,
				    section_l2, MT_NORMAL);
	}
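
	/*
	 * With FINAL_QBMAN_CACHED_MEM = 0x818000000 this resolves to
	 * section_base = 0x800000000, tbl_base = 0x18000000 and
	 * tbl_limit = 0x1c000000, so entries 192-223 of level2_table_1
	 * (32 x 2MB = 64MB) are rewritten as normal memory.
	 */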

	/* flush new MMU table */
	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);
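	/*
	 * The flush is needed because LSCH3_TCR marks table walks
	 * non-cacheable (TCR_ORGN_NC/TCR_IRGN_NC): the walker fetches
	 * descriptors from memory, not from the D-cache the tables were
	 * just written through.
	 */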

	/* point TTBR to the new table */
	el = current_el();
	asm volatile("dsb sy");
	if (el == 1) {
		asm volatile("msr ttbr0_el1, %0"
			     : : "r" ((u64)level0_table) : "memory");
	} else if (el == 2) {
		asm volatile("msr ttbr0_el2, %0"
			     : : "r" ((u64)level0_table) : "memory");
	} else if (el == 3) {
		asm volatile("msr ttbr0_el3, %0"
			     : : "r" ((u64)level0_table) : "memory");
	} else {
		hang();
	}
	asm volatile("isb");

	/*
	 * The MMU is already enabled; we only need to invalidate the TLB
	 * to load the new table. The new table is compatible with the
	 * current one, so even if the MMU somehow walks the new table
	 * before the TLB is invalidated, it still works. There is no
	 * need to turn the MMU off here.
	 */
}

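/*
 * Enable the I-cache first, invalidate any stale D-cache and TLB
 * contents, build the early MMU table in OCRAM, then turn the D-cache
 * on.
 */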
int arch_cpu_init(void)
{
	icache_enable();
	__asm_invalidate_dcache_all();
	__asm_invalidate_tlb_all();
	early_mmu_setup();
	set_sctlr(get_sctlr() | CR_C);
	return 0;
}

/*
 * This function is called from lib/board.c.
 * It recreates the MMU table in main memory. The MMU and d-cache are
 * enabled earlier, so there is no need to disable the d-cache for this
 * operation.
 */
void enable_caches(void)
{
	final_mmu_setup();
	__asm_invalidate_tlb_all();
}
#endif

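/*
 * Look up the initiator at position @init_id within @cluster in the
 * GUTS topology (TP) registers; return its type register value, or 0
 * if the entry's available (TP_ITYP_AV) bit is not set.
 */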
static inline u32 initiator_type(u32 cluster, int init_id)
{
	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
	u32 type = in_le32(&gur->tp_ityp[idx]);

	if (type & TP_ITYP_AV)
		return type;

	return 0;
}

u32 cpu_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;
		cluster = in_le32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
					mask |= 1 << count;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) != TP_CLUSTER_EOC);

	return mask;
}

/*
 * Return the number of cores on this SoC.
 */
int cpu_numcores(void)
{
	return hweight32(cpu_mask());
}

int fsl_qoriq_core_to_cluster(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster;

	do {
		int j;
		cluster = in_le32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			if (initiator_type(cluster, j)) {
				if (count == core)
					return i;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) != TP_CLUSTER_EOC);

	return -1;	/* cannot identify the cluster */
}

u32 fsl_qoriq_core_to_type(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type;

	do {
		int j;
		cluster = in_le32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (count == core)
					return type;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) != TP_CLUSTER_EOC);

	return -1;	/* cannot identify the core type */
}

#ifdef CONFIG_DISPLAY_CPUINFO
int print_cpuinfo(void)
{
	struct sys_info sysinfo;
	char buf[32];
	unsigned int i, core;
	u32 type;

	get_sys_info(&sysinfo);
	puts("Clock Configuration:");
	for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
		if (!(i % 3))
			puts("\n ");
		type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
		printf("CPU%d(%s):%-4s MHz ", core,
		       type == TY_ITYP_VER_A7 ? "A7 " :
		       (type == TY_ITYP_VER_A53 ? "A53" :
			(type == TY_ITYP_VER_A57 ? "A57" : " ")),
		       strmhz(buf, sysinfo.freq_processor[core]));
	}
	printf("\n Bus: %-4s MHz ",
	       strmhz(buf, sysinfo.freq_systembus));
	printf("DDR: %-4s MHz", strmhz(buf, sysinfo.freq_ddrbus));
	printf(" DP-DDR: %-4s MHz", strmhz(buf, sysinfo.freq_ddrbus2));
	puts("\n");

	return 0;
}
#endif

int cpu_eth_init(bd_t *bis)
{
	int error = 0;

#ifdef CONFIG_FSL_MC_ENET
	error = mc_init(bis);
#endif
	return error;
}

int arch_early_init_r(void)
{
	int rv;
	rv = fsl_lsch3_wake_seconday_cores();

	if (rv)
		printf("Did not wake secondary cores\n");

	return 0;
}