// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2013
 * David Feng <[email protected]>
 *
 * (C) Copyright 2016
 * Alexander Graf <[email protected]>
 */

#include <common.h>
#include <cpu_func.h>
#include <hang.h>
#include <log.h>
#include <asm/cache.h>
#include <asm/global_data.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>

DECLARE_GLOBAL_DATA_PTR;

#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)

/*
 * With 4k page granule, a virtual address is split into 4 lookup parts
 * spanning 9 bits each:
 *
 *    _______________________________________________
 *   |       |       |       |       |       |       |
 *   |   0   |  Lv0  |  Lv1  |  Lv2  |  Lv3  |  off  |
 *   |_______|_______|_______|_______|_______|_______|
 *     63-48   47-39   38-30   29-21   20-12   11-00
 *
 *             mask        page size
 *
 * Lv0: FF8000000000       --
 * Lv1:   7FC0000000       1G
 * Lv2:     3FE00000       2M
 * Lv3:       1FF000       4K
 * off:          FFF
 */

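/*
 * Compute the TCR_ELx value for the given exception level: the intermediate
 * physical address size (IPS) and virtual address width are derived from the
 * highest address present in mem_map. Optionally reports ips/va_bits back to
 * the caller through pips/pva_bits.
 */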
u64 get_tcr(int el, u64 *pips, u64 *pva_bits)
{
	u64 max_addr = 0;
	u64 ips, va_bits;
	u64 tcr;
	int i;

	/* Find the largest address we need to support */
	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
		max_addr = max(max_addr, mem_map[i].virt + mem_map[i].size);

	/* Calculate the maximum physical (and thus virtual) address */
	if (max_addr > (1ULL << 44)) {
		ips = 5;
		va_bits = 48;
	} else if (max_addr > (1ULL << 42)) {
		ips = 4;
		va_bits = 44;
	} else if (max_addr > (1ULL << 40)) {
		ips = 3;
		va_bits = 42;
	} else if (max_addr > (1ULL << 36)) {
		ips = 2;
		va_bits = 40;
	} else if (max_addr > (1ULL << 32)) {
		ips = 1;
		va_bits = 36;
	} else {
		ips = 0;
		va_bits = 32;
	}

	if (el == 1) {
		tcr = TCR_EL1_RSVD | (ips << 32) | TCR_EPD1_DISABLE;
	} else if (el == 2) {
		tcr = TCR_EL2_RSVD | (ips << 16);
	} else {
		tcr = TCR_EL3_RSVD | (ips << 16);
	}

	/* PTWs cacheable, inner/outer WBWA and inner shareable */
	tcr |= TCR_TG0_4K | TCR_SHARED_INNER | TCR_ORGN_WBWA | TCR_IRGN_WBWA;
	tcr |= TCR_T0SZ(va_bits);

	if (pips)
		*pips = ips;
	if (pva_bits)
		*pva_bits = va_bits;

	return tcr;
}

#define MAX_PTE_ENTRIES 512

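/* Returns the type bits (fault/block/table/page) of a PTE */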
static int pte_type(u64 *pte)
{
	return *pte & PTE_TYPE_MASK;
}

/* Returns the LSB number for a PTE on level <level> */
static int level2shift(int level)
{
	/* Page is 12 bits wide, every level translates 9 bits */
	return (12 + 9 * (3 - level));
}

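/*
 * Walks the page tables from the configured start level and returns a
 * pointer to the PTE describing <addr> at the requested <level>, or NULL
 * if the walk hits a block or invalid entry before reaching that level.
 */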
static u64 *find_pte(u64 addr, int level)
{
	int start_level = 0;
	u64 *pte;
	u64 idx;
	u64 va_bits;
	int i;

	debug("addr=%llx level=%d\n", addr, level);

	get_tcr(0, NULL, &va_bits);
	if (va_bits < 39)
		start_level = 1;

	if (level < start_level)
		return NULL;

	/* Walk through all page table levels to find our PTE */
	pte = (u64 *)gd->arch.tlb_addr;
	for (i = start_level; i < 4; i++) {
		idx = (addr >> level2shift(i)) & 0x1FF;
		pte += idx;
		debug("idx=%llx PTE %p at level %d: %llx\n", idx, pte, i, *pte);

		/* Found it */
		if (i == level)
			return pte;
		/* PTE is not a table (either invalid or block), can't traverse */
		if (pte_type(pte) != PTE_TYPE_TABLE)
			return NULL;
		/* Off to the next level */
		pte = (u64 *)(*pte & 0x0000fffffffff000ULL);
	}

	/* Should never reach here */
	return NULL;
}

/* Creates and returns a new full table (512 entries) */
static u64 *create_table(void)
{
	u64 *new_table = (u64 *)gd->arch.tlb_fillptr;
	u64 pt_len = MAX_PTE_ENTRIES * sizeof(u64);

	/* Allocate MAX_PTE_ENTRIES pte entries */
	gd->arch.tlb_fillptr += pt_len;

	if (gd->arch.tlb_fillptr - gd->arch.tlb_addr > gd->arch.tlb_size)
		panic("Insufficient RAM for page table: 0x%lx > 0x%lx. "
		      "Please increase the size in get_page_table_size()",
		      gd->arch.tlb_fillptr - gd->arch.tlb_addr,
		      gd->arch.tlb_size);

	/* Mark all entries as invalid */
	memset(new_table, 0, pt_len);

	return new_table;
}

static void set_pte_table(u64 *pte, u64 *table)
{
	/* Point *pte to the new table */
	debug("Setting %p to addr=%p\n", pte, table);
	*pte = PTE_TYPE_TABLE | (ulong)table;
}

/* Splits a block PTE into a table with subpages spanning the old block */
static void split_block(u64 *pte, int level)
{
	u64 old_pte = *pte;
	u64 *new_table;
	u64 i = 0;
	/* level describes the parent level, we need the child ones */
	int levelshift = level2shift(level + 1);

	if (pte_type(pte) != PTE_TYPE_BLOCK)
		panic("PTE %p (%llx) is not a block. Some driver code wants to "
		      "modify dcache settings for a range not covered in "
		      "mem_map.", pte, old_pte);

	new_table = create_table();
	debug("Splitting pte %p (%llx) into %p\n", pte, old_pte, new_table);

	for (i = 0; i < MAX_PTE_ENTRIES; i++) {
		new_table[i] = old_pte | (i << levelshift);

		/* Level 3 block PTEs have the table type */
		if ((level + 1) == 3)
			new_table[i] |= PTE_TYPE_TABLE;

		debug("Setting new_table[%lld] = %llx\n", i, new_table[i]);
	}

	/* Set the new table into effect */
	set_pte_table(pte, new_table);
}

/* Add one mm_region map entry to the page tables */
static void add_map(struct mm_region *map)
{
	u64 *pte;
	u64 virt = map->virt;
	u64 phys = map->phys;
	u64 size = map->size;
	u64 attrs = map->attrs | PTE_TYPE_BLOCK | PTE_BLOCK_AF;
	u64 blocksize;
	int level;
	u64 *new_table;

	while (size) {
		pte = find_pte(virt, 0);
		if (pte && (pte_type(pte) == PTE_TYPE_FAULT)) {
			debug("Creating table for virt 0x%llx\n", virt);
			new_table = create_table();
			set_pte_table(pte, new_table);
		}

		for (level = 1; level < 4; level++) {
			pte = find_pte(virt, level);
			if (!pte)
				panic("pte not found\n");

			blocksize = 1ULL << level2shift(level);
			debug("Checking if pte fits for virt=%llx size=%llx blocksize=%llx\n",
			      virt, size, blocksize);
			if (size >= blocksize && !(virt & (blocksize - 1))) {
				/* Page fits, create block PTE */
				debug("Setting PTE %p to block virt=%llx\n",
				      pte, virt);
				if (level == 3)
					*pte = phys | attrs | PTE_TYPE_PAGE;
				else
					*pte = phys | attrs;
				virt += blocksize;
				phys += blocksize;
				size -= blocksize;
				break;
			} else if (pte_type(pte) == PTE_TYPE_FAULT) {
				/* Page doesn't fit, create subpages */
				debug("Creating subtable for virt 0x%llx blksize=%llx\n",
				      virt, blocksize);
				new_table = create_table();
				set_pte_table(pte, new_table);
			} else if (pte_type(pte) == PTE_TYPE_BLOCK) {
				debug("Split block into subtable for virt 0x%llx blksize=0x%llx\n",
				      virt, blocksize);
				split_block(pte, level);
			}
		}
	}
}

enum pte_type {
	PTE_INVAL,
	PTE_BLOCK,
	PTE_LEVEL,
};

/*
 * This is a recursively called function to count the number of
 * page tables we need to cover a particular PTE range. If you
 * call this with level = -1 you basically get the full 48 bit
 * coverage.
 */
static int count_required_pts(u64 addr, int level, u64 maxaddr)
{
	int levelshift = level2shift(level);
	u64 levelsize = 1ULL << levelshift;
	u64 levelmask = levelsize - 1;
	u64 levelend = addr + levelsize;
	int r = 0;
	int i;
	enum pte_type pte_type = PTE_INVAL;

	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++) {
		struct mm_region *map = &mem_map[i];
		u64 start = map->virt;
		u64 end = start + map->size;

		/* Check if the PTE would overlap with the map */
		if (max(addr, start) <= min(levelend, end)) {
			start = max(addr, start);
			end = min(levelend, end);

			/* We need a sub-pt for this level */
			if ((start & levelmask) || (end & levelmask)) {
				pte_type = PTE_LEVEL;
				break;
			}

			/* Lv0 can not do block PTEs, so do levels here too */
			if (level <= 0) {
				pte_type = PTE_LEVEL;
				break;
			}

			/* PTE is active, but fits into a block */
			pte_type = PTE_BLOCK;
		}
	}

	/*
	 * Block PTEs at this level are already covered by the parent page
	 * table, so we only need to count sub page tables.
	 */
	if (pte_type == PTE_LEVEL) {
		int sublevel = level + 1;
		u64 sublevelsize = 1ULL << level2shift(sublevel);

		/* Account for the new sub page table ... */
		r = 1;

		/* ... and for all child page tables that one might have */
		for (i = 0; i < MAX_PTE_ENTRIES; i++) {
			r += count_required_pts(addr, sublevel, maxaddr);
			addr += sublevelsize;

			if (addr >= maxaddr) {
				/*
				 * We reached the end of address space, no need
				 * to look any further.
				 */
				break;
			}
		}
	}

	return r;
}

/* Returns the estimated required size of all page tables */
__weak u64 get_page_table_size(void)
{
	u64 one_pt = MAX_PTE_ENTRIES * sizeof(u64);
	u64 size = 0;
	u64 va_bits;
	int start_level = 0;

	get_tcr(0, NULL, &va_bits);
	if (va_bits < 39)
		start_level = 1;

	/* Account for all page tables we would need to cover our memory map */
	size = one_pt * count_required_pts(0, start_level - 1, 1ULL << va_bits);

	/*
	 * We need to duplicate our page table once to have an emergency pt to
	 * resort to when splitting page tables later on
	 */
	size *= 2;

	/*
	 * We may need to split page tables later on if dcache settings change,
	 * so reserve up to 4 (random pick) page tables for that.
	 */
	size += one_pt * 4;

	return size;
}

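/*
 * Builds a fresh set of page tables from mem_map, allocating tables from the
 * current gd->arch.tlb_fillptr allocation pointer.
 */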
void setup_pgtables(void)
{
	int i;

	if (!gd->arch.tlb_fillptr || !gd->arch.tlb_addr)
		panic("Page table pointer not setup.");

	/*
	 * Allocate the first level we're on with invalid entries.
	 * If the starting level is 0 (va_bits >= 39), then this is our
	 * Lv0 page table, otherwise it's the entry Lv1 page table.
	 */
	create_table();

	/* Now add all MMU table entries one after another to the table */
	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
		add_map(&mem_map[i]);
}

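/*
 * Creates both the primary page tables and a second, "emergency" copy that
 * can be switched to while the primary tables are being modified.
 */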
static void setup_all_pgtables(void)
{
	u64 tlb_addr = gd->arch.tlb_addr;
	u64 tlb_size = gd->arch.tlb_size;

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create emergency page tables */
	gd->arch.tlb_size -= (uintptr_t)gd->arch.tlb_fillptr -
			     (uintptr_t)gd->arch.tlb_addr;
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	setup_pgtables();
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	gd->arch.tlb_addr = tlb_addr;
	gd->arch.tlb_size = tlb_size;
}

/* to activate the MMU we need to set up virtual memory */
__weak void mmu_setup(void)
{
	int el;

	/* Set up page tables only once */
	if (!gd->arch.tlb_fillptr)
		setup_all_pgtables();

	el = current_el();
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
			  MEMORY_ATTRIBUTES);

	/* enable the mmu */
	set_sctlr(get_sctlr() | CR_M);
}

/*
 * Performs an invalidation of the entire data cache at all levels
 */
void invalidate_dcache_all(void)
{
	__asm_invalidate_dcache_all();
	__asm_invalidate_l3_dcache();
}

/*
 * Performs a clean & invalidation of the entire data cache at all levels.
 * This function needs to be inline to avoid using the stack.
 * __asm_flush_l3_dcache returns the status of the timeout.
 */
inline void flush_dcache_all(void)
{
	int ret;

	__asm_flush_dcache_all();
	ret = __asm_flush_l3_dcache();
	if (ret)
		debug("flushing dcache returns 0x%x\n", ret);
	else
		debug("flushing dcache successfully.\n");
}

#ifndef CONFIG_SYS_DISABLE_DCACHE_OPS
/*
 * Invalidates range in all levels of D-cache/unified cache
 */
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_invalidate_dcache_range(start, stop);
}

/*
 * Flush range (clean & invalidate) from all levels of D-cache/unified cache
 */
void flush_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_flush_dcache_range(start, stop);
}
#else
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
}

void flush_dcache_range(unsigned long start, unsigned long stop)
{
}
#endif /* CONFIG_SYS_DISABLE_DCACHE_OPS */

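/* Enable the data cache; sets up the MMU and page tables first if necessary */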
void dcache_enable(void)
{
	/* The data cache is not active unless the mmu is enabled */
	if (!(get_sctlr() & CR_M)) {
		invalidate_dcache_all();
		__asm_invalidate_tlb_all();
		mmu_setup();
	}

	set_sctlr(get_sctlr() | CR_C);
}

void dcache_disable(void)
{
	uint32_t sctlr;

	sctlr = get_sctlr();

	/* if cache isn't enabled no need to disable */
	if (!(sctlr & CR_C))
		return;

	set_sctlr(sctlr & ~(CR_C|CR_M));

	flush_dcache_all();
	__asm_invalidate_tlb_all();
}

int dcache_status(void)
{
	return (get_sctlr() & CR_C) != 0;
}

u64 *__weak arch_get_page_table(void)
{
	puts("No page table offset defined\n");

	return NULL;
}

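/* True when both addr and size are multiples of align (a power of two) */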
static bool is_aligned(u64 addr, u64 size, u64 align)
{
	return !(addr & (align - 1)) && !(size & (align - 1));
}

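/*
 * Applies the given attributes to the single PTE covering <start> at <level>
 * when the region is block-aligned at that level; returns the number of bytes
 * covered, or 0 after splitting the block so the caller retries one level down.
 */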
/* Use flag to indicate if attrs has more than d-cache attributes */
static u64 set_one_region(u64 start, u64 size, u64 attrs, bool flag, int level)
{
	int levelshift = level2shift(level);
	u64 levelsize = 1ULL << levelshift;
	u64 *pte = find_pte(start, level);

	/* Can we just modify the current level block PTE? */
	if (is_aligned(start, size, levelsize)) {
		if (flag) {
			*pte &= ~PMD_ATTRMASK;
			*pte |= attrs & PMD_ATTRMASK;
		} else {
			*pte &= ~PMD_ATTRINDX_MASK;
			*pte |= attrs & PMD_ATTRINDX_MASK;
		}
		debug("Set attrs=%llx pte=%p level=%d\n", attrs, pte, level);

		return levelsize;
	}

	/* Unaligned or doesn't fit, maybe split block into table */
	debug("addr=%llx level=%d pte=%p (%llx)\n", start, level, pte, *pte);

	/* Maybe we need to split the block into a table */
	if (pte_type(pte) == PTE_TYPE_BLOCK)
		split_block(pte, level);

	/* And then double-check it became a table or already is one */
	if (pte_type(pte) != PTE_TYPE_TABLE)
		panic("PTE %p (%llx) for addr=%llx should be a table",
		      pte, *pte, start);

	/* Roll on to the next page table level */
	return 0;
}

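/*
 * Changes only the d-cache attributes for a region of memory, switching to
 * the emergency page tables while the live tables are edited.
 */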
void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
	u64 attrs = PMD_ATTRINDX(option >> 2);
	u64 real_start = start;
	u64 real_size = size;

	debug("start=%lx size=%lx\n", (ulong)start, (ulong)size);

	if (!gd->arch.tlb_emerg)
		panic("Emergency page table not setup.");

	/*
	 * We can not modify page tables that we're currently running on,
	 * so we first need to switch to the "emergency" page tables where
	 * we can safely modify our primary page tables and then switch back
	 */
	__asm_switch_ttbr(gd->arch.tlb_emerg);

	/*
	 * Loop through the address range until we find a page granule that fits
	 * our alignment constraints, then set it to the new cache attributes
	 */
	while (size > 0) {
		int level;
		u64 r;

		for (level = 1; level < 4; level++) {
			/* Set d-cache attributes only */
			r = set_one_region(start, size, attrs, false, level);
			if (r) {
				/* PTE successfully replaced */
				size -= r;
				start += r;
				break;
			}
		}
	}

	/* We're done modifying page tables, switch back to our primary ones */
	__asm_switch_ttbr(gd->arch.tlb_addr);

	/*
	 * Make sure there's nothing stale in dcache for a region that might
	 * have caches off now
	 */
	flush_dcache_range(real_start, real_start + real_size);
}

/*
 * Modify MMU table for a region with updated PXN/UXN/Memory type/valid bits.
 * The process is break-before-make. The target region will be marked as
 * invalid during the process of changing.
 */
void mmu_change_region_attr(phys_addr_t addr, size_t siz, u64 attrs)
{
	int level;
	u64 r, size, start;

	start = addr;
	size = siz;
	/*
	 * Loop through the address range until we find a page granule that fits
	 * our alignment constraints, then set it to "invalid".
	 */
	while (size > 0) {
		for (level = 1; level < 4; level++) {
			/* Set PTE to fault */
			r = set_one_region(start, size, PTE_TYPE_FAULT, true,
					   level);
			if (r) {
				/* PTE successfully invalidated */
				size -= r;
				start += r;
				break;
			}
		}
	}

	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);
	__asm_invalidate_tlb_all();

	/*
	 * Loop through the address range until we find a page granule that fits
	 * our alignment constraints, then set it to the new cache attributes
	 */
	start = addr;
	size = siz;
	while (size > 0) {
		for (level = 1; level < 4; level++) {
			/* Set PTE to new attributes */
			r = set_one_region(start, size, attrs, true, level);
			if (r) {
				/* PTE successfully updated */
				size -= r;
				start += r;
				break;
			}
		}
	}
	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);
	__asm_invalidate_tlb_all();
}

#else /* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */

/*
 * For SPL builds, we may want to not have dcache enabled. Any real U-Boot
 * running however really wants to have dcache and the MMU active. Check that
 * everything is sane and give the developer a hint if it isn't.
 */
#ifndef CONFIG_SPL_BUILD
#error Please describe your MMU layout in CONFIG_SYS_MEM_MAP and enable dcache.
#endif

void invalidate_dcache_all(void)
{
}

void flush_dcache_all(void)
{
}

void dcache_enable(void)
{
}

void dcache_disable(void)
{
}

int dcache_status(void)
{
	return 0;
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
}

#endif /* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */

#if !CONFIG_IS_ENABLED(SYS_ICACHE_OFF)

void icache_enable(void)
{
	invalidate_icache_all();
	set_sctlr(get_sctlr() | CR_I);
}

void icache_disable(void)
{
	set_sctlr(get_sctlr() & ~CR_I);
}

int icache_status(void)
{
	return (get_sctlr() & CR_I) != 0;
}

void invalidate_icache_all(void)
{
	__asm_invalidate_icache_all();
	__asm_invalidate_l3_icache();
}

#else /* !CONFIG_IS_ENABLED(SYS_ICACHE_OFF) */

void icache_enable(void)
{
}

void icache_disable(void)
{
}

int icache_status(void)
{
	return 0;
}

void invalidate_icache_all(void)
{
}

#endif /* !CONFIG_IS_ENABLED(SYS_ICACHE_OFF) */

/*
 * Enable dCache & iCache; whether the caches are actually enabled
 * depends on CONFIG_SYS_DCACHE_OFF and CONFIG_SYS_ICACHE_OFF
 */
void __weak enable_caches(void)
{
	icache_enable();
	dcache_enable();
}