/*
 * (C) Copyright 2013
 * David Feng <[email protected]>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>

DECLARE_GLOBAL_DATA_PTR;

#ifndef CONFIG_SYS_DCACHE_OFF
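/*
 * Builds a block (section) descriptor and stores it at page_table[index]:
 * the section base address is combined with the access flag, the MAIR
 * attribute index for memory_type and the requested shareability.
 */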
inline void set_pgtable_section(u64 *page_table, u64 index, u64 section,
				u64 memory_type, u64 share)
{
	u64 value;

	value = section | PMD_TYPE_SECT | PMD_SECT_AF;
	value |= PMD_ATTRINDX(memory_type);
	value |= share;
	page_table[index] = value;
}

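/*
 * Points a descriptor at a next-level page table; table_addr must be
 * suitably aligned since its low bits carry the descriptor type.
 */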
inline void set_pgtable_table(u64 *page_table, u64 index, u64 *table_addr)
{
	u64 value;

	value = (u64)table_addr | PMD_TYPE_TABLE;
	page_table[index] = value;
}

/*
 * To activate the MMU we need to set up virtual memory: the whole
 * address space is first identity-mapped as device memory, and the
 * sections covering the DRAM banks are then remapped as normal memory.
 */
static void mmu_setup(void)
{
	bd_t *bd = gd->bd;
	u64 *page_table = (u64 *)gd->arch.tlb_addr;
	u64 i, j;
	int el;

	/* Identity-map the whole address space as device memory */
	for (i = 0; i < (PGTABLE_SIZE >> 3); i++) {
		set_pgtable_section(page_table, i, i << SECTION_SHIFT,
				    MT_DEVICE_NGNRNE, PMD_SECT_NON_SHARE);
	}

	/* Remap the sections covering the DRAM banks as normal memory */
	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		ulong start = bd->bi_dram[i].start;
		ulong end = bd->bi_dram[i].start + bd->bi_dram[i].size;

		for (j = start >> SECTION_SHIFT;
		     j < end >> SECTION_SHIFT; j++) {
			set_pgtable_section(page_table, j, j << SECTION_SHIFT,
					    MT_NORMAL, PMD_SECT_NON_SHARE);
		}
	}

	/* Load TTBR0, TCR and MAIR for the current exception level */
	el = current_el();
	if (el == 1) {
		set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
				  TCR_EL1_RSVD | TCR_FLAGS | TCR_EL1_IPS_BITS,
				  MEMORY_ATTRIBUTES);
	} else if (el == 2) {
		set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
				  TCR_EL2_RSVD | TCR_FLAGS | TCR_EL2_IPS_BITS,
				  MEMORY_ATTRIBUTES);
	} else {
		set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
				  TCR_EL3_RSVD | TCR_FLAGS | TCR_EL3_IPS_BITS,
				  MEMORY_ATTRIBUTES);
	}

	/* Enable the MMU */
	set_sctlr(get_sctlr() | CR_M);
}

/*
 * Performs an invalidation of the entire data cache at all levels
 */
void invalidate_dcache_all(void)
{
	__asm_invalidate_dcache_all();
}

/*
 * Performs a clean & invalidate of the entire data cache at all levels.
 * This function needs to be inline to avoid using the stack.
 * __asm_flush_l3_cache() returns the timeout status of the L3 flush.
 */
inline void flush_dcache_all(void)
{
	int ret;

	__asm_flush_dcache_all();
	ret = __asm_flush_l3_cache();
	if (ret)
		debug("flushing dcache returned 0x%x\n", ret);
	else
		debug("flushing dcache succeeded\n");
}

/*
 * Invalidates the given range in all levels of D-cache/unified cache
 * (currently implemented as a clean & invalidate of the range)
 */
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_flush_dcache_range(start, stop);
}

/*
 * Flushes (cleans & invalidates) the given range in all levels of
 * D-cache/unified cache
 */
void flush_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_flush_dcache_range(start, stop);
}

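/*
 * Enables the data cache. If the MMU is still off it is set up first,
 * with the caches and TLBs invalidated, since on ARMv8 data caching
 * only takes effect for cacheable mappings, i.e. with the MMU enabled.
 */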
void dcache_enable(void)
{
	/* The data cache is not active unless the MMU is enabled */
	if (!(get_sctlr() & CR_M)) {
		invalidate_dcache_all();
		__asm_invalidate_tlb_all();
		mmu_setup();
	}

	set_sctlr(get_sctlr() | CR_C);
}

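/*
 * Disables the data cache and the MMU together, then flushes any dirty
 * lines back to memory and invalidates the now-stale TLB entries.
 */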
void dcache_disable(void)
{
	uint32_t sctlr;

	sctlr = get_sctlr();

	/* If the cache isn't enabled there is nothing to disable */
	if (!(sctlr & CR_C))
		return;

	set_sctlr(sctlr & ~(CR_C | CR_M));

	flush_dcache_all();
	__asm_invalidate_tlb_all();
}

int dcache_status(void)
{
	return (get_sctlr() & CR_C) != 0;
}

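/*
 * Weak default for boards that do not publish their page table
 * location; boards must override this for
 * mmu_set_region_dcache_behaviour() to have any effect.
 */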
u64 *__weak arch_get_page_table(void)
{
	puts("No page table offset defined\n");

	return NULL;
}

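/*
 * Changes the dcache attributes (via the MAIR attribute index) of every
 * section descriptor that overlaps [start, start + size), then
 * invalidates the TLB and flushes the range so the change takes effect.
 */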
void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
	u64 *page_table = arch_get_page_table();
	u64 upto, end;

	if (page_table == NULL)
		return;

	/* Convert the byte range into section indices, rounding end up */
	end = ALIGN(start + size, (1 << MMU_SECTION_SHIFT)) >>
	      MMU_SECTION_SHIFT;
	start = start >> MMU_SECTION_SHIFT;

	/* Rewrite the attribute index of each affected descriptor */
	for (upto = start; upto < end; upto++) {
		page_table[upto] &= ~PMD_ATTRINDX_MASK;
		page_table[upto] |= PMD_ATTRINDX(option);
	}

	/* Ensure the table updates are visible, then drop stale TLB entries */
	asm volatile("dsb sy");
	__asm_invalidate_tlb_all();
	asm volatile("dsb sy");
	asm volatile("isb");

	/* Flush the old contents of the region out of the data cache */
	start = start << MMU_SECTION_SHIFT;
	end = end << MMU_SECTION_SHIFT;
	flush_dcache_range(start, end);
	asm volatile("dsb sy");
}
#else /* CONFIG_SYS_DCACHE_OFF */

void invalidate_dcache_all(void)
{
}

void flush_dcache_all(void)
{
}

void dcache_enable(void)
{
}

void dcache_disable(void)
{
}

int dcache_status(void)
{
	return 0;
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
}

#endif /* CONFIG_SYS_DCACHE_OFF */

#ifndef CONFIG_SYS_ICACHE_OFF

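/*
 * Enables the instruction cache, invalidating it first so no stale
 * instructions can be fetched.
 */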
void icache_enable(void)
{
	__asm_invalidate_icache_all();
	set_sctlr(get_sctlr() | CR_I);
}

void icache_disable(void)
{
	set_sctlr(get_sctlr() & ~CR_I);
}

int icache_status(void)
{
	return (get_sctlr() & CR_I) != 0;
}

void invalidate_icache_all(void)
{
	__asm_invalidate_icache_all();
}

#else /* CONFIG_SYS_ICACHE_OFF */

void icache_enable(void)
{
}

void icache_disable(void)
{
}

int icache_status(void)
{
	return 0;
}

void invalidate_icache_all(void)
{
}

#endif /* CONFIG_SYS_ICACHE_OFF */

/*
 * Enable dcache & icache. Whether the caches actually get enabled
 * depends on CONFIG_SYS_DCACHE_OFF and CONFIG_SYS_ICACHE_OFF.
 */
void __weak enable_caches(void)
{
	icache_enable();
	dcache_enable();
}