/*
 * Microblaze MMU emulation for qemu.
 *
 * Copyright (c) 2009 Edgar E. Iglesias
 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"

#define D(x)
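/*
 * The empty D() definition above compiles the debug tracing in this file
 * away to nothing; redefining it as "#define D(x) x" enables the qemu_log()
 * trace statements wrapped in D().
 */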

static unsigned int tlb_decode_size(unsigned int f)
{
    static const unsigned int sizes[] = {
        1 * 1024, 4 * 1024, 16 * 1024, 64 * 1024, 256 * 1024,
        1 * 1024 * 1024, 4 * 1024 * 1024, 16 * 1024 * 1024
    };
    assert(f < ARRAY_SIZE(sizes));
    return sizes[f];
}

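/*
 * Flush every QEMU softmmu page covered by the guest TLB entry at @idx.
 * A single guest entry can map up to 16MB, i.e. many TARGET_PAGE_SIZE pages.
 */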
static void mmu_flush_idx(CPUMBState *env, unsigned int idx)
{
    CPUState *cs = CPU(mb_env_get_cpu(env));
    struct microblaze_mmu *mmu = &env->mmu;
    unsigned int tlb_size;
    uint32_t tlb_tag, end, t;

    t = mmu->rams[RAM_TAG][idx];
    if (!(t & TLB_VALID))
        return;

    tlb_tag = t & TLB_EPN_MASK;
    tlb_size = tlb_decode_size((t & TLB_PAGESZ_MASK) >> 7);
    end = tlb_tag + tlb_size;

    while (tlb_tag < end) {
        tlb_flush_page(cs, tlb_tag);
        tlb_tag += TARGET_PAGE_SIZE;
    }
}

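/*
 * The guest is installing a new PID. Flush any valid TLB entries tagged
 * with the PID that is being replaced, so stale translations cannot be hit.
 */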
static void mmu_change_pid(CPUMBState *env, unsigned int newpid)
{
    struct microblaze_mmu *mmu = &env->mmu;
    unsigned int i;
    uint32_t t;

    if (newpid & ~0xff)
        qemu_log_mask(LOG_GUEST_ERROR, "Illegal rpid=%x\n", newpid);

    for (i = 0; i < ARRAY_SIZE(mmu->rams[RAM_TAG]); i++) {
        /* Lookup and decode. */
        t = mmu->rams[RAM_TAG][i];
        if (t & TLB_VALID) {
            if (mmu->tids[i] && ((mmu->regs[MMU_R_PID] & 0xff) == mmu->tids[i]))
                mmu_flush_idx(env, i);
        }
    }
}

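/*
 * Software walk of the unified TLB. On a hit, @lu is filled in with the
 * translation and protection flags and the function returns 1; on a miss
 * or protection violation it returns 0 with lu->err set to ERR_MISS or
 * ERR_PROT respectively.
 */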
/* rw - 0 = read, 1 = write, 2 = fetch. */
unsigned int mmu_translate(struct microblaze_mmu *mmu,
                           struct microblaze_mmu_lookup *lu,
                           target_ulong vaddr, int rw, int mmu_idx)
{
    unsigned int i, hit = 0;
    unsigned int tlb_ex = 0, tlb_wr = 0, tlb_zsel;
    uint64_t tlb_tag, tlb_rpn, mask;
    uint32_t tlb_size, t0;

    lu->err = ERR_MISS;
    for (i = 0; i < ARRAY_SIZE(mmu->rams[RAM_TAG]); i++) {
        uint64_t t, d;

        /* Lookup and decode. */
        t = mmu->rams[RAM_TAG][i];
        D(qemu_log("TLB %d valid=%" PRId64 "\n", i, t & TLB_VALID));
        if (t & TLB_VALID) {
            tlb_size = tlb_decode_size((t & TLB_PAGESZ_MASK) >> 7);
            if (tlb_size < TARGET_PAGE_SIZE) {
                qemu_log("%d pages not supported\n", tlb_size);
                abort();
            }

            mask = ~((uint64_t)tlb_size - 1);
            tlb_tag = t & TLB_EPN_MASK;
            if ((vaddr & mask) != (tlb_tag & mask)) {
                D(qemu_log("TLB %d vaddr=%" PRIx64 " != tag=%" PRIx64 "\n",
                           i, vaddr & mask, tlb_tag & mask));
                continue;
            }
            if (mmu->tids[i]
                && ((mmu->regs[MMU_R_PID] & 0xff) != mmu->tids[i])) {
                D(qemu_log("TLB %d pid=%x != tid=%x\n",
                           i, mmu->regs[MMU_R_PID], mmu->tids[i]));
                continue;
            }

            /* Bring in the data part. */
            d = mmu->rams[RAM_DATA][i];
            tlb_ex = d & TLB_EX;
            tlb_wr = d & TLB_WR;

            /* Now let's see if there is a zone that overrides the protbits. */
            tlb_zsel = (d >> 4) & 0xf;
            t0 = mmu->regs[MMU_R_ZPR] >> (30 - (tlb_zsel * 2));
            t0 &= 0x3;

            if (tlb_zsel > mmu->c_mmu_zones) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "tlb zone select out of range! %d\n", tlb_zsel);
                t0 = 1; /* Ignore. */
            }

            if (mmu->c_mmu == 1) {
                t0 = 1; /* Zones are disabled. */
            }

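            /*
             * Zone protection values as handled below: 0 - no access in
             * user mode (the entry is skipped, so the access faults as a
             * miss), kernel uses the TLB protection bits; 2 - kernel gets
             * full access, user uses the TLB protection bits; 3 - full
             * access for everyone; anything else - use the TLB bits as-is.
             */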
            switch (t0) {
            case 0:
                if (mmu_idx == MMU_USER_IDX)
                    continue;
                break;
            case 2:
                if (mmu_idx != MMU_USER_IDX) {
                    tlb_ex = 1;
                    tlb_wr = 1;
                }
                break;
            case 3:
                tlb_ex = 1;
                tlb_wr = 1;
                break;
            default: break;
            }

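            /*
             * Assume a protection fault until the checks below pass, and
             * translate the effective permissions into QEMU's PAGE_* flags.
             */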
            lu->err = ERR_PROT;
            lu->prot = PAGE_READ;
            if (tlb_wr)
                lu->prot |= PAGE_WRITE;
            else if (rw == 1)
                goto done;
            if (tlb_ex)
                lu->prot |= PAGE_EXEC;
            else if (rw == 2) {
                goto done;
            }

            tlb_rpn = d & TLB_RPN_MASK;

            lu->vaddr = tlb_tag;
            lu->paddr = tlb_rpn;
            lu->size = tlb_size;
            lu->err = ERR_HIT;
            lu->idx = i;
            hit = 1;
            goto done;
        }
    }
done:
    D(qemu_log("MMU vaddr=%" PRIx64 " rw=%d tlb_wr=%d tlb_ex=%d hit=%d\n",
               vaddr, rw, tlb_wr, tlb_ex, hit));
    return hit;
}

/* Writes/reads to the MMU's special regs end up here. */
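/*
 * Accesses are rejected unless the core is configured with a full MMU
 * (c_mmu >= 2) and software TLB access is enabled: bit 0 of c_mmu_tlb_access
 * additionally gates reads of TLBLO/TLBHI/PID/ZPR, and a value above 1 is
 * needed to write ZPR, PID and TLBSX.
 */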
uint32_t mmu_read(CPUMBState *env, uint32_t rn)
{
    unsigned int i;
    uint32_t r = 0;

    if (env->mmu.c_mmu < 2 || !env->mmu.c_mmu_tlb_access) {
        qemu_log_mask(LOG_GUEST_ERROR, "MMU access on MMU-less system\n");
        return 0;
    }

    switch (rn) {
    /* Reads to HI/LO trigger reads from the mmu rams. */
    case MMU_R_TLBLO:
    case MMU_R_TLBHI:
        if (!(env->mmu.c_mmu_tlb_access & 1)) {
            qemu_log_mask(LOG_GUEST_ERROR, "Invalid access to MMU reg %d\n", rn);
            return 0;
        }

        i = env->mmu.regs[MMU_R_TLBX] & 0xff;
        r = env->mmu.rams[rn & 1][i];
        if (rn == MMU_R_TLBHI)
            env->mmu.regs[MMU_R_PID] = env->mmu.tids[i];
        break;
    case MMU_R_PID:
    case MMU_R_ZPR:
        if (!(env->mmu.c_mmu_tlb_access & 1)) {
            qemu_log_mask(LOG_GUEST_ERROR, "Invalid access to MMU reg %d\n", rn);
            return 0;
        }
        r = env->mmu.regs[rn];
        break;
    case MMU_R_TLBX:
        r = env->mmu.regs[rn];
        break;
    case MMU_R_TLBSX:
        qemu_log_mask(LOG_GUEST_ERROR, "TLBSX is write-only.\n");
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid MMU register %d.\n", rn);
        break;
    }
    D(qemu_log("%s rn=%d=%x\n", __func__, rn, r));
    return r;
}

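/*
 * Writes with side effects: TLBHI writes latch the current PID into the
 * entry's TID and flush the old mapping, ZPR changes flush the entire QEMU
 * TLB, and TLBSX performs a software TLB search by virtual address.
 */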
void mmu_write(CPUMBState *env, uint32_t rn, uint32_t v)
{
    MicroBlazeCPU *cpu = mb_env_get_cpu(env);
    unsigned int i;
    D(qemu_log("%s rn=%d=%x old=%x\n", __func__, rn, v, env->mmu.regs[rn]));

    if (env->mmu.c_mmu < 2 || !env->mmu.c_mmu_tlb_access) {
        qemu_log_mask(LOG_GUEST_ERROR, "MMU access on MMU-less system\n");
        return;
    }

    switch (rn) {
    /* Writes to HI/LO trigger writes to the mmu rams. */
    case MMU_R_TLBLO:
    case MMU_R_TLBHI:
        i = env->mmu.regs[MMU_R_TLBX] & 0xff;
        if (rn == MMU_R_TLBHI) {
            if (i < 3 && !(v & TLB_VALID) && qemu_loglevel_mask(~0))
                qemu_log_mask(LOG_GUEST_ERROR,
                              "invalidating index %x at pc=%" PRIx64 "\n",
                              i, env->sregs[SR_PC]);
            env->mmu.tids[i] = env->mmu.regs[MMU_R_PID] & 0xff;
            mmu_flush_idx(env, i);
        }
        env->mmu.rams[rn & 1][i] = v;

        D(qemu_log("%s ram[%d][%d]=%x\n", __func__, rn & 1, i, v));
        break;
    case MMU_R_ZPR:
        if (env->mmu.c_mmu_tlb_access <= 1) {
            qemu_log_mask(LOG_GUEST_ERROR, "Invalid access to MMU reg %d\n", rn);
            return;
        }

        /* Changes to the zone protection reg flush the QEMU TLB.
           Fortunately, these are very uncommon. */
        if (v != env->mmu.regs[rn]) {
            tlb_flush(CPU(cpu));
        }
        env->mmu.regs[rn] = v;
        break;
    case MMU_R_PID:
        if (env->mmu.c_mmu_tlb_access <= 1) {
            qemu_log_mask(LOG_GUEST_ERROR, "Invalid access to MMU reg %d\n", rn);
            return;
        }

        if (v != env->mmu.regs[rn]) {
            mmu_change_pid(env, v);
            env->mmu.regs[rn] = v;
        }
        break;
    case MMU_R_TLBX:
        /* Bit 31 is read-only. */
        env->mmu.regs[rn] = deposit32(env->mmu.regs[rn], 0, 31, v);
        break;
    case MMU_R_TLBSX:
    {
        struct microblaze_mmu_lookup lu;
        int hit;

        if (env->mmu.c_mmu_tlb_access <= 1) {
            qemu_log_mask(LOG_GUEST_ERROR, "Invalid access to MMU reg %d\n", rn);
            return;
        }

        hit = mmu_translate(&env->mmu, &lu,
                            v & TLB_EPN_MASK, 0, cpu_mmu_index(env, false));
        if (hit) {
            env->mmu.regs[MMU_R_TLBX] = lu.idx;
        } else {
            env->mmu.regs[MMU_R_TLBX] |= R_TBLX_MISS_MASK;
        }
        break;
    }
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid MMU register %d.\n", rn);
        break;
    }
}

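/* Clear the MMU special registers; the TLB rams and TIDs are left alone. */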
void mmu_init(struct microblaze_mmu *mmu)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(mmu->regs); i++) {
        mmu->regs[i] = 0;
    }
}