/*
 * Microblaze MMU emulation for qemu.
 *
 * Copyright (c) 2009 Edgar E. Iglesias
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>

#include "config.h"
#include "cpu.h"
#include "exec-all.h"

#define D(x)

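/* Decode the page-size field of a TLB tag entry into a size in bytes
   (1 KB up to 16 MB). */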
static unsigned int tlb_decode_size(unsigned int f)
{
    static const unsigned int sizes[] = {
        1 * 1024, 4 * 1024, 16 * 1024, 64 * 1024, 256 * 1024,
        1 * 1024 * 1024, 4 * 1024 * 1024, 16 * 1024 * 1024
    };
    assert(f < ARRAY_SIZE(sizes));
    return sizes[f];
}

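/* Remove from QEMU's TLB every target page covered by the guest TLB entry
   at index idx.  A no-op if the entry is not valid. */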
static void mmu_flush_idx(CPUState *env, unsigned int idx)
{
    struct microblaze_mmu *mmu = &env->mmu;
    unsigned int tlb_size;
    uint32_t tlb_tag, end, t;

    t = mmu->rams[RAM_TAG][idx];
    if (!(t & TLB_VALID))
        return;

    tlb_tag = t & TLB_EPN_MASK;
    tlb_size = tlb_decode_size((t & TLB_PAGESZ_MASK) >> 7);
    end = tlb_tag + tlb_size;

    while (tlb_tag < end) {
        tlb_flush_page(env, tlb_tag);
        tlb_tag += TARGET_PAGE_SIZE;
    }
}

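/* Called when the PID register is about to change: flush QEMU's TLB
   mappings for every valid, non-global (TID != 0) guest TLB entry whose
   TID matches the current PID value. */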
static void mmu_change_pid(CPUState *env, unsigned int newpid)
{
    struct microblaze_mmu *mmu = &env->mmu;
    unsigned int i;
    unsigned int tlb_size;
    uint32_t tlb_tag, mask, t;

    if (newpid & ~0xff)
        qemu_log("Illegal rpid=%x\n", newpid);

    for (i = 0; i < ARRAY_SIZE(mmu->rams[RAM_TAG]); i++) {
        /* Lookup and decode. */
        t = mmu->rams[RAM_TAG][i];
        if (t & TLB_VALID) {
            tlb_size = tlb_decode_size((t & TLB_PAGESZ_MASK) >> 7);
            mask = ~(tlb_size - 1);

            tlb_tag = t & TLB_EPN_MASK;
            if (mmu->tids[i] && ((mmu->regs[MMU_R_PID] & 0xff) == mmu->tids[i]))
                mmu_flush_idx(env, i);
        }
    }
}

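/* Look up vaddr in the unified TLB.  On a hit, *lu is filled in with the
   translation (physical address, page size, protection bits) and 1 is
   returned; otherwise 0 is returned and lu->err distinguishes a miss
   (ERR_MISS) from a protection violation (ERR_PROT). */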
/* rw - 0 = read, 1 = write, 2 = fetch. */
unsigned int mmu_translate(struct microblaze_mmu *mmu,
                           struct microblaze_mmu_lookup *lu,
                           target_ulong vaddr, int rw, int mmu_idx)
{
    unsigned int i, hit = 0;
    unsigned int tlb_ex = 0, tlb_wr = 0, tlb_zsel;
    unsigned int tlb_size;
    uint32_t tlb_tag, tlb_rpn, mask, t0;

    lu->err = ERR_MISS;
    for (i = 0; i < ARRAY_SIZE(mmu->rams[RAM_TAG]); i++) {
        uint32_t t, d;

        /* Lookup and decode. */
        t = mmu->rams[RAM_TAG][i];
        D(qemu_log("TLB %d valid=%d\n", i, t & TLB_VALID));
        if (t & TLB_VALID) {
            tlb_size = tlb_decode_size((t & TLB_PAGESZ_MASK) >> 7);
            if (tlb_size < TARGET_PAGE_SIZE) {
                qemu_log("%d pages not supported\n", tlb_size);
                abort();
            }

            mask = ~(tlb_size - 1);
            tlb_tag = t & TLB_EPN_MASK;
            if ((vaddr & mask) != (tlb_tag & mask)) {
                D(qemu_log("TLB %d vaddr=%x != tag=%x\n",
                           i, vaddr & mask, tlb_tag & mask));
                continue;
            }
            if (mmu->tids[i]
                && ((mmu->regs[MMU_R_PID] & 0xff) != mmu->tids[i])) {
                D(qemu_log("TLB %d pid=%x != tid=%x\n",
                           i, mmu->regs[MMU_R_PID], mmu->tids[i]));
                continue;
            }

            /* Bring in the data part. */
            d = mmu->rams[RAM_DATA][i];
            tlb_ex = d & TLB_EX;
            tlb_wr = d & TLB_WR;

            /* Now let's see if there is a zone that overrides the protbits. */
            tlb_zsel = (d >> 4) & 0xf;
            t0 = mmu->regs[MMU_R_ZPR] >> (30 - (tlb_zsel * 2));
            t0 &= 0x3;

            if (tlb_zsel > mmu->c_mmu_zones) {
                qemu_log("tlb zone select out of range! %d\n", tlb_zsel);
                t0 = 1; /* Ignore. */
            }

            if (mmu->c_mmu == 1) {
                t0 = 1; /* Zones are disabled. */
            }

            switch (t0) {
            case 0:
                if (mmu_idx == MMU_USER_IDX)
                    continue;
                break;
            case 2:
                if (mmu_idx != MMU_USER_IDX) {
                    tlb_ex = 1;
                    tlb_wr = 1;
                }
                break;
            case 3:
                tlb_ex = 1;
                tlb_wr = 1;
                break;
            default:
                break;
            }

            lu->err = ERR_PROT;
            lu->prot = PAGE_READ;
            if (tlb_wr)
                lu->prot |= PAGE_WRITE;
            else if (rw == 1)
                goto done;
            if (tlb_ex)
                lu->prot |= PAGE_EXEC;
            else if (rw == 2) {
                goto done;
            }

            tlb_rpn = d & TLB_RPN_MASK;

            lu->vaddr = tlb_tag;
            lu->paddr = tlb_rpn;
            lu->size = tlb_size;
            lu->err = ERR_HIT;
            lu->idx = i;
            hit = 1;
            goto done;
        }
    }
done:
    D(qemu_log("MMU vaddr=%x rw=%d tlb_wr=%d tlb_ex=%d hit=%d\n",
               vaddr, rw, tlb_wr, tlb_ex, hit));
    return hit;
}

/* Writes/reads to the MMU's special regs end up here. */
uint32_t mmu_read(CPUState *env, uint32_t rn)
{
    unsigned int i;
    uint32_t r;

    if (env->mmu.c_mmu < 2 || !env->mmu.c_mmu_tlb_access) {
        qemu_log("MMU access on MMU-less system\n");
        return 0;
    }

    switch (rn) {
    /* Reads to HI/LO trigger reads from the mmu rams. */
    case MMU_R_TLBLO:
    case MMU_R_TLBHI:
        if (!(env->mmu.c_mmu_tlb_access & 1)) {
            qemu_log("Invalid access to MMU reg %d\n", rn);
            return 0;
        }

        i = env->mmu.regs[MMU_R_TLBX] & 0xff;
        r = env->mmu.rams[rn & 1][i];
        if (rn == MMU_R_TLBHI)
            env->mmu.regs[MMU_R_PID] = env->mmu.tids[i];
        break;
    case MMU_R_PID:
    case MMU_R_ZPR:
        if (!(env->mmu.c_mmu_tlb_access & 1)) {
            qemu_log("Invalid access to MMU reg %d\n", rn);
            return 0;
        }
        r = env->mmu.regs[rn];
        break;
    default:
        r = env->mmu.regs[rn];
        break;
    }
    D(qemu_log("%s rn=%d=%x\n", __func__, rn, r));
    return r;
}

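/* Register writes.  Writes to TLBHI, ZPR and PID can flush (parts of)
   QEMU's TLB; a write to TLBSX performs a TLB search. */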
void mmu_write(CPUState *env, uint32_t rn, uint32_t v)
{
    unsigned int i;
    D(qemu_log("%s rn=%d=%x old=%x\n", __func__, rn, v, env->mmu.regs[rn]));

    if (env->mmu.c_mmu < 2 || !env->mmu.c_mmu_tlb_access) {
        qemu_log("MMU access on MMU-less system\n");
        return;
    }

    switch (rn) {
    /* Writes to HI/LO trigger writes to the mmu rams. */
    case MMU_R_TLBLO:
    case MMU_R_TLBHI:
        i = env->mmu.regs[MMU_R_TLBX] & 0xff;
        if (rn == MMU_R_TLBHI) {
            if (i < 3 && !(v & TLB_VALID) && qemu_loglevel_mask(~0))
                qemu_log("invalidating index %x at pc=%x\n",
                         i, env->sregs[SR_PC]);
            env->mmu.tids[i] = env->mmu.regs[MMU_R_PID] & 0xff;
            mmu_flush_idx(env, i);
        }
        env->mmu.rams[rn & 1][i] = v;

        D(qemu_log("%s ram[%d][%d]=%x\n", __func__, rn & 1, i, v));
        break;
    case MMU_R_ZPR:
        if (env->mmu.c_mmu_tlb_access <= 1) {
            qemu_log("Invalid access to MMU reg %d\n", rn);
            return;
        }

        /* Changes to the zone protection reg flush the QEMU TLB.
           Fortunately, these are very uncommon. */
        if (v != env->mmu.regs[rn]) {
            tlb_flush(env, 1);
        }
        env->mmu.regs[rn] = v;
        break;
    case MMU_R_PID:
        if (env->mmu.c_mmu_tlb_access <= 1) {
            qemu_log("Invalid access to MMU reg %d\n", rn);
            return;
        }

        if (v != env->mmu.regs[rn]) {
            mmu_change_pid(env, v);
            env->mmu.regs[rn] = v;
        }
        break;
    case MMU_R_TLBSX:
    {
        struct microblaze_mmu_lookup lu;
        int hit;

        if (env->mmu.c_mmu_tlb_access <= 1) {
            qemu_log("Invalid access to MMU reg %d\n", rn);
            return;
        }

        hit = mmu_translate(&env->mmu, &lu,
                            v & TLB_EPN_MASK, 0, cpu_mmu_index(env));
        if (hit) {
            env->mmu.regs[MMU_R_TLBX] = lu.idx;
        } else
            env->mmu.regs[MMU_R_TLBX] |= 0x80000000;
        break;
    }
    default:
        env->mmu.regs[rn] = v;
        break;
    }
}

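/* Reset the MMU special registers to their power-on (zero) state. */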
void mmu_init(struct microblaze_mmu *mmu)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(mmu->regs); i++) {
        mmu->regs[i] = 0;
    }
}