/*
 *  Microblaze MMU emulation for qemu.
 *
 *  Copyright (c) 2009 Edgar E. Iglesias
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
/* Minimal includes for assert(), the CPU/MMU state and tlb_flush_page();
   adjust to the surrounding QEMU tree as needed.  */
#include <assert.h>

#include "cpu.h"
#include "exec-all.h"

/* Debug logging wrapper, compiled out by default.  */
#define D(x)

static unsigned int tlb_decode_size(unsigned int f)
{
    static const unsigned int sizes[] = {
        1 * 1024, 4 * 1024, 16 * 1024, 64 * 1024, 256 * 1024,
        1 * 1024 * 1024, 4 * 1024 * 1024, 16 * 1024 * 1024
    };
    assert(f < ARRAY_SIZE(sizes));
    return sizes[f];
}
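/*
 * The table above lists the eight page sizes a MicroBlaze TLB entry can map.
 * Callers extract the 3-bit size field from the tag word with
 * (t & TLB_PAGESZ_MASK) >> 7, so e.g. a field value of 1 selects 4K pages and
 * each increment quadruples the size up to 16M.
 */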
/* Flush the QEMU TLB for every target page covered by TLB entry idx.  */
static void mmu_flush_idx(CPUState *env, unsigned int idx)
{
    struct microblaze_mmu *mmu = &env->mmu;
    unsigned int tlb_size;
    uint32_t tlb_tag, end, t;

    t = mmu->rams[RAM_TAG][idx];
    if (!(t & TLB_VALID))
        return;

    tlb_tag = t & TLB_EPN_MASK;
    tlb_size = tlb_decode_size((t & TLB_PAGESZ_MASK) >> 7);
    end = tlb_tag + tlb_size;

    while (tlb_tag < end) {
        tlb_flush_page(env, tlb_tag);
        tlb_tag += TARGET_PAGE_SIZE;
    }
}
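/*
 * Note that a single architectural TLB entry can be much larger than
 * TARGET_PAGE_SIZE, which is why the loop above walks the whole range and
 * flushes it one QEMU target page at a time.
 */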
/* Called when the guest changes the PID; flush the QEMU mappings of all
   TLB entries tagged with the old PID.  */
static void mmu_change_pid(CPUState *env, unsigned int newpid)
{
    struct microblaze_mmu *mmu = &env->mmu;
    unsigned int i;
    unsigned int tlb_size;
    uint32_t tlb_tag, mask, t;

    if (newpid & ~0xff)
        qemu_log("Illegal rpid=%x\n", newpid);

    for (i = 0; i < ARRAY_SIZE(mmu->rams[RAM_TAG]); i++) {
        /* Lookup and decode.  */
        t = mmu->rams[RAM_TAG][i];
        if (t & TLB_VALID) {
            tlb_size = tlb_decode_size((t & TLB_PAGESZ_MASK) >> 7);
            mask = ~(tlb_size - 1);

            tlb_tag = t & TLB_EPN_MASK;
            if (mmu->tids[i] && ((mmu->regs[MMU_R_PID] & 0xff) == mmu->tids[i]))
                mmu_flush_idx(env, i);
        }
    }
}
/* rw - 0 = read, 1 = write, 2 = fetch.  An illustrative caller sketch
   follows the function body below.  */
unsigned int mmu_translate(struct microblaze_mmu *mmu,
                           struct microblaze_mmu_lookup *lu,
                           target_ulong vaddr, int rw, int mmu_idx)
{
    unsigned int i, hit = 0;
    unsigned int tlb_ex = 0, tlb_wr = 0, tlb_zsel;
    unsigned int tlb_size;
    uint32_t tlb_tag, tlb_rpn, mask, t0;

    lu->err = ERR_MISS;
    for (i = 0; i < ARRAY_SIZE(mmu->rams[RAM_TAG]); i++) {
        uint32_t t, d;

        /* Lookup and decode.  */
        t = mmu->rams[RAM_TAG][i];
        D(qemu_log("TLB %d valid=%d\n", i, t & TLB_VALID));
        if (t & TLB_VALID) {
            tlb_size = tlb_decode_size((t & TLB_PAGESZ_MASK) >> 7);
            if (tlb_size < TARGET_PAGE_SIZE) {
                qemu_log("%d pages not supported\n", tlb_size);
                abort();
            }

            mask = ~(tlb_size - 1);
            tlb_tag = t & TLB_EPN_MASK;
            if ((vaddr & mask) != (tlb_tag & mask)) {
                D(qemu_log("TLB %d vaddr=%x != tag=%x\n",
                           i, vaddr & mask, tlb_tag & mask));
                continue;
            }
            if (mmu->tids[i]
                && ((mmu->regs[MMU_R_PID] & 0xff) != mmu->tids[i])) {
                D(qemu_log("TLB %d pid=%x != tid=%x\n",
                           i, mmu->regs[MMU_R_PID], mmu->tids[i]));
                continue;
            }

            /* Bring in the data part.  */
            d = mmu->rams[RAM_DATA][i];
            tlb_ex = d & TLB_EX;
            tlb_wr = d & TLB_WR;
            /* Now let's see if there is a zone that overrides the protbits.  */
            tlb_zsel = (d >> 4) & 0xf;
            t0 = mmu->regs[MMU_R_ZPR] >> (30 - (tlb_zsel * 2));
            t0 &= 0x3;

            if (tlb_zsel > mmu->c_mmu_zones) {
                qemu_log("tlb zone select out of range! %d\n", tlb_zsel);
                t0 = 1; /* Ignore.  */
            }

            if (mmu->c_mmu == 1) {
                t0 = 1; /* Zones are disabled.  */
            }
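            /*
             * ZPR holds sixteen 2-bit zone protection fields, zone 0 in bits
             * 31:30 down to zone 15 in bits 1:0, which is why the field for
             * zone tlb_zsel is shifted down from bit (30 - 2 * tlb_zsel)
             * above.  t0 now holds that 2-bit field.
             */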
            /* Apply the zone protection field.  */
            switch (t0) {
                case 0:
                    if (mmu_idx == MMU_USER_IDX)
                        continue;
                    break;
                case 2:
                    if (mmu_idx != MMU_USER_IDX) {
                        tlb_ex = 1;
                        tlb_wr = 1;
                    }
                    break;
                case 3:
                    tlb_ex = 1;
                    tlb_wr = 1;
                    break;
                default:
                    break;
            }

            lu->err = ERR_PROT;
            lu->prot = PAGE_READ;
            if (tlb_wr)
                lu->prot |= PAGE_WRITE;
            else if (rw == 1)
                goto done;
            if (tlb_ex)
                lu->prot |= PAGE_EXEC;
            else if (rw == 2)
                goto done;
            tlb_rpn = d & TLB_RPN_MASK;

            lu->vaddr = tlb_tag;
            lu->paddr = tlb_rpn;
            lu->size = tlb_size;
            lu->err = ERR_HIT;
            lu->idx = i;
            hit = 1;
            goto done;
        }
    }
done:
    D(qemu_log("MMU vaddr=%x rw=%d tlb_wr=%d tlb_ex=%d hit=%d\n",
               vaddr, rw, tlb_wr, tlb_ex, hit));
    return hit;
}
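/*
 * Illustrative caller sketch (not part of the original code; "env" and
 * "vaddr" are hypothetical locals):
 *
 *     struct microblaze_mmu_lookup lu;
 *     unsigned int hit;
 *
 *     hit = mmu_translate(&env->mmu, &lu, vaddr, 0, cpu_mmu_index(env));
 *     if (hit) {
 *         // lu.idx names the matching TLB entry, lu.prot the page rights.
 *     }
 *
 * The MMU_R_TLBSX handling in mmu_write() below performs exactly this kind
 * of probe, with rw == 0 (read).
 */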
/* Writes/reads to the MMU's special regs end up here.  */
uint32_t mmu_read(CPUState *env, uint32_t rn)
{
    unsigned int i;
    uint32_t r;

    if (env->mmu.c_mmu < 2 || !env->mmu.c_mmu_tlb_access) {
        qemu_log("MMU access on MMU-less system\n");
        return 0;
    }

    switch (rn) {
        /* Reads to HI/LO trigger reads from the mmu rams.  */
        case MMU_R_TLBLO:
        case MMU_R_TLBHI:
            if (!(env->mmu.c_mmu_tlb_access & 1)) {
                qemu_log("Invalid access to MMU reg %d\n", rn);
                return 0;
            }

            i = env->mmu.regs[MMU_R_TLBX] & 0xff;
            r = env->mmu.rams[rn & 1][i];
            if (rn == MMU_R_TLBHI)
                env->mmu.regs[MMU_R_PID] = env->mmu.tids[i];
            break;
        case MMU_R_PID:
        case MMU_R_ZPR:
            if (!(env->mmu.c_mmu_tlb_access & 1)) {
                qemu_log("Invalid access to MMU reg %d\n", rn);
                return 0;
            }
            r = env->mmu.regs[rn];
            break;
        default:
            r = env->mmu.regs[rn];
            break;
    }
    D(qemu_log("%s rn=%d=%x\n", __func__, rn, r));
    return r;
}
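/*
 * Illustrative sequence (a sketch, not part of the original code) for
 * reading one TLB entry back through the register interface modelled
 * above; "idx", "hi" and "lo" are hypothetical locals:
 *
 *     mmu_write(env, MMU_R_TLBX, idx);   // select the entry
 *     hi = mmu_read(env, MMU_R_TLBHI);   // tag word; also copies the
 *                                        // entry's TID into MMU_R_PID
 *     lo = mmu_read(env, MMU_R_TLBLO);   // data word
 */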
void mmu_write(CPUState *env, uint32_t rn, uint32_t v)
{
    unsigned int i;
    D(qemu_log("%s rn=%d=%x old=%x\n", __func__, rn, v, env->mmu.regs[rn]));

    if (env->mmu.c_mmu < 2 || !env->mmu.c_mmu_tlb_access) {
        qemu_log("MMU access on MMU-less system\n");
        return;
    }

    switch (rn) {
        /* Writes to HI/LO trigger writes to the mmu rams.  */
        case MMU_R_TLBLO:
        case MMU_R_TLBHI:
            i = env->mmu.regs[MMU_R_TLBX] & 0xff;
            if (rn == MMU_R_TLBHI) {
                if (i < 3 && !(v & TLB_VALID) && qemu_loglevel_mask(~0))
                    qemu_log("invalidating index %x at pc=%x\n",
                             i, env->sregs[SR_PC]);
                env->mmu.tids[i] = env->mmu.regs[MMU_R_PID] & 0xff;
                mmu_flush_idx(env, i);
            }
            env->mmu.rams[rn & 1][i] = v;

            D(qemu_log("%s ram[%d][%d]=%x\n", __func__, rn & 1, i, v));
            break;
        case MMU_R_ZPR:
            if (env->mmu.c_mmu_tlb_access <= 1) {
                qemu_log("Invalid access to MMU reg %d\n", rn);
                return;
            }

            /* Changes to the zone protection reg flush the QEMU TLB.
               Fortunately, these are very uncommon.  */
            if (v != env->mmu.regs[rn]) {
                tlb_flush(env, 1);
            }
            env->mmu.regs[rn] = v;
            break;
        case MMU_R_PID:
            if (env->mmu.c_mmu_tlb_access <= 1) {
                qemu_log("Invalid access to MMU reg %d\n", rn);
                return;
            }

            if (v != env->mmu.regs[rn]) {
                mmu_change_pid(env, v);
                env->mmu.regs[rn] = v;
            }
            break;
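        /*
         * Note the asymmetry with the MMU_R_ZPR case above: a ZPR change
         * simply flushes the whole QEMU TLB, while a PID change only flushes
         * the entries tagged with the old PID (see mmu_change_pid()).
         */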
        case MMU_R_TLBSX:
        {
            struct microblaze_mmu_lookup lu;
            int hit;

            if (env->mmu.c_mmu_tlb_access <= 1) {
                qemu_log("Invalid access to MMU reg %d\n", rn);
                return;
            }

            /* Search the TLB for the given virtual address and report the
               result through MMU_R_TLBX; a miss sets the top bit.  */
            hit = mmu_translate(&env->mmu, &lu,
                                v & TLB_EPN_MASK, 0, cpu_mmu_index(env));
            if (hit) {
                env->mmu.regs[MMU_R_TLBX] = lu.idx;
            } else {
                env->mmu.regs[MMU_R_TLBX] |= 0x80000000;
            }
            break;
        }
        default:
            env->mmu.regs[rn] = v;
            break;
    }
}
void mmu_init(struct microblaze_mmu *mmu)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(mmu->regs); i++) {
        mmu->regs[i] = 0;
    }
}
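/*
 * Only the architected register file is cleared here; the TLB tag/data rams
 * and the per-entry TID array are not touched by this init function, so they
 * keep whatever values the guest (or reset logic) programs into them.
 */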