2 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
4 * Copyright (c) 2003-2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
20 #include "qemu/units.h"
22 #include "exec/helper-proto.h"
23 #include "sysemu/kvm.h"
25 #include "mmu-hash64.h"
26 #include "mmu-hash32.h"
27 #include "exec/exec-all.h"
28 #include "exec/cpu_ldst.h"
30 #include "helper_regs.h"
31 #include "qemu/error-report.h"
32 #include "qemu/qemu-print.h"
33 #include "mmu-book3s-v3.h"
34 #include "mmu-radix64.h"
36 /* #define DEBUG_MMU */
37 /* #define DEBUG_BATS */
38 /* #define DEBUG_SOFTWARE_TLB */
39 /* #define DUMP_PAGE_TABLES */
40 /* #define FLUSH_ALL_TLBS */
43 # define LOG_MMU_STATE(cpu) log_cpu_state_mask(CPU_LOG_MMU, (cpu), 0)
45 # define LOG_MMU_STATE(cpu) do { } while (0)
48 #ifdef DEBUG_SOFTWARE_TLB
49 # define LOG_SWTLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
51 # define LOG_SWTLB(...) do { } while (0)
55 # define LOG_BATS(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
57 # define LOG_BATS(...) do { } while (0)
60 /*****************************************************************************/
61 /* PowerPC MMU emulation */
63 /* Context used internally during MMU translations */
64 typedef struct mmu_ctx_t mmu_ctx_t;
66 hwaddr raddr; /* Real address */
67 hwaddr eaddr; /* Effective address */
68 int prot; /* Protection bits */
69 hwaddr hash[2]; /* Pagetable hash values */
70 target_ulong ptem; /* Virtual segment ID | API */
71 int key; /* Access key */
72 int nx; /* Non-execute area */
75 /* Common routines used by software and hardware TLB emulation */
76 static inline int pte_is_valid(target_ulong pte0)
78 return pte0 & 0x80000000 ? 1 : 0;
81 static inline void pte_invalidate(target_ulong *pte0)
86 #define PTE_PTEM_MASK 0x7FFFFFBF
87 #define PTE_CHECK_MASK (TARGET_PAGE_MASK | 0x7B)
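/*
 * Illustrative sketch, not part of the original file: accessors for the
 * 32-bit hash PTE layout implied by the masks above, i.e.
 * pte0 = V(31) | VSID(30..7) | H(6) | API(5..0) and
 * pte1 = RPN(31..12) | R(8) | C(7) | WIMG(6..3) | PP(1..0).
 * The helper names are hypothetical and only document the bit positions
 * used by pte_is_valid(), PTE_PTEM_MASK, pte_update_flags() and pp_check().
 */
static inline target_ulong pte_example_vsid(target_ulong pte0)
{
    /* 24-bit virtual segment ID, compared (with the API) against ctx->ptem */
    return (pte0 >> 7) & 0x00FFFFFF;
}

static inline target_ulong pte_example_api(target_ulong pte0)
{
    /* abbreviated page index: the top 6 bits of the 16-bit page index */
    return pte0 & 0x3F;
}

static inline int pte_example_wimg(target_ulong pte1)
{
    /* W, I, M, G storage attribute bits, part of PTE_CHECK_MASK */
    return (pte1 >> 3) & 0xF;
}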
89 static int pp_check(int key, int pp, int nx)
93 /* Compute access rights */
100 access |= PAGE_WRITE;
116 access = PAGE_READ | PAGE_WRITE;
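/*
 * Worked summary (added for clarity, assuming the usual 32-bit protection
 * model): with key == 0, PP values 0/1/2 grant read/write and PP 3 is
 * read-only; with key == 1, PP 0 grants no access, PP 1 and 3 are read-only
 * and PP 2 is read/write. PAGE_EXEC is then added only when nx == 0.
 */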
127 static int check_prot(int prot, int rw, int access_type)
131 if (access_type == ACCESS_CODE) {
132 if (prot & PAGE_EXEC) {
138 if (prot & PAGE_WRITE) {
144 if (prot & PAGE_READ) {
154 static inline int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0,
155 target_ulong pte1, int h,
158 target_ulong ptem, mmask;
159 int access, ret, pteh, ptev, pp;
162 /* Check validity and table match */
163 ptev = pte_is_valid(pte0);
164 pteh = (pte0 >> 6) & 1;
165 if (ptev && h == pteh) {
166 /* Check vsid & api */
167 ptem = pte0 & PTE_PTEM_MASK;
168 mmask = PTE_CHECK_MASK;
169 pp = pte1 & 0x00000003;
170 if (ptem == ctx->ptem) {
171 if (ctx->raddr != (hwaddr)-1ULL) {
172 /* all matches should have equal RPN, WIMG & PP */
173 if ((ctx->raddr & mmask) != (pte1 & mmask)) {
174 qemu_log_mask(CPU_LOG_MMU, "Bad RPN/WIMG/PP\n");
178 /* Compute access rights */
179 access = pp_check(ctx->key, pp, ctx->nx);
180 /* Keep the matching PTE information */
183 ret = check_prot(ctx->prot, rw, type);
186 qemu_log_mask(CPU_LOG_MMU, "PTE access granted!\n");
188 /* Access right violation */
189 qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
197 static int pte_update_flags(mmu_ctx_t *ctx, target_ulong *pte1p,
202 /* Update page flags */
203 if (!(*pte1p & 0x00000100)) {
204 /* Update accessed flag */
205 *pte1p |= 0x00000100;
208 if (!(*pte1p & 0x00000080)) {
209 if (rw == 1 && ret == 0) {
210 /* Update changed flag */
211 *pte1p |= 0x00000080;
214 /* Force page fault for first write access */
215 ctx->prot &= ~PAGE_WRITE;
222 /* Software driven TLB helpers */
223 static inline int ppc6xx_tlb_getnum(CPUPPCState *env, target_ulong eaddr,
224 int way, int is_code)
228 /* Select the TLB entry number within a way from the address */
229 nr = (eaddr >> TARGET_PAGE_BITS) & (env->tlb_per_way - 1);
231 nr += env->tlb_per_way * way;
232 /* 6xx have separate TLBs for instructions and data */
233 if (is_code && env->id_tlbs == 1) {
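/*
 * Worked example (added for clarity): with 64 entries and 2 ways,
 * tlb_per_way is 32, so the address bits just above the page offset select
 * the set, way 1 adds another 32, and a code access on a split-TLB 6xx adds
 * nb_tlb on top of that to land in the ITLB half.
 */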
240 static inline void ppc6xx_tlb_invalidate_all(CPUPPCState *env)
245 /* LOG_SWTLB("Invalidate all TLBs\n"); */
246 /* Invalidate all defined software TLBs */
248 if (env->id_tlbs == 1) {
251 for (nr = 0; nr < max; nr++) {
252 tlb = &env->tlb.tlb6[nr];
253 pte_invalidate(&tlb->pte0);
255 tlb_flush(env_cpu(env));
258 static inline void ppc6xx_tlb_invalidate_virt2(CPUPPCState *env,
260 int is_code, int match_epn)
262 #if !defined(FLUSH_ALL_TLBS)
263 CPUState *cs = env_cpu(env);
267 /* Invalidate ITLB + DTLB, all ways */
268 for (way = 0; way < env->nb_ways; way++) {
269 nr = ppc6xx_tlb_getnum(env, eaddr, way, is_code);
270 tlb = &env->tlb.tlb6[nr];
271 if (pte_is_valid(tlb->pte0) && (match_epn == 0 || eaddr == tlb->EPN)) {
272 LOG_SWTLB("TLB invalidate %d/%d " TARGET_FMT_lx "\n", nr,
274 pte_invalidate(&tlb->pte0);
275 tlb_flush_page(cs, tlb->EPN);
279 /* XXX: the PowerPC specification says this is valid as well */
280 ppc6xx_tlb_invalidate_all(env);
284 static inline void ppc6xx_tlb_invalidate_virt(CPUPPCState *env,
285 target_ulong eaddr, int is_code)
287 ppc6xx_tlb_invalidate_virt2(env, eaddr, is_code, 0);
290 static void ppc6xx_tlb_store(CPUPPCState *env, target_ulong EPN, int way,
291 int is_code, target_ulong pte0, target_ulong pte1)
296 nr = ppc6xx_tlb_getnum(env, EPN, way, is_code);
297 tlb = &env->tlb.tlb6[nr];
298 LOG_SWTLB("Set TLB %d/%d EPN " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
299 " PTE1 " TARGET_FMT_lx "\n", nr, env->nb_tlb, EPN, pte0, pte1);
300 /* Invalidate any pending reference in QEMU for this virtual address */
301 ppc6xx_tlb_invalidate_virt2(env, EPN, is_code, 1);
305 /* Store last way for LRU mechanism */
309 static inline int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx,
310 target_ulong eaddr, int rw, int access_type)
317 ret = -1; /* No TLB found */
318 for (way = 0; way < env->nb_ways; way++) {
319 nr = ppc6xx_tlb_getnum(env, eaddr, way,
320 access_type == ACCESS_CODE ? 1 : 0);
321 tlb = &env->tlb.tlb6[nr];
322 /* This test "emulates" the PTE index match for hardware TLBs */
323 if ((eaddr & TARGET_PAGE_MASK) != tlb->EPN) {
324 LOG_SWTLB("TLB %d/%d %s [" TARGET_FMT_lx " " TARGET_FMT_lx
325 "] <> " TARGET_FMT_lx "\n", nr, env->nb_tlb,
326 pte_is_valid(tlb->pte0) ? "valid" : "inval",
327 tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE, eaddr);
330 LOG_SWTLB("TLB %d/%d %s " TARGET_FMT_lx " <> " TARGET_FMT_lx " "
331 TARGET_FMT_lx " %c %c\n", nr, env->nb_tlb,
332 pte_is_valid(tlb->pte0) ? "valid" : "inval",
333 tlb->EPN, eaddr, tlb->pte1,
334 rw ? 'S' : 'L', access_type == ACCESS_CODE ? 'I' : 'D');
335 switch (ppc6xx_tlb_pte_check(ctx, tlb->pte0, tlb->pte1,
336 0, rw, access_type)) {
338 /* TLB inconsistency */
341 /* Access violation */
352 * XXX: we should keep looping to check the consistency of
353 * all TLBs, but we can speed the whole thing up since the
354 * result would be undefined if the TLBs are not consistent.
364 LOG_SWTLB("found TLB at addr " TARGET_FMT_plx " prot=%01x ret=%d\n",
365 ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret);
366 /* Update page flags */
367 pte_update_flags(ctx, &env->tlb.tlb6[best].pte1, ret, rw);
373 /* Perform BAT hit & translation */
374 static inline void bat_size_prot(CPUPPCState *env, target_ulong *blp,
375 int *validp, int *protp, target_ulong *BATu,
381 bl = (*BATu & 0x00001FFC) << 15;
384 if (((msr_pr == 0) && (*BATu & 0x00000002)) ||
385 ((msr_pr != 0) && (*BATu & 0x00000001))) {
387 pp = *BATl & 0x00000003;
389 prot = PAGE_READ | PAGE_EXEC;
400 static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
401 target_ulong virtual, int rw, int type)
403 target_ulong *BATlt, *BATut, *BATu, *BATl;
404 target_ulong BEPIl, BEPIu, bl;
408 LOG_BATS("%s: %cBAT v " TARGET_FMT_lx "\n", __func__,
409 type == ACCESS_CODE ? 'I' : 'D', virtual);
412 BATlt = env->IBAT[1];
413 BATut = env->IBAT[0];
416 BATlt = env->DBAT[1];
417 BATut = env->DBAT[0];
420 for (i = 0; i < env->nb_BATs; i++) {
423 BEPIu = *BATu & 0xF0000000;
424 BEPIl = *BATu & 0x0FFE0000;
425 bat_size_prot(env, &bl, &valid, &prot, BATu, BATl);
426 LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
427 " BATl " TARGET_FMT_lx "\n", __func__,
428 type == ACCESS_CODE ? 'I' : 'D', i, virtual, *BATu, *BATl);
429 if ((virtual & 0xF0000000) == BEPIu &&
430 ((virtual & 0x0FFE0000) & ~bl) == BEPIl) {
433 /* Get physical address */
434 ctx->raddr = (*BATl & 0xF0000000) |
435 ((virtual & 0x0FFE0000 & bl) | (*BATl & 0x0FFE0000)) |
436 (virtual & 0x0001F000);
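/*
 * Worked example (added for clarity): with BL = 0x7FF the shifted mask bl
 * is 0x0FFE0000, so the BAT covers a 256 MiB block and address bits 17..27
 * pass straight through from the effective address; with BL = 0 the block
 * is only 128 KiB and those bits come from BRPN in BATl instead.
 */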
437 /* Compute access rights */
439 ret = check_prot(ctx->prot, rw, type);
441 LOG_BATS("BAT %d match: r " TARGET_FMT_plx " prot=%c%c\n",
442 i, ctx->raddr, ctx->prot & PAGE_READ ? 'R' : '-',
443 ctx->prot & PAGE_WRITE ? 'W' : '-');
450 #if defined(DEBUG_BATS)
451 if (qemu_log_enabled()) {
452 LOG_BATS("no BAT match for " TARGET_FMT_lx ":\n", virtual);
453 for (i = 0; i < 4; i++) {
456 BEPIu = *BATu & 0xF0000000;
457 BEPIl = *BATu & 0x0FFE0000;
458 bl = (*BATu & 0x00001FFC) << 15;
459 LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
460 " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
461 TARGET_FMT_lx " " TARGET_FMT_lx "\n",
462 __func__, type == ACCESS_CODE ? 'I' : 'D', i, virtual,
463 *BATu, *BATl, BEPIu, BEPIl, bl);
472 /* Perform segment based translation */
473 static inline int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
474 target_ulong eaddr, int rw, int type)
476 PowerPCCPU *cpu = env_archcpu(env);
479 int ds, pr, target_page_bits;
481 target_ulong sr, pgidx;
486 sr = env->sr[eaddr >> 28];
487 ctx->key = (((sr & 0x20000000) && (pr != 0)) ||
488 ((sr & 0x40000000) && (pr == 0))) ? 1 : 0;
489 ds = sr & 0x80000000 ? 1 : 0;
490 ctx->nx = sr & 0x10000000 ? 1 : 0;
491 vsid = sr & 0x00FFFFFF;
492 target_page_bits = TARGET_PAGE_BITS;
493 qemu_log_mask(CPU_LOG_MMU,
494 "Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx
495 " nip=" TARGET_FMT_lx " lr=" TARGET_FMT_lx
496 " ir=%d dr=%d pr=%d %d t=%d\n",
497 eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr, (int)msr_ir,
498 (int)msr_dr, pr != 0 ? 1 : 0, rw, type);
499 pgidx = (eaddr & ~SEGMENT_MASK_256M) >> target_page_bits;
501 ctx->ptem = (vsid << 7) | (pgidx >> 10);
503 qemu_log_mask(CPU_LOG_MMU,
504 "pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx "\n",
505 ctx->key, ds, ctx->nx, vsid);
508 /* Check if instruction fetch is allowed, if needed */
509 if (type != ACCESS_CODE || ctx->nx == 0) {
510 /* Page address translation */
511 qemu_log_mask(CPU_LOG_MMU, "htab_base " TARGET_FMT_plx
512 " htab_mask " TARGET_FMT_plx
513 " hash " TARGET_FMT_plx "\n",
514 ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu), hash);
516 ctx->hash[1] = ~hash;
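/*
 * Added for clarity (assuming the standard 32-bit hashed page table
 * scheme): the primary hash is VSID XOR page-index and the secondary hash
 * is its one's complement; each selects a 64-byte PTE group at
 * htab_base + ((hash * 64) & htab_mask), which is also how SPR_HASH1 and
 * SPR_HASH2 are filled in on a software TLB miss further down.
 */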
518 /* Initialize real address with an invalid value */
519 ctx->raddr = (hwaddr)-1ULL;
520 /* Software TLB search */
521 ret = ppc6xx_tlb_check(env, ctx, eaddr, rw, type);
522 #if defined(DUMP_PAGE_TABLES)
523 if (qemu_loglevel_mask(CPU_LOG_MMU)) {
524 CPUState *cs = env_cpu(env);
526 uint32_t a0, a1, a2, a3;
528 qemu_log("Page table: " TARGET_FMT_plx " len " TARGET_FMT_plx
529 "\n", ppc_hash32_hpt_base(cpu),
530 ppc_hash32_hpt_mask(env) + 0x80);
531 for (curaddr = ppc_hash32_hpt_base(cpu);
532 curaddr < (ppc_hash32_hpt_base(cpu)
533 + ppc_hash32_hpt_mask(cpu) + 0x80);
535 a0 = ldl_phys(cs->as, curaddr);
536 a1 = ldl_phys(cs->as, curaddr + 4);
537 a2 = ldl_phys(cs->as, curaddr + 8);
538 a3 = ldl_phys(cs->as, curaddr + 12);
539 if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) {
540 qemu_log(TARGET_FMT_plx ": %08x %08x %08x %08x\n",
541 curaddr, a0, a1, a2, a3);
547 qemu_log_mask(CPU_LOG_MMU, "No access allowed\n");
553 qemu_log_mask(CPU_LOG_MMU, "direct store...\n");
554 /* Direct-store segment: absolutely *BUGGY* for now */
557 * Direct-store implies a 32-bit MMU.
558 * Check the Segment Register's bus unit ID (BUID).
560 sr = env->sr[eaddr >> 28];
561 if ((sr & 0x1FF00000) >> 20 == 0x07f) {
563 * Memory-forced I/O controller interface access
565 * If T=1 and BUID=x'07F', the 601 performs a memory
566 * access to SR[28-31] LA[4-31], bypassing all protection mechanisms.
569 ctx->raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF);
570 ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
576 /* Integer load/store: the only access type allowed */
579 /* No code fetch is allowed in direct-store areas */
582 /* Floating point load/store */
585 /* lwarx, ldarx or stwcx. */
589 * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi
591 * Should make the instruction a no-op. As it already is a
592 * no-op, it's quite easy :-)
600 qemu_log_mask(CPU_LOG_MMU, "ERROR: instruction should not need "
601 "address translation\n");
604 if ((rw == 1 || ctx->key != 1) && (rw == 0 || ctx->key != 0)) {
615 /* Generic TLB check function for embedded PowerPC implementations */
616 static int ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb,
618 target_ulong address, uint32_t pid, int ext,
623 /* Check valid flag */
624 if (!(tlb->prot & PAGE_VALID)) {
627 mask = ~(tlb->size - 1);
628 LOG_SWTLB("%s: TLB %d address " TARGET_FMT_lx " PID %u <=> " TARGET_FMT_lx
629 " " TARGET_FMT_lx " %u %x\n", __func__, i, address, pid, tlb->EPN,
630 mask, (uint32_t)tlb->PID, tlb->prot);
632 if (tlb->PID != 0 && tlb->PID != pid) {
635 /* Check effective address */
636 if ((address & mask) != tlb->EPN) {
639 *raddrp = (tlb->RPN & mask) | (address & ~mask);
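/*
 * Worked example (added for clarity): for a 16 KiB entry mask is
 * 0xFFFFC000, so an EPN of 0x10004000 matches accesses to
 * 0x10004000..0x10007FFF and the real address is the entry's RPN with the
 * low 14 bits of the access ORed in.
 */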
641 /* Extend the physical address to 36 bits */
642 *raddrp |= (uint64_t)(tlb->RPN & 0xF) << 32;
648 /* Generic TLB search function for PowerPC embedded implementations */
649 static int ppcemb_tlb_search(CPUPPCState *env, target_ulong address,
656 /* Default return value is no match */
658 for (i = 0; i < env->nb_tlb; i++) {
659 tlb = &env->tlb.tlbe[i];
660 if (ppcemb_tlb_check(env, tlb, &raddr, address, pid, 0, i) == 0) {
669 /* Helpers specific to PowerPC 40x implementations */
670 static inline void ppc4xx_tlb_invalidate_all(CPUPPCState *env)
675 for (i = 0; i < env->nb_tlb; i++) {
676 tlb = &env->tlb.tlbe[i];
677 tlb->prot &= ~PAGE_VALID;
679 tlb_flush(env_cpu(env));
682 static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
683 target_ulong address, int rw,
688 int i, ret, zsel, zpr, pr;
691 raddr = (hwaddr)-1ULL;
693 for (i = 0; i < env->nb_tlb; i++) {
694 tlb = &env->tlb.tlbe[i];
695 if (ppcemb_tlb_check(env, tlb, &raddr, address,
696 env->spr[SPR_40x_PID], 0, i) < 0) {
699 zsel = (tlb->attr >> 4) & 0xF;
700 zpr = (env->spr[SPR_40x_ZPR] >> (30 - (2 * zsel))) & 0x3;
701 LOG_SWTLB("%s: TLB %d zsel %d zpr %d rw %d attr %08x\n",
702 __func__, i, zsel, zpr, rw, tlb->attr);
703 /* Check execute enable bit */
711 /* All accesses granted */
712 ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
717 /* Raise Zone protection fault. */
718 env->spr[SPR_40x_ESR] = 1 << 22;
726 /* Check from TLB entry */
727 ctx->prot = tlb->prot;
728 ret = check_prot(ctx->prot, rw, access_type);
730 env->spr[SPR_40x_ESR] = 0;
736 LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx
737 " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
742 LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx
743 " %d %d\n", __func__, address, raddr, ctx->prot, ret);
748 void store_40x_sler(CPUPPCState *env, uint32_t val)
750 /* XXX: TO BE FIXED */
751 if (val != 0x00000000) {
752 cpu_abort(env_cpu(env),
753 "Little-endian regions are not supported by now\n");
755 env->spr[SPR_405_SLER] = val;
758 static inline int mmubooke_check_tlb(CPUPPCState *env, ppcemb_tlb_t *tlb,
759 hwaddr *raddr, int *prot,
760 target_ulong address, int rw,
761 int access_type, int i)
765 if (ppcemb_tlb_check(env, tlb, raddr, address,
766 env->spr[SPR_BOOKE_PID],
767 !env->nb_pids, i) >= 0) {
771 if (env->spr[SPR_BOOKE_PID1] &&
772 ppcemb_tlb_check(env, tlb, raddr, address,
773 env->spr[SPR_BOOKE_PID1], 0, i) >= 0) {
777 if (env->spr[SPR_BOOKE_PID2] &&
778 ppcemb_tlb_check(env, tlb, raddr, address,
779 env->spr[SPR_BOOKE_PID2], 0, i) >= 0) {
783 LOG_SWTLB("%s: TLB entry not found\n", __func__);
789 prot2 = tlb->prot & 0xF;
791 prot2 = (tlb->prot >> 4) & 0xF;
794 /* Check the address space */
795 if (access_type == ACCESS_CODE) {
796 if (msr_ir != (tlb->attr & 1)) {
797 LOG_SWTLB("%s: AS doesn't match\n", __func__);
802 if (prot2 & PAGE_EXEC) {
803 LOG_SWTLB("%s: good TLB!\n", __func__);
807 LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__, prot2);
810 if (msr_dr != (tlb->attr & 1)) {
811 LOG_SWTLB("%s: AS doesn't match\n", __func__);
816 if ((!rw && prot2 & PAGE_READ) || (rw && (prot2 & PAGE_WRITE))) {
817 LOG_SWTLB("%s: found TLB!\n", __func__);
821 LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__, prot2);
828 static int mmubooke_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
829 target_ulong address, int rw,
837 raddr = (hwaddr)-1ULL;
838 for (i = 0; i < env->nb_tlb; i++) {
839 tlb = &env->tlb.tlbe[i];
840 ret = mmubooke_check_tlb(env, tlb, &raddr, &ctx->prot, address, rw,
849 LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx
850 " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
853 LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx
854 " %d %d\n", __func__, address, raddr, ctx->prot, ret);
860 static void booke206_flush_tlb(CPUPPCState *env, int flags,
861 const int check_iprot)
865 ppcmas_tlb_t *tlb = env->tlb.tlbm;
867 for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
868 if (flags & (1 << i)) {
869 tlb_size = booke206_tlb_size(env, i);
870 for (j = 0; j < tlb_size; j++) {
871 if (!check_iprot || !(tlb[j].mas1 & MAS1_IPROT)) {
872 tlb[j].mas1 &= ~MAS1_VALID;
876 tlb += booke206_tlb_size(env, i);
879 tlb_flush(env_cpu(env));
882 static hwaddr booke206_tlb_to_page_size(CPUPPCState *env,
887 tlbm_size = (tlb->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
889 return 1024ULL << tlbm_size;
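/*
 * Worked example (added for clarity): TSIZE here is log2 of the page size
 * in KiB, so TSIZE = 2 gives 4 KiB, TSIZE = 10 gives 1 MiB and TSIZE = 20
 * gives 1 GiB, matching book3e_tsize_to_str[] below.
 */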
892 /* TLB check function for MAS based SoftTLBs */
893 static int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb,
894 hwaddr *raddrp, target_ulong address,
901 /* In 32-bit mode we can only address 32-bit EAs */
902 address = (uint32_t)address;
905 /* Check valid flag */
906 if (!(tlb->mas1 & MAS1_VALID)) {
910 mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
911 LOG_SWTLB("%s: TLB ADDR=0x" TARGET_FMT_lx " PID=0x%x MAS1=0x%x MAS2=0x%"
912 PRIx64 " mask=0x%" HWADDR_PRIx " MAS7_3=0x%" PRIx64 " MAS8=0x%"
913 PRIx32 "\n", __func__, address, pid, tlb->mas1, tlb->mas2, mask,
914 tlb->mas7_3, tlb->mas8);
917 tlb_pid = (tlb->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT;
918 if (tlb_pid != 0 && tlb_pid != pid) {
922 /* Check effective address */
923 if ((address & mask) != (tlb->mas2 & MAS2_EPN_MASK)) {
928 *raddrp = (tlb->mas7_3 & mask) | (address & ~mask);
934 static bool is_epid_mmu(int mmu_idx)
936 return mmu_idx == PPC_TLB_EPID_STORE || mmu_idx == PPC_TLB_EPID_LOAD;
939 static uint32_t mmubooke206_esr(int mmu_idx, bool rw)
945 if (is_epid_mmu(mmu_idx)) {
952 * Get the EPID register given the mmu_idx. If this is a regular load,
953 * construct the EPID access bits from the current processor state.
955 * Get the effective AS and PR bits and the PID. The PID is returned
956 * only if an EPID load is requested, otherwise the caller must detect
957 * the correct EPID. Return true if a valid EPID is returned.
959 static bool mmubooke206_get_as(CPUPPCState *env,
960 int mmu_idx, uint32_t *epid_out,
961 bool *as_out, bool *pr_out)
963 if (is_epid_mmu(mmu_idx)) {
965 if (mmu_idx == PPC_TLB_EPID_STORE) {
966 epidr = env->spr[SPR_BOOKE_EPSC];
968 epidr = env->spr[SPR_BOOKE_EPLC];
970 *epid_out = (epidr & EPID_EPID) >> EPID_EPID_SHIFT;
971 *as_out = !!(epidr & EPID_EAS);
972 *pr_out = !!(epidr & EPID_EPR);
981 /* Check whether the TLB entry found by hashing really matches */
982 static int mmubooke206_check_tlb(CPUPPCState *env, ppcmas_tlb_t *tlb,
983 hwaddr *raddr, int *prot,
984 target_ulong address, int rw,
985 int access_type, int mmu_idx)
991 bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr);
994 if (ppcmas_tlb_check(env, tlb, raddr, address,
995 env->spr[SPR_BOOKE_PID]) >= 0) {
999 if (env->spr[SPR_BOOKE_PID1] &&
1000 ppcmas_tlb_check(env, tlb, raddr, address,
1001 env->spr[SPR_BOOKE_PID1]) >= 0) {
1005 if (env->spr[SPR_BOOKE_PID2] &&
1006 ppcmas_tlb_check(env, tlb, raddr, address,
1007 env->spr[SPR_BOOKE_PID2]) >= 0) {
1011 if (ppcmas_tlb_check(env, tlb, raddr, address, epid) >= 0) {
1016 LOG_SWTLB("%s: TLB entry not found\n", __func__);
1022 if (tlb->mas7_3 & MAS3_UR) {
1025 if (tlb->mas7_3 & MAS3_UW) {
1026 prot2 |= PAGE_WRITE;
1028 if (tlb->mas7_3 & MAS3_UX) {
1032 if (tlb->mas7_3 & MAS3_SR) {
1035 if (tlb->mas7_3 & MAS3_SW) {
1036 prot2 |= PAGE_WRITE;
1038 if (tlb->mas7_3 & MAS3_SX) {
1043 /* Check the address space and permissions */
1044 if (access_type == ACCESS_CODE) {
1045 /* There is no way to fetch code using epid load */
1047 if (msr_ir != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
1048 LOG_SWTLB("%s: AS doesn't match\n", __func__);
1053 if (prot2 & PAGE_EXEC) {
1054 LOG_SWTLB("%s: good TLB!\n", __func__);
1058 LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__, prot2);
1061 if (as != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
1062 LOG_SWTLB("%s: AS doesn't match\n", __func__);
1067 if ((!rw && prot2 & PAGE_READ) || (rw && (prot2 & PAGE_WRITE))) {
1068 LOG_SWTLB("%s: found TLB!\n", __func__);
1072 LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__, prot2);
1079 static int mmubooke206_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
1080 target_ulong address, int rw,
1081 int access_type, int mmu_idx)
1088 raddr = (hwaddr)-1ULL;
1090 for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
1091 int ways = booke206_tlb_ways(env, i);
1093 for (j = 0; j < ways; j++) {
1094 tlb = booke206_get_tlbm(env, i, address, j);
1098 ret = mmubooke206_check_tlb(env, tlb, &raddr, &ctx->prot, address,
1099 rw, access_type, mmu_idx);
1110 LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx
1111 " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
1114 LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx
1115 " %d %d\n", __func__, address, raddr, ctx->prot, ret);
1121 static const char *book3e_tsize_to_str[32] = {
1122 "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K",
1123 "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M",
1124 "1G", "2G", "4G", "8G", "16G", "32G", "64G", "128G", "256G", "512G",
1128 static void mmubooke_dump_mmu(CPUPPCState *env)
1130 ppcemb_tlb_t *entry;
1133 if (kvm_enabled() && !env->kvm_sw_tlb) {
1134 qemu_printf("Cannot access KVM TLB\n");
1138 qemu_printf("\nTLB:\n");
1139 qemu_printf("Effective Physical Size PID Prot "
1142 entry = &env->tlb.tlbe[0];
1143 for (i = 0; i < env->nb_tlb; i++, entry++) {
1146 uint64_t size = (uint64_t)entry->size;
1149 /* Check valid flag */
1150 if (!(entry->prot & PAGE_VALID)) {
1154 mask = ~(entry->size - 1);
1155 ea = entry->EPN & mask;
1156 pa = entry->RPN & mask;
1157 /* Extend the physical address to 36 bits */
1158 pa |= (hwaddr)(entry->RPN & 0xF) << 32;
1159 if (size >= 1 * MiB) {
1160 snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "M", size / MiB);
1162 snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "k", size / KiB);
1164 qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %s %-5u %08x %08x\n",
1165 (uint64_t)ea, (uint64_t)pa, size_buf, (uint32_t)entry->PID,
1166 entry->prot, entry->attr);
1171 static void mmubooke206_dump_one_tlb(CPUPPCState *env, int tlbn, int offset,
1174 ppcmas_tlb_t *entry;
1177 qemu_printf("\nTLB%d:\n", tlbn);
1178 qemu_printf("Effective Physical Size TID TS SRWX"
1179 " URWX WIMGE U0123\n");
1181 entry = &env->tlb.tlbm[offset];
1182 for (i = 0; i < tlbsize; i++, entry++) {
1183 hwaddr ea, pa, size;
1186 if (!(entry->mas1 & MAS1_VALID)) {
1190 tsize = (entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
1191 size = 1024ULL << tsize;
1192 ea = entry->mas2 & ~(size - 1);
1193 pa = entry->mas7_3 & ~(size - 1);
1195 qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %4s %-5u %1u S%c%c%c"
1196 "U%c%c%c %c%c%c%c%c U%c%c%c%c\n",
1197 (uint64_t)ea, (uint64_t)pa,
1198 book3e_tsize_to_str[tsize],
1199 (entry->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT,
1200 (entry->mas1 & MAS1_TS) >> MAS1_TS_SHIFT,
1201 entry->mas7_3 & MAS3_SR ? 'R' : '-',
1202 entry->mas7_3 & MAS3_SW ? 'W' : '-',
1203 entry->mas7_3 & MAS3_SX ? 'X' : '-',
1204 entry->mas7_3 & MAS3_UR ? 'R' : '-',
1205 entry->mas7_3 & MAS3_UW ? 'W' : '-',
1206 entry->mas7_3 & MAS3_UX ? 'X' : '-',
1207 entry->mas2 & MAS2_W ? 'W' : '-',
1208 entry->mas2 & MAS2_I ? 'I' : '-',
1209 entry->mas2 & MAS2_M ? 'M' : '-',
1210 entry->mas2 & MAS2_G ? 'G' : '-',
1211 entry->mas2 & MAS2_E ? 'E' : '-',
1212 entry->mas7_3 & MAS3_U0 ? '0' : '-',
1213 entry->mas7_3 & MAS3_U1 ? '1' : '-',
1214 entry->mas7_3 & MAS3_U2 ? '2' : '-',
1215 entry->mas7_3 & MAS3_U3 ? '3' : '-');
1219 static void mmubooke206_dump_mmu(CPUPPCState *env)
1224 if (kvm_enabled() && !env->kvm_sw_tlb) {
1225 qemu_printf("Cannot access KVM TLB\n");
1229 for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
1230 int size = booke206_tlb_size(env, i);
1236 mmubooke206_dump_one_tlb(env, i, offset, size);
1241 static void mmu6xx_dump_BATs(CPUPPCState *env, int type)
1243 target_ulong *BATlt, *BATut, *BATu, *BATl;
1244 target_ulong BEPIl, BEPIu, bl;
1249 BATlt = env->IBAT[1];
1250 BATut = env->IBAT[0];
1253 BATlt = env->DBAT[1];
1254 BATut = env->DBAT[0];
1258 for (i = 0; i < env->nb_BATs; i++) {
1261 BEPIu = *BATu & 0xF0000000;
1262 BEPIl = *BATu & 0x0FFE0000;
1263 bl = (*BATu & 0x00001FFC) << 15;
1264 qemu_printf("%s BAT%d BATu " TARGET_FMT_lx
1265 " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
1266 TARGET_FMT_lx " " TARGET_FMT_lx "\n",
1267 type == ACCESS_CODE ? "code" : "data", i,
1268 *BATu, *BATl, BEPIu, BEPIl, bl);
1272 static void mmu6xx_dump_mmu(CPUPPCState *env)
1274 PowerPCCPU *cpu = env_archcpu(env);
1277 int type, way, entry, i;
1279 qemu_printf("HTAB base = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_base(cpu));
1280 qemu_printf("HTAB mask = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_mask(cpu));
1282 qemu_printf("\nSegment registers:\n");
1283 for (i = 0; i < 32; i++) {
1285 if (sr & 0x80000000) {
1286 qemu_printf("%02d T=%d Ks=%d Kp=%d BUID=0x%03x "
1287 "CNTLR_SPEC=0x%05x\n", i,
1288 sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0,
1289 sr & 0x20000000 ? 1 : 0, (uint32_t)((sr >> 20) & 0x1FF),
1290 (uint32_t)(sr & 0xFFFFF));
1292 qemu_printf("%02d T=%d Ks=%d Kp=%d N=%d VSID=0x%06x\n", i,
1293 sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0,
1294 sr & 0x20000000 ? 1 : 0, sr & 0x10000000 ? 1 : 0,
1295 (uint32_t)(sr & 0x00FFFFFF));
1299 qemu_printf("\nBATs:\n");
1300 mmu6xx_dump_BATs(env, ACCESS_INT);
1301 mmu6xx_dump_BATs(env, ACCESS_CODE);
1303 if (env->id_tlbs != 1) {
1304 qemu_printf("ERROR: 6xx MMU should have separated TLB"
1305 " for code and data\n");
1308 qemu_printf("\nTLBs [EPN EPN + SIZE]\n");
1310 for (type = 0; type < 2; type++) {
1311 for (way = 0; way < env->nb_ways; way++) {
1312 for (entry = env->nb_tlb * type + env->tlb_per_way * way;
1313 entry < (env->nb_tlb * type + env->tlb_per_way * (way + 1));
1316 tlb = &env->tlb.tlb6[entry];
1317 qemu_printf("%s TLB %02d/%02d way:%d %s ["
1318 TARGET_FMT_lx " " TARGET_FMT_lx "]\n",
1319 type ? "code" : "data", entry % env->nb_tlb,
1321 pte_is_valid(tlb->pte0) ? "valid" : "inval",
1322 tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE);
1328 void dump_mmu(CPUPPCState *env)
1330 switch (env->mmu_model) {
1331 case POWERPC_MMU_BOOKE:
1332 mmubooke_dump_mmu(env);
1334 case POWERPC_MMU_BOOKE206:
1335 mmubooke206_dump_mmu(env);
1337 case POWERPC_MMU_SOFT_6xx:
1338 case POWERPC_MMU_SOFT_74xx:
1339 mmu6xx_dump_mmu(env);
1341 #if defined(TARGET_PPC64)
1342 case POWERPC_MMU_64B:
1343 case POWERPC_MMU_2_03:
1344 case POWERPC_MMU_2_06:
1345 case POWERPC_MMU_2_07:
1346 dump_slb(env_archcpu(env));
1348 case POWERPC_MMU_3_00:
1349 if (ppc64_v3_radix(env_archcpu(env))) {
1350 /* TODO - Unsupported */
1352 dump_slb(env_archcpu(env));
1357 qemu_log_mask(LOG_UNIMP, "%s: unimplemented\n", __func__);
1361 static inline int check_physical(CPUPPCState *env, mmu_ctx_t *ctx,
1362 target_ulong eaddr, int rw)
1367 ctx->prot = PAGE_READ | PAGE_EXEC;
1369 switch (env->mmu_model) {
1370 case POWERPC_MMU_SOFT_6xx:
1371 case POWERPC_MMU_SOFT_74xx:
1372 case POWERPC_MMU_SOFT_4xx:
1373 case POWERPC_MMU_REAL:
1374 case POWERPC_MMU_BOOKE:
1375 ctx->prot |= PAGE_WRITE;
1378 case POWERPC_MMU_SOFT_4xx_Z:
1379 if (unlikely(msr_pe != 0)) {
1381 * The 403 family adds some specific protections, using the
1382 * PBL/PBU registers for accesses with no translation.
1385 /* Check PLB validity */
1386 (env->pb[0] < env->pb[1] &&
1387 /* and address in plb area */
1388 eaddr >= env->pb[0] && eaddr < env->pb[1]) ||
1389 (env->pb[2] < env->pb[3] &&
1390 eaddr >= env->pb[2] && eaddr < env->pb[3]) ? 1 : 0;
1391 if (in_plb ^ msr_px) {
1392 /* Access in protected area */
1394 /* Access is not allowed */
1398 /* Read-write access is allowed */
1399 ctx->prot |= PAGE_WRITE;
1405 /* Caller's checks mean we should never get here for other models */
1413 static int get_physical_address_wtlb(
1414 CPUPPCState *env, mmu_ctx_t *ctx,
1415 target_ulong eaddr, int rw, int access_type,
1419 bool real_mode = (access_type == ACCESS_CODE && msr_ir == 0)
1420 || (access_type != ACCESS_CODE && msr_dr == 0);
1422 switch (env->mmu_model) {
1423 case POWERPC_MMU_SOFT_6xx:
1424 case POWERPC_MMU_SOFT_74xx:
1426 ret = check_physical(env, ctx, eaddr, rw);
1428 /* Try to find a BAT */
1429 if (env->nb_BATs != 0) {
1430 ret = get_bat_6xx_tlb(env, ctx, eaddr, rw, access_type);
1433 /* We didn't match any BAT entry or don't have BATs */
1434 ret = get_segment_6xx_tlb(env, ctx, eaddr, rw, access_type);
1439 case POWERPC_MMU_SOFT_4xx:
1440 case POWERPC_MMU_SOFT_4xx_Z:
1442 ret = check_physical(env, ctx, eaddr, rw);
1444 ret = mmu40x_get_physical_address(env, ctx, eaddr,
1448 case POWERPC_MMU_BOOKE:
1449 ret = mmubooke_get_physical_address(env, ctx, eaddr,
1452 case POWERPC_MMU_BOOKE206:
1453 ret = mmubooke206_get_physical_address(env, ctx, eaddr, rw,
1454 access_type, mmu_idx);
1456 case POWERPC_MMU_MPC8xx:
1458 cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n");
1460 case POWERPC_MMU_REAL:
1462 ret = check_physical(env, ctx, eaddr, rw);
1464 cpu_abort(env_cpu(env),
1465 "PowerPC in real mode do not do any translation\n");
1469 cpu_abort(env_cpu(env), "Unknown or invalid MMU model\n");
1476 static int get_physical_address(
1477 CPUPPCState *env, mmu_ctx_t *ctx,
1478 target_ulong eaddr, int rw, int access_type)
1480 return get_physical_address_wtlb(env, ctx, eaddr, rw, access_type, 0);
1483 hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
1485 PowerPCCPU *cpu = POWERPC_CPU(cs);
1486 CPUPPCState *env = &cpu->env;
1489 switch (env->mmu_model) {
1490 #if defined(TARGET_PPC64)
1491 case POWERPC_MMU_64B:
1492 case POWERPC_MMU_2_03:
1493 case POWERPC_MMU_2_06:
1494 case POWERPC_MMU_2_07:
1495 return ppc_hash64_get_phys_page_debug(cpu, addr);
1496 case POWERPC_MMU_3_00:
1497 return ppc64_v3_get_phys_page_debug(cpu, addr);
1500 case POWERPC_MMU_32B:
1501 case POWERPC_MMU_601:
1502 return ppc_hash32_get_phys_page_debug(cpu, addr);
1508 if (unlikely(get_physical_address(env, &ctx, addr, 0, ACCESS_INT) != 0)) {
1511 * Some MMUs have separate TLBs for code and data. If we only
1512 * try an ACCESS_INT, we may not be able to read instructions
1513 * mapped by code TLBs, so we also try an ACCESS_CODE.
1515 if (unlikely(get_physical_address(env, &ctx, addr, 0,
1516 ACCESS_CODE) != 0)) {
1521 return ctx.raddr & TARGET_PAGE_MASK;
1524 static void booke206_update_mas_tlb_miss(CPUPPCState *env, target_ulong address,
1525 int rw, int mmu_idx)
1529 uint32_t missed_tid = 0;
1530 bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr);
1534 env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
1535 env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
1536 env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
1537 env->spr[SPR_BOOKE_MAS3] = 0;
1538 env->spr[SPR_BOOKE_MAS6] = 0;
1539 env->spr[SPR_BOOKE_MAS7] = 0;
1543 env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
1544 env->spr[SPR_BOOKE_MAS6] |= MAS6_SAS;
1547 env->spr[SPR_BOOKE_MAS1] |= MAS1_VALID;
1548 env->spr[SPR_BOOKE_MAS2] |= address & MAS2_EPN_MASK;
1551 switch (env->spr[SPR_BOOKE_MAS4] & MAS4_TIDSELD_PIDZ) {
1552 case MAS4_TIDSELD_PID0:
1553 missed_tid = env->spr[SPR_BOOKE_PID];
1555 case MAS4_TIDSELD_PID1:
1556 missed_tid = env->spr[SPR_BOOKE_PID1];
1558 case MAS4_TIDSELD_PID2:
1559 missed_tid = env->spr[SPR_BOOKE_PID2];
1562 env->spr[SPR_BOOKE_MAS6] |= env->spr[SPR_BOOKE_PID] << 16;
1565 env->spr[SPR_BOOKE_MAS6] |= missed_tid << 16;
1567 env->spr[SPR_BOOKE_MAS1] |= (missed_tid << MAS1_TID_SHIFT);
1570 /* next victim logic */
1571 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
1573 env->last_way &= booke206_tlb_ways(env, 0) - 1;
1574 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
1577 /* Perform address translation */
1578 static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address,
1579 int rw, int mmu_idx)
1581 CPUState *cs = env_cpu(env);
1582 PowerPCCPU *cpu = POWERPC_CPU(cs);
1590 access_type = ACCESS_CODE;
1593 access_type = env->access_type;
1595 ret = get_physical_address_wtlb(env, &ctx, address, rw,
1596 access_type, mmu_idx);
1598 tlb_set_page(cs, address & TARGET_PAGE_MASK,
1599 ctx.raddr & TARGET_PAGE_MASK, ctx.prot,
1600 mmu_idx, TARGET_PAGE_SIZE);
1602 } else if (ret < 0) {
1604 if (access_type == ACCESS_CODE) {
1607 /* No matches in page tables or TLB */
1608 switch (env->mmu_model) {
1609 case POWERPC_MMU_SOFT_6xx:
1610 cs->exception_index = POWERPC_EXCP_IFTLB;
1611 env->error_code = 1 << 18;
1612 env->spr[SPR_IMISS] = address;
1613 env->spr[SPR_ICMP] = 0x80000000 | ctx.ptem;
1615 case POWERPC_MMU_SOFT_74xx:
1616 cs->exception_index = POWERPC_EXCP_IFTLB;
1618 case POWERPC_MMU_SOFT_4xx:
1619 case POWERPC_MMU_SOFT_4xx_Z:
1620 cs->exception_index = POWERPC_EXCP_ITLB;
1621 env->error_code = 0;
1622 env->spr[SPR_40x_DEAR] = address;
1623 env->spr[SPR_40x_ESR] = 0x00000000;
1625 case POWERPC_MMU_BOOKE206:
1626 booke206_update_mas_tlb_miss(env, address, 2, mmu_idx);
1628 case POWERPC_MMU_BOOKE:
1629 cs->exception_index = POWERPC_EXCP_ITLB;
1630 env->error_code = 0;
1631 env->spr[SPR_BOOKE_DEAR] = address;
1632 env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, 0);
1634 case POWERPC_MMU_MPC8xx:
1636 cpu_abort(cs, "MPC8xx MMU model is not implemented\n");
1638 case POWERPC_MMU_REAL:
1639 cpu_abort(cs, "PowerPC in real mode should never raise "
1640 "any MMU exceptions\n");
1643 cpu_abort(cs, "Unknown or invalid MMU model\n");
1648 /* Access rights violation */
1649 cs->exception_index = POWERPC_EXCP_ISI;
1650 env->error_code = 0x08000000;
1653 /* No-execute protection violation */
1654 if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
1655 (env->mmu_model == POWERPC_MMU_BOOKE206)) {
1656 env->spr[SPR_BOOKE_ESR] = 0x00000000;
1658 cs->exception_index = POWERPC_EXCP_ISI;
1659 env->error_code = 0x10000000;
1662 /* Direct store exception */
1663 /* No code fetch is allowed in direct-store areas */
1664 cs->exception_index = POWERPC_EXCP_ISI;
1665 env->error_code = 0x10000000;
1671 /* No matches in page tables or TLB */
1672 switch (env->mmu_model) {
1673 case POWERPC_MMU_SOFT_6xx:
1675 cs->exception_index = POWERPC_EXCP_DSTLB;
1676 env->error_code = 1 << 16;
1678 cs->exception_index = POWERPC_EXCP_DLTLB;
1679 env->error_code = 0;
1681 env->spr[SPR_DMISS] = address;
1682 env->spr[SPR_DCMP] = 0x80000000 | ctx.ptem;
1684 env->error_code |= ctx.key << 19;
1685 env->spr[SPR_HASH1] = ppc_hash32_hpt_base(cpu) +
1686 get_pteg_offset32(cpu, ctx.hash[0]);
1687 env->spr[SPR_HASH2] = ppc_hash32_hpt_base(cpu) +
1688 get_pteg_offset32(cpu, ctx.hash[1]);
1690 case POWERPC_MMU_SOFT_74xx:
1692 cs->exception_index = POWERPC_EXCP_DSTLB;
1694 cs->exception_index = POWERPC_EXCP_DLTLB;
1697 /* Implement LRU algorithm */
1698 env->error_code = ctx.key << 19;
1699 env->spr[SPR_TLBMISS] = (address & ~((target_ulong)0x3)) |
1700 ((env->last_way + 1) & (env->nb_ways - 1));
1701 env->spr[SPR_PTEHI] = 0x80000000 | ctx.ptem;
1703 case POWERPC_MMU_SOFT_4xx:
1704 case POWERPC_MMU_SOFT_4xx_Z:
1705 cs->exception_index = POWERPC_EXCP_DTLB;
1706 env->error_code = 0;
1707 env->spr[SPR_40x_DEAR] = address;
1709 env->spr[SPR_40x_ESR] = 0x00800000;
1711 env->spr[SPR_40x_ESR] = 0x00000000;
1714 case POWERPC_MMU_MPC8xx:
1716 cpu_abort(cs, "MPC8xx MMU model is not implemented\n");
1718 case POWERPC_MMU_BOOKE206:
1719 booke206_update_mas_tlb_miss(env, address, rw, mmu_idx);
1721 case POWERPC_MMU_BOOKE:
1722 cs->exception_index = POWERPC_EXCP_DTLB;
1723 env->error_code = 0;
1724 env->spr[SPR_BOOKE_DEAR] = address;
1725 env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, rw);
1727 case POWERPC_MMU_REAL:
1728 cpu_abort(cs, "PowerPC in real mode should never raise "
1729 "any MMU exceptions\n");
1732 cpu_abort(cs, "Unknown or invalid MMU model\n");
1737 /* Access rights violation */
1738 cs->exception_index = POWERPC_EXCP_DSI;
1739 env->error_code = 0;
1740 if (env->mmu_model == POWERPC_MMU_SOFT_4xx
1741 || env->mmu_model == POWERPC_MMU_SOFT_4xx_Z) {
1742 env->spr[SPR_40x_DEAR] = address;
1744 env->spr[SPR_40x_ESR] |= 0x00800000;
1746 } else if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
1747 (env->mmu_model == POWERPC_MMU_BOOKE206)) {
1748 env->spr[SPR_BOOKE_DEAR] = address;
1749 env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, rw);
1751 env->spr[SPR_DAR] = address;
1753 env->spr[SPR_DSISR] = 0x0A000000;
1755 env->spr[SPR_DSISR] = 0x08000000;
1760 /* Direct store exception */
1761 switch (access_type) {
1763 /* Floating point load/store */
1764 cs->exception_index = POWERPC_EXCP_ALIGN;
1765 env->error_code = POWERPC_EXCP_ALIGN_FP;
1766 env->spr[SPR_DAR] = address;
1769 /* lwarx, ldarx or stwcx. */
1770 cs->exception_index = POWERPC_EXCP_DSI;
1771 env->error_code = 0;
1772 env->spr[SPR_DAR] = address;
1774 env->spr[SPR_DSISR] = 0x06000000;
1776 env->spr[SPR_DSISR] = 0x04000000;
1780 /* eciwx or ecowx */
1781 cs->exception_index = POWERPC_EXCP_DSI;
1782 env->error_code = 0;
1783 env->spr[SPR_DAR] = address;
1785 env->spr[SPR_DSISR] = 0x06100000;
1787 env->spr[SPR_DSISR] = 0x04100000;
1791 printf("DSI: invalid exception (%d)\n", ret);
1792 cs->exception_index = POWERPC_EXCP_PROGRAM;
1794 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL;
1795 env->spr[SPR_DAR] = address;
1807 /*****************************************************************************/
1808 /* BATs management */
1809 #if !defined(FLUSH_ALL_TLBS)
1810 static inline void do_invalidate_BAT(CPUPPCState *env, target_ulong BATu,
1813 CPUState *cs = env_cpu(env);
1814 target_ulong base, end, page;
1816 base = BATu & ~0x0001FFFF;
1817 end = base + mask + 0x00020000;
1818 if (((end - base) >> TARGET_PAGE_BITS) > 1024) {
1819 /* Flushing 1024 4K pages is slower than a complete flush */
1820 LOG_BATS("Flush all BATs\n");
1822 LOG_BATS("Flush done\n");
1825 LOG_BATS("Flush BAT from " TARGET_FMT_lx " to " TARGET_FMT_lx " ("
1826 TARGET_FMT_lx ")\n", base, end, mask);
1827 for (page = base; page != end; page += TARGET_PAGE_SIZE) {
1828 tlb_flush_page(cs, page);
1830 LOG_BATS("Flush done\n");
1834 static inline void dump_store_bat(CPUPPCState *env, char ID, int ul, int nr,
1837 LOG_BATS("Set %cBAT%d%c to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", ID,
1838 nr, ul == 0 ? 'u' : 'l', value, env->nip);
1841 void helper_store_ibatu(CPUPPCState *env, uint32_t nr, target_ulong value)
1844 #if defined(FLUSH_ALL_TLBS)
1845 PowerPCCPU *cpu = env_archcpu(env);
1848 dump_store_bat(env, 'I', 0, nr, value);
1849 if (env->IBAT[0][nr] != value) {
1850 mask = (value << 15) & 0x0FFE0000UL;
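/*
 * Worked example (added for clarity): BL sits in BATU bits 2..12, so
 * shifting the new value left by 15 lines it up with address bits 17..27;
 * a BATU with BL = 0x7FF yields mask 0x0FFE0000, i.e. a 256 MiB block whose
 * pages do_invalidate_BAT() then flushes.
 */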
1851 #if !defined(FLUSH_ALL_TLBS)
1852 do_invalidate_BAT(env, env->IBAT[0][nr], mask);
1855 * When storing valid upper BAT, mask BEPI and BRPN and
1856 * invalidate all TLBs covered by this BAT
1858 mask = (value << 15) & 0x0FFE0000UL;
1859 env->IBAT[0][nr] = (value & 0x00001FFFUL) |
1860 (value & ~0x0001FFFFUL & ~mask);
1861 env->IBAT[1][nr] = (env->IBAT[1][nr] & 0x0000007B) |
1862 (env->IBAT[1][nr] & ~0x0001FFFF & ~mask);
1863 #if !defined(FLUSH_ALL_TLBS)
1864 do_invalidate_BAT(env, env->IBAT[0][nr], mask);
1866 tlb_flush(env_cpu(env));
1871 void helper_store_ibatl(CPUPPCState *env, uint32_t nr, target_ulong value)
1873 dump_store_bat(env, 'I', 1, nr, value);
1874 env->IBAT[1][nr] = value;
1877 void helper_store_dbatu(CPUPPCState *env, uint32_t nr, target_ulong value)
1880 #if defined(FLUSH_ALL_TLBS)
1881 PowerPCCPU *cpu = env_archcpu(env);
1884 dump_store_bat(env, 'D', 0, nr, value);
1885 if (env->DBAT[0][nr] != value) {
1887 * When storing valid upper BAT, mask BEPI and BRPN and
1888 * invalidate all TLBs covered by this BAT
1890 mask = (value << 15) & 0x0FFE0000UL;
1891 #if !defined(FLUSH_ALL_TLBS)
1892 do_invalidate_BAT(env, env->DBAT[0][nr], mask);
1894 mask = (value << 15) & 0x0FFE0000UL;
1895 env->DBAT[0][nr] = (value & 0x00001FFFUL) |
1896 (value & ~0x0001FFFFUL & ~mask);
1897 env->DBAT[1][nr] = (env->DBAT[1][nr] & 0x0000007B) |
1898 (env->DBAT[1][nr] & ~0x0001FFFF & ~mask);
1899 #if !defined(FLUSH_ALL_TLBS)
1900 do_invalidate_BAT(env, env->DBAT[0][nr], mask);
1902 tlb_flush(env_cpu(env));
1907 void helper_store_dbatl(CPUPPCState *env, uint32_t nr, target_ulong value)
1909 dump_store_bat(env, 'D', 1, nr, value);
1910 env->DBAT[1][nr] = value;
1913 void helper_store_601_batu(CPUPPCState *env, uint32_t nr, target_ulong value)
1916 #if defined(FLUSH_ALL_TLBS)
1917 PowerPCCPU *cpu = env_archcpu(env);
1921 dump_store_bat(env, 'I', 0, nr, value);
1922 if (env->IBAT[0][nr] != value) {
1923 #if defined(FLUSH_ALL_TLBS)
1926 mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL;
1927 if (env->IBAT[1][nr] & 0x40) {
1928 /* Invalidate BAT only if it is valid */
1929 #if !defined(FLUSH_ALL_TLBS)
1930 do_invalidate_BAT(env, env->IBAT[0][nr], mask);
1936 * When storing valid upper BAT, mask BEPI and BRPN and
1937 * invalidate all TLBs covered by this BAT
1939 env->IBAT[0][nr] = (value & 0x00001FFFUL) |
1940 (value & ~0x0001FFFFUL & ~mask);
1941 env->DBAT[0][nr] = env->IBAT[0][nr];
1942 if (env->IBAT[1][nr] & 0x40) {
1943 #if !defined(FLUSH_ALL_TLBS)
1944 do_invalidate_BAT(env, env->IBAT[0][nr], mask);
1949 #if defined(FLUSH_ALL_TLBS)
1951 tlb_flush(env_cpu(env));
1957 void helper_store_601_batl(CPUPPCState *env, uint32_t nr, target_ulong value)
1959 #if !defined(FLUSH_ALL_TLBS)
1962 PowerPCCPU *cpu = env_archcpu(env);
1966 dump_store_bat(env, 'I', 1, nr, value);
1967 if (env->IBAT[1][nr] != value) {
1968 #if defined(FLUSH_ALL_TLBS)
1971 if (env->IBAT[1][nr] & 0x40) {
1972 #if !defined(FLUSH_ALL_TLBS)
1973 mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL;
1974 do_invalidate_BAT(env, env->IBAT[0][nr], mask);
1980 #if !defined(FLUSH_ALL_TLBS)
1981 mask = (value << 17) & 0x0FFE0000UL;
1982 do_invalidate_BAT(env, env->IBAT[0][nr], mask);
1987 env->IBAT[1][nr] = value;
1988 env->DBAT[1][nr] = value;
1989 #if defined(FLUSH_ALL_TLBS)
1991 tlb_flush(env_cpu(env));
1997 /*****************************************************************************/
1998 /* TLB management */
1999 void ppc_tlb_invalidate_all(CPUPPCState *env)
2001 #if defined(TARGET_PPC64)
2002 if (env->mmu_model & POWERPC_MMU_64) {
2003 env->tlb_need_flush = 0;
2004 tlb_flush(env_cpu(env));
2006 #endif /* defined(TARGET_PPC64) */
2007 switch (env->mmu_model) {
2008 case POWERPC_MMU_SOFT_6xx:
2009 case POWERPC_MMU_SOFT_74xx:
2010 ppc6xx_tlb_invalidate_all(env);
2012 case POWERPC_MMU_SOFT_4xx:
2013 case POWERPC_MMU_SOFT_4xx_Z:
2014 ppc4xx_tlb_invalidate_all(env);
2016 case POWERPC_MMU_REAL:
2017 cpu_abort(env_cpu(env), "No TLB for PowerPC 4xx in real mode\n");
2019 case POWERPC_MMU_MPC8xx:
2021 cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n");
2023 case POWERPC_MMU_BOOKE:
2024 tlb_flush(env_cpu(env));
2026 case POWERPC_MMU_BOOKE206:
2027 booke206_flush_tlb(env, -1, 0);
2029 case POWERPC_MMU_32B:
2030 case POWERPC_MMU_601:
2031 env->tlb_need_flush = 0;
2032 tlb_flush(env_cpu(env));
2036 cpu_abort(env_cpu(env), "Unknown MMU model %x\n", env->mmu_model);
2041 void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
2043 #if !defined(FLUSH_ALL_TLBS)
2044 addr &= TARGET_PAGE_MASK;
2045 #if defined(TARGET_PPC64)
2046 if (env->mmu_model & POWERPC_MMU_64) {
2047 /* tlbie invalidates TLBs for all segments */
2049 * XXX: given the fact that there are too many segments to invalidate,
2050 * and we still don't have a tlb_flush_mask(env, n, mask) in QEMU,
2051 * we just invalidate all TLBs
2053 env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
2055 #endif /* defined(TARGET_PPC64) */
2056 switch (env->mmu_model) {
2057 case POWERPC_MMU_SOFT_6xx:
2058 case POWERPC_MMU_SOFT_74xx:
2059 ppc6xx_tlb_invalidate_virt(env, addr, 0);
2060 if (env->id_tlbs == 1) {
2061 ppc6xx_tlb_invalidate_virt(env, addr, 1);
2064 case POWERPC_MMU_32B:
2065 case POWERPC_MMU_601:
2067 * Actual CPUs invalidate entire congruence classes based on
2068 * the geometry of their TLBs, and some OSes take that into
2069 * account; we just mark the TLB to be flushed later (on a
2070 * context-synchronizing event or a sync instruction on 32-bit).
2072 env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
2075 /* Should never reach here with other MMU models */
2079 ppc_tlb_invalidate_all(env);
2083 /*****************************************************************************/
2084 /* Special registers manipulation */
2085 void ppc_store_sdr1(CPUPPCState *env, target_ulong value)
2087 PowerPCCPU *cpu = env_archcpu(env);
2088 qemu_log_mask(CPU_LOG_MMU, "%s: " TARGET_FMT_lx "\n", __func__, value);
2090 #if defined(TARGET_PPC64)
2091 if (env->mmu_model & POWERPC_MMU_64) {
2092 target_ulong sdr_mask = SDR_64_HTABORG | SDR_64_HTABSIZE;
2093 target_ulong htabsize = value & SDR_64_HTABSIZE;
2095 if (value & ~sdr_mask) {
2096 error_report("Invalid bits 0x"TARGET_FMT_lx" set in SDR1",
2100 if (htabsize > 28) {
2101 error_report("Invalid HTABSIZE 0x" TARGET_FMT_lx" stored in SDR1",
2106 #endif /* defined(TARGET_PPC64) */
2107 /* FIXME: Should check for valid HTABMASK values in 32-bit case */
2108 env->spr[SPR_SDR1] = value;
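/*
 * Worked example for the 32-bit case (added for clarity, assuming the usual
 * SDR1 layout): the upper 16 bits give HTABORG and the low 9 bits give
 * HTABMASK, so SDR1 = 0x00100001 makes ppc_hash32_hpt_base() return
 * 0x00100000 and ppc_hash32_hpt_mask() return 0x1FFFF, i.e. a 128 KiB hash
 * table.
 */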
2111 #if defined(TARGET_PPC64)
2112 void ppc_store_ptcr(CPUPPCState *env, target_ulong value)
2114 PowerPCCPU *cpu = env_archcpu(env);
2115 target_ulong ptcr_mask = PTCR_PATB | PTCR_PATS;
2116 target_ulong patbsize = value & PTCR_PATS;
2118 qemu_log_mask(CPU_LOG_MMU, "%s: " TARGET_FMT_lx "\n", __func__, value);
2121 assert(env->mmu_model & POWERPC_MMU_3_00);
2123 if (value & ~ptcr_mask) {
2124 error_report("Invalid bits 0x"TARGET_FMT_lx" set in PTCR",
2125 value & ~ptcr_mask);
2129 if (patbsize > 24) {
2130 error_report("Invalid Partition Table size 0x" TARGET_FMT_lx
2131 " stored in PTCR", patbsize);
2135 env->spr[SPR_PTCR] = value;
2138 #endif /* defined(TARGET_PPC64) */
2140 /* Segment registers load and store */
2141 target_ulong helper_load_sr(CPUPPCState *env, target_ulong sr_num)
2143 #if defined(TARGET_PPC64)
2144 if (env->mmu_model & POWERPC_MMU_64) {
2149 return env->sr[sr_num];
2152 void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value)
2154 qemu_log_mask(CPU_LOG_MMU,
2155 "%s: reg=%d " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__,
2156 (int)srnum, value, env->sr[srnum]);
2157 #if defined(TARGET_PPC64)
2158 if (env->mmu_model & POWERPC_MMU_64) {
2159 PowerPCCPU *cpu = env_archcpu(env);
2160 uint64_t esid, vsid;
2163 esid = ((uint64_t)(srnum & 0xf) << 28) | SLB_ESID_V;
2166 vsid = (value & 0xfffffff) << 12;
2168 vsid |= ((value >> 27) & 0xf) << 8;
2170 ppc_store_slb(cpu, srnum, esid, vsid);
2173 if (env->sr[srnum] != value) {
2174 env->sr[srnum] = value;
2176 * Invalidating 256 MB of virtual memory in 4 kB pages takes far
2177 * longer than flushing the whole TLB.
2179 #if !defined(FLUSH_ALL_TLBS) && 0
2181 target_ulong page, end;
2182 /* Invalidate 256 MB of virtual memory */
2183 page = (16 << 20) * srnum;
2184 end = page + (16 << 20);
2185 for (; page != end; page += TARGET_PAGE_SIZE) {
2186 tlb_flush_page(env_cpu(env), page);
2190 env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
2195 /* TLB management */
2196 void helper_tlbia(CPUPPCState *env)
2198 ppc_tlb_invalidate_all(env);
2201 void helper_tlbie(CPUPPCState *env, target_ulong addr)
2203 ppc_tlb_invalidate_one(env, addr);
2206 void helper_tlbiva(CPUPPCState *env, target_ulong addr)
2208 /* tlbiva instruction only exists on BookE */
2209 assert(env->mmu_model == POWERPC_MMU_BOOKE);
2211 cpu_abort(env_cpu(env), "BookE MMU model is not implemented\n");
2214 /* Software driven TLB management */
2215 /* PowerPC 602/603 software TLB load instruction helpers */
2216 static void do_6xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code)
2218 target_ulong RPN, CMP, EPN;
2221 RPN = env->spr[SPR_RPA];
2223 CMP = env->spr[SPR_ICMP];
2224 EPN = env->spr[SPR_IMISS];
2226 CMP = env->spr[SPR_DCMP];
2227 EPN = env->spr[SPR_DMISS];
2229 way = (env->spr[SPR_SRR1] >> 17) & 1;
2230 (void)EPN; /* avoid a compiler warning */
2231 LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
2232 " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
2234 /* Store this TLB */
2235 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
2236 way, is_code, CMP, RPN);
2239 void helper_6xx_tlbd(CPUPPCState *env, target_ulong EPN)
2241 do_6xx_tlb(env, EPN, 0);
2244 void helper_6xx_tlbi(CPUPPCState *env, target_ulong EPN)
2246 do_6xx_tlb(env, EPN, 1);
2249 /* PowerPC 74xx software TLB load instruction helpers */
2250 static void do_74xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code)
2252 target_ulong RPN, CMP, EPN;
2255 RPN = env->spr[SPR_PTELO];
2256 CMP = env->spr[SPR_PTEHI];
2257 EPN = env->spr[SPR_TLBMISS] & ~0x3;
2258 way = env->spr[SPR_TLBMISS] & 0x3;
2259 (void)EPN; /* avoid a compiler warning */
2260 LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
2261 " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
2263 /* Store this TLB */
2264 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
2265 way, is_code, CMP, RPN);
2268 void helper_74xx_tlbd(CPUPPCState *env, target_ulong EPN)
2270 do_74xx_tlb(env, EPN, 0);
2273 void helper_74xx_tlbi(CPUPPCState *env, target_ulong EPN)
2275 do_74xx_tlb(env, EPN, 1);
2278 /*****************************************************************************/
2279 /* PowerPC 601 specific instructions (POWER bridge) */
2281 target_ulong helper_rac(CPUPPCState *env, target_ulong addr)
2285 target_ulong ret = 0;
2288 * We don't have to generate many instances of this instruction,
2289 * as rac is supervisor only.
2291 * XXX: FIX THIS: Pretend we have no BAT
2293 nb_BATs = env->nb_BATs;
2295 if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0) {
2298 env->nb_BATs = nb_BATs;
2302 static inline target_ulong booke_tlb_to_page_size(int size)
2304 return 1024 << (2 * size);
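/*
 * Worked example (added for clarity): the size field grows by a factor of
 * four per step, so 0 -> 1 KiB, 1 -> 4 KiB, 3 -> 64 KiB and 7 -> 16 MiB;
 * booke_page_size_to_tlb() below is the inverse mapping.
 */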
2307 static inline int booke_page_size_to_tlb(target_ulong page_size)
2311 switch (page_size) {
2345 #if defined(TARGET_PPC64)
2346 case 0x000100000000ULL:
2349 case 0x000400000000ULL:
2352 case 0x001000000000ULL:
2355 case 0x004000000000ULL:
2358 case 0x010000000000ULL:
2370 /* Helpers for 4xx TLB management */
2371 #define PPC4XX_TLB_ENTRY_MASK 0x0000003f /* Mask for 64 TLB entries */
2373 #define PPC4XX_TLBHI_V 0x00000040
2374 #define PPC4XX_TLBHI_E 0x00000020
2375 #define PPC4XX_TLBHI_SIZE_MIN 0
2376 #define PPC4XX_TLBHI_SIZE_MAX 7
2377 #define PPC4XX_TLBHI_SIZE_DEFAULT 1
2378 #define PPC4XX_TLBHI_SIZE_SHIFT 7
2379 #define PPC4XX_TLBHI_SIZE_MASK 0x00000007
2381 #define PPC4XX_TLBLO_EX 0x00000200
2382 #define PPC4XX_TLBLO_WR 0x00000100
2383 #define PPC4XX_TLBLO_ATTR_MASK 0x000000FF
2384 #define PPC4XX_TLBLO_RPN_MASK 0xFFFFFC00
2386 target_ulong helper_4xx_tlbre_hi(CPUPPCState *env, target_ulong entry)
2392 entry &= PPC4XX_TLB_ENTRY_MASK;
2393 tlb = &env->tlb.tlbe[entry];
2395 if (tlb->prot & PAGE_VALID) {
2396 ret |= PPC4XX_TLBHI_V;
2398 size = booke_page_size_to_tlb(tlb->size);
2399 if (size < PPC4XX_TLBHI_SIZE_MIN || size > PPC4XX_TLBHI_SIZE_MAX) {
2400 size = PPC4XX_TLBHI_SIZE_DEFAULT;
2402 ret |= size << PPC4XX_TLBHI_SIZE_SHIFT;
2403 env->spr[SPR_40x_PID] = tlb->PID;
2407 target_ulong helper_4xx_tlbre_lo(CPUPPCState *env, target_ulong entry)
2412 entry &= PPC4XX_TLB_ENTRY_MASK;
2413 tlb = &env->tlb.tlbe[entry];
2415 if (tlb->prot & PAGE_EXEC) {
2416 ret |= PPC4XX_TLBLO_EX;
2418 if (tlb->prot & PAGE_WRITE) {
2419 ret |= PPC4XX_TLBLO_WR;
2424 void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry,
2427 CPUState *cs = env_cpu(env);
2429 target_ulong page, end;
2431 LOG_SWTLB("%s entry %d val " TARGET_FMT_lx "\n", __func__, (int)entry,
2433 entry &= PPC4XX_TLB_ENTRY_MASK;
2434 tlb = &env->tlb.tlbe[entry];
2435 /* Invalidate previous TLB (if it's valid) */
2436 if (tlb->prot & PAGE_VALID) {
2437 end = tlb->EPN + tlb->size;
2438 LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx " end "
2439 TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
2440 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
2441 tlb_flush_page(cs, page);
2444 tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
2445 & PPC4XX_TLBHI_SIZE_MASK);
2447 * We cannot handle TLB size < TARGET_PAGE_SIZE.
2448 * If this ever occurs, we should implement TARGET_PAGE_BITS_VARY
2450 if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
2451 cpu_abort(cs, "TLB size " TARGET_FMT_lu " < %u "
2452 "are not supported (%d)\n"
2453 "Please implement TARGET_PAGE_BITS_VARY\n",
2454 tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
2456 tlb->EPN = val & ~(tlb->size - 1);
2457 if (val & PPC4XX_TLBHI_V) {
2458 tlb->prot |= PAGE_VALID;
2459 if (val & PPC4XX_TLBHI_E) {
2460 /* XXX: TO BE FIXED */
2462 "Little-endian TLB entries are not supported by now\n");
2465 tlb->prot &= ~PAGE_VALID;
2467 tlb->PID = env->spr[SPR_40x_PID]; /* PID */
2468 LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
2469 " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
2470 (int)entry, tlb->RPN, tlb->EPN, tlb->size,
2471 tlb->prot & PAGE_READ ? 'r' : '-',
2472 tlb->prot & PAGE_WRITE ? 'w' : '-',
2473 tlb->prot & PAGE_EXEC ? 'x' : '-',
2474 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
2475 /* Invalidate new TLB (if valid) */
2476 if (tlb->prot & PAGE_VALID) {
2477 end = tlb->EPN + tlb->size;
2478 LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx " end "
2479 TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
2480 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
2481 tlb_flush_page(cs, page);
2486 void helper_4xx_tlbwe_lo(CPUPPCState *env, target_ulong entry,
2491 LOG_SWTLB("%s entry %i val " TARGET_FMT_lx "\n", __func__, (int)entry,
2493 entry &= PPC4XX_TLB_ENTRY_MASK;
2494 tlb = &env->tlb.tlbe[entry];
2495 tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK;
2496 tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK;
2497 tlb->prot = PAGE_READ;
2498 if (val & PPC4XX_TLBLO_EX) {
2499 tlb->prot |= PAGE_EXEC;
2501 if (val & PPC4XX_TLBLO_WR) {
2502 tlb->prot |= PAGE_WRITE;
2504 LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
2505 " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
2506 (int)entry, tlb->RPN, tlb->EPN, tlb->size,
2507 tlb->prot & PAGE_READ ? 'r' : '-',
2508 tlb->prot & PAGE_WRITE ? 'w' : '-',
2509 tlb->prot & PAGE_EXEC ? 'x' : '-',
2510 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
2513 target_ulong helper_4xx_tlbsx(CPUPPCState *env, target_ulong address)
2515 return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
2518 /* PowerPC 440 TLB management */
void helper_440_tlbwe(CPUPPCState *env, uint32_t word, target_ulong entry,
                      target_ulong value)
{
    ppcemb_tlb_t *tlb;
    target_ulong EPN, RPN, size;
    int do_flush_tlbs;

    LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx "\n",
              __func__, word, (int)entry, value);
    do_flush_tlbs = 0;
    entry &= 0x3F;
    tlb = &env->tlb.tlbe[entry];
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        EPN = value & 0xFFFFFC00;
        if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN) {
            do_flush_tlbs = 1;
        }
        tlb->EPN = EPN;
        size = booke_tlb_to_page_size((value >> 4) & 0xF);
        if ((tlb->prot & PAGE_VALID) && tlb->size < size) {
            do_flush_tlbs = 1;
        }
        tlb->size = size;
        tlb->attr &= ~0x1;
        tlb->attr |= (value >> 8) & 1;
        if (value & 0x200) {
            tlb->prot |= PAGE_VALID;
        } else {
            if (tlb->prot & PAGE_VALID) {
                tlb->prot &= ~PAGE_VALID;
                do_flush_tlbs = 1;
            }
        }
        tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
        if (do_flush_tlbs) {
            tlb_flush(env_cpu(env));
        }
        break;
    case 1:
        RPN = value & 0xFFFFFC0F;
        if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN) {
            tlb_flush(env_cpu(env));
        }
        tlb->RPN = RPN;
        break;
    case 2:
        tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
        tlb->prot = tlb->prot & PAGE_VALID;
        if (value & 0x1) {
            tlb->prot |= PAGE_READ << 4;
        }
        if (value & 0x2) {
            tlb->prot |= PAGE_WRITE << 4;
        }
        if (value & 0x4) {
            tlb->prot |= PAGE_EXEC << 4;
        }
        if (value & 0x8) {
            tlb->prot |= PAGE_READ;
        }
        if (value & 0x10) {
            tlb->prot |= PAGE_WRITE;
        }
        if (value & 0x20) {
            tlb->prot |= PAGE_EXEC;
        }
        break;
    }
}

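/* Read back one word of a 440 TLB entry, mirroring the layout used by tlbwe */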
target_ulong helper_440_tlbre(CPUPPCState *env, uint32_t word,
                              target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;
    tlb = &env->tlb.tlbe[entry];
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        ret = tlb->EPN;
        size = booke_page_size_to_tlb(tlb->size);
        if (size < 0 || size > 0xF) {
            size = 1;
        }
        ret |= size << 4;
        if (tlb->attr & 0x1) {
            ret |= 0x100;
        }
        if (tlb->prot & PAGE_VALID) {
            ret |= 0x200;
        }
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
        env->spr[SPR_440_MMUCR] |= tlb->PID;
        break;
    case 1:
        ret = tlb->RPN;
        break;
    case 2:
        ret = tlb->attr & ~0x1;
        if (tlb->prot & (PAGE_READ << 4)) {
            ret |= 0x1;
        }
        if (tlb->prot & (PAGE_WRITE << 4)) {
            ret |= 0x2;
        }
        if (tlb->prot & (PAGE_EXEC << 4)) {
            ret |= 0x4;
        }
        if (tlb->prot & PAGE_READ) {
            ret |= 0x8;
        }
        if (tlb->prot & PAGE_WRITE) {
            ret |= 0x10;
        }
        if (tlb->prot & PAGE_EXEC) {
            ret |= 0x20;
        }
        break;
    }
    return ret;
}

target_ulong helper_440_tlbsx(CPUPPCState *env, target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
}

/* PowerPC BookE 2.06 TLB management */

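/* Return the TLB entry addressed by MAS0[TLBSEL/ESEL] and MAS2[EPN] */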
static ppcmas_tlb_t *booke206_cur_tlb(CPUPPCState *env)
{
    uint32_t tlbncfg = 0;
    int esel = (env->spr[SPR_BOOKE_MAS0] & MAS0_ESEL_MASK) >> MAS0_ESEL_SHIFT;
    int ea = (env->spr[SPR_BOOKE_MAS2] & MAS2_EPN_MASK);
    int tlb;

    tlb = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
    tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlb];

    if ((tlbncfg & TLBnCFG_HES) && (env->spr[SPR_BOOKE_MAS0] & MAS0_HES)) {
        cpu_abort(env_cpu(env), "we don't support HES yet\n");
    }

    return booke206_get_tlbm(env, tlb, ea, esel);
}

void helper_booke_setpid(CPUPPCState *env, uint32_t pidn, target_ulong pid)
{
    env->spr[pidn] = pid;
    /* Changing PIDs means we are in a different address space now */
    tlb_flush(env_cpu(env));
}

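/*
 * The external PID registers only affect translations done through the
 * EPID load/store MMU indexes, so only those indexes need to be flushed.
 */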
void helper_booke_set_eplc(CPUPPCState *env, target_ulong val)
{
    env->spr[SPR_BOOKE_EPLC] = val & EPID_MASK;
    tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_LOAD);
}

void helper_booke_set_epsc(CPUPPCState *env, target_ulong val)
{
    env->spr[SPR_BOOKE_EPSC] = val & EPID_MASK;
    tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_STORE);
}

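/*
 * Flush the QEMU TLB for a single entry: by page when the entry maps exactly
 * one target page, otherwise flush everything.
 */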
static inline void flush_page(CPUPPCState *env, ppcmas_tlb_t *tlb)
{
    if (booke206_tlb_to_page_size(env, tlb) == TARGET_PAGE_SIZE) {
        tlb_flush_page(env_cpu(env), tlb->mas2 & MAS2_EPN_MASK);
    } else {
        tlb_flush(env_cpu(env));
    }
}

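/* tlbwe: write the entry described by the MAS registers into the TLB */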
void helper_booke206_tlbwe(CPUPPCState *env)
{
    uint32_t tlbncfg, tlbn;
    ppcmas_tlb_t *tlb;
    uint32_t size_tlb, size_ps;
    target_ulong mask;

    switch (env->spr[SPR_BOOKE_MAS0] & MAS0_WQ_MASK) {
    case MAS0_WQ_ALWAYS:
        /* good to go, write that entry */
        break;
    case MAS0_WQ_COND:
        /* XXX check if reserved */
        break;
    case MAS0_WQ_CLR_RSRV:
        /* XXX clear entry */
        return;
    default:
        /* no idea what to do */
        return;
    }

    if (((env->spr[SPR_BOOKE_MAS0] & MAS0_ATSEL) == MAS0_ATSEL_LRAT) &&
        !msr_gs) {
        /* XXX we don't support direct LRAT setting yet */
        fprintf(stderr, "cpu: don't support LRAT setting yet\n");
        return;
    }

    tlbn = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
    tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn];

    tlb = booke206_cur_tlb(env);
    if (!tlb) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL |
                               POWERPC_EXCP_INVAL_INVAL, GETPC());
    }

    /* Check that we support the targeted size */
    size_tlb = (env->spr[SPR_BOOKE_MAS1] & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
    size_ps = booke206_tlbnps(env, tlbn);
    if ((env->spr[SPR_BOOKE_MAS1] & MAS1_VALID) && (tlbncfg & TLBnCFG_AVAIL) &&
        !(size_ps & (1 << size_tlb))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL |
                               POWERPC_EXCP_INVAL_INVAL, GETPC());
    }

    if (msr_gs) {
        cpu_abort(env_cpu(env), "missing HV implementation\n");
    }

    if (tlb->mas1 & MAS1_VALID) {
        /*
         * Invalidate the page in the QEMU TLB if it was a valid entry.
         *
         * In "PowerPC e500 Core Family Reference Manual, Rev. 1",
         * Section "12.4.2 TLB Write Entry (tlbwe) Instruction":
         * (https://www.nxp.com/docs/en/reference-manual/E500CORERM.pdf)
         *
         * "Note that when an L2 TLB entry is written, it may be displacing an
         * already valid entry in the same L2 TLB location (a victim). If a
         * valid L1 TLB entry corresponds to the L2 MMU victim entry, that L1
         * TLB entry is automatically invalidated."
         */
        flush_page(env, tlb);
    }

    tlb->mas7_3 = ((uint64_t)env->spr[SPR_BOOKE_MAS7] << 32) |
                  env->spr[SPR_BOOKE_MAS3];
    tlb->mas1 = env->spr[SPR_BOOKE_MAS1];

    if ((env->spr[SPR_MMUCFG] & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
        /* For a TLB that has a fixed size, TSIZE is ignored with MAV2 */
        booke206_fixed_size_tlbn(env, tlbn, tlb);
    } else {
        if (!(tlbncfg & TLBnCFG_AVAIL)) {
            /* Force !AVAIL TLB entries to the correct page size */
            tlb->mas1 &= ~MAS1_TSIZE_MASK;
            /* XXX can be configured in MMUCSR0 */
            tlb->mas1 |= (tlbncfg & TLBnCFG_MINSIZE) >> 12;
        }
    }

    /* Make a mask from the TLB size to discard invalid bits in the EPN field */
    mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
    /* Add a mask for the page attributes */
    mask |= MAS2_ACM | MAS2_VLE | MAS2_W | MAS2_I | MAS2_M | MAS2_G | MAS2_E;

    if (!msr_cm) {
        /*
         * Executing a tlbwe instruction in 32-bit mode will set bits
         * 0:31 of the TLB EPN field to zero.
         */
        mask &= 0xffffffff;
    }

    tlb->mas2 = env->spr[SPR_BOOKE_MAS2] & mask;

    if (!(tlbncfg & TLBnCFG_IPROT)) {
        /* No IPROT supported by this TLB */
        tlb->mas1 &= ~MAS1_IPROT;
    }

    flush_page(env, tlb);
}

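/* Copy a TLB entry back into the MAS registers (used by tlbre and tlbsx) */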
static inline void booke206_tlb_to_mas(CPUPPCState *env, ppcmas_tlb_t *tlb)
{
    int tlbn = booke206_tlbm_to_tlbn(env, tlb);
    int way = booke206_tlbm_to_way(env, tlb);

    env->spr[SPR_BOOKE_MAS0] = tlbn << MAS0_TLBSEL_SHIFT;
    env->spr[SPR_BOOKE_MAS0] |= way << MAS0_ESEL_SHIFT;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;

    env->spr[SPR_BOOKE_MAS1] = tlb->mas1;
    env->spr[SPR_BOOKE_MAS2] = tlb->mas2;
    env->spr[SPR_BOOKE_MAS3] = tlb->mas7_3;
    env->spr[SPR_BOOKE_MAS7] = tlb->mas7_3 >> 32;
}

void helper_booke206_tlbre(CPUPPCState *env)
{
    ppcmas_tlb_t *tlb = NULL;

    tlb = booke206_cur_tlb(env);
    if (!tlb) {
        env->spr[SPR_BOOKE_MAS1] = 0;
    } else {
        booke206_tlb_to_mas(env, tlb);
    }
}

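/*
 * tlbsx: search all TLB arrays for an entry matching the effective address,
 * search PID and address space from MAS6; on a miss, preload the MAS
 * registers with the defaults from MAS4 and the next-victim hint.
 */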
void helper_booke206_tlbsx(CPUPPCState *env, target_ulong address)
{
    ppcmas_tlb_t *tlb = NULL;
    int i, j;
    hwaddr raddr;
    uint32_t spid, sas;

    spid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID_MASK) >> MAS6_SPID_SHIFT;
    sas = env->spr[SPR_BOOKE_MAS6] & MAS6_SAS;

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);

            if (!tlb) {
                continue;
            }

            if (ppcmas_tlb_check(env, tlb, &raddr, address, spid)) {
                continue;
            }

            if (sas != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
                continue;
            }

            booke206_tlb_to_mas(env, tlb);
            return;
        }
    }

    /* No entry found, fill with defaults */
    env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
    env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
    env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
    env->spr[SPR_BOOKE_MAS3] = 0;
    env->spr[SPR_BOOKE_MAS7] = 0;

    if (env->spr[SPR_BOOKE_MAS6] & MAS6_SAS) {
        env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
    }

    env->spr[SPR_BOOKE_MAS1] |= (env->spr[SPR_BOOKE_MAS6] >> 16)
                                << MAS1_TID_SHIFT;

    /* Next victim logic */
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
    env->last_way++;
    env->last_way &= booke206_tlb_ways(env, 0) - 1;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
}

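/* Invalidate every non-IPROT entry of the given TLB array that matches ea */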
static inline void booke206_invalidate_ea_tlb(CPUPPCState *env, int tlbn,
                                              vaddr ea)
{
    int i;
    int ways = booke206_tlb_ways(env, tlbn);
    target_ulong mask;

    for (i = 0; i < ways; i++) {
        ppcmas_tlb_t *tlb = booke206_get_tlbm(env, tlbn, ea, i);
        if (!tlb) {
            continue;
        }
        mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
        if (((tlb->mas2 & MAS2_EPN_MASK) == (ea & mask)) &&
            !(tlb->mas1 & MAS1_IPROT)) {
            tlb->mas1 &= ~MAS1_VALID;
        }
    }
}

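/*
 * tlbivax: bit 0x8 of the effective address selects TLB1 over TLB0, and
 * bit 0x4 requests a flush of the whole array instead of a single EA.
 */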
void helper_booke206_tlbivax(CPUPPCState *env, target_ulong address)
{
    CPUState *cs;

    if (address & 0x4) {
        /* flush all entries */
        if (address & 0x8) {
            /* flush all of TLB1 */
            booke206_flush_tlb(env, BOOKE206_FLUSH_TLB1, 1);
        } else {
            /* flush all of TLB0 */
            booke206_flush_tlb(env, BOOKE206_FLUSH_TLB0, 0);
        }
        return;
    }

    if (address & 0x8) {
        /* flush TLB1 entries */
        booke206_invalidate_ea_tlb(env, 1, address);
        CPU_FOREACH(cs) {
            tlb_flush(cs);
        }
    } else {
        /* flush TLB0 entries */
        booke206_invalidate_ea_tlb(env, 0, address);
        CPU_FOREACH(cs) {
            tlb_flush_page(cs, address & MAS2_EPN_MASK);
        }
    }
}

void helper_booke206_tlbilx0(CPUPPCState *env, target_ulong address)
{
    /* XXX missing LPID handling */
    booke206_flush_tlb(env, -1, 1);
}

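/* tlbilx 1: invalidate all non-protected entries matching the PID in MAS6 */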
void helper_booke206_tlbilx1(CPUPPCState *env, target_ulong address)
{
    int i, j;
    int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
    ppcmas_tlb_t *tlb = env->tlb.tlbm;
    int tlb_size;

    /* XXX missing LPID handling */
    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        tlb_size = booke206_tlb_size(env, i);
        for (j = 0; j < tlb_size; j++) {
            if (!(tlb[j].mas1 & MAS1_IPROT) &&
                ((tlb[j].mas1 & MAS1_TID_MASK) == tid)) {
                tlb[j].mas1 &= ~MAS1_VALID;
            }
        }
        tlb += booke206_tlb_size(env, i);
    }
    tlb_flush(env_cpu(env));
}

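/*
 * tlbilx 3: invalidate the entry (if any) matching the effective address
 * and the PID, GS and IND values programmed in MAS5/MAS6.
 */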
void helper_booke206_tlbilx3(CPUPPCState *env, target_ulong address)
{
    int i, j;
    ppcmas_tlb_t *tlb;
    int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
    int pid = tid >> MAS6_SPID_SHIFT;
    int sgs = env->spr[SPR_BOOKE_MAS5] & MAS5_SGS;
    int ind = (env->spr[SPR_BOOKE_MAS6] & MAS6_SIND) ? MAS1_IND : 0;
    /* XXX check for unsupported isize and raise an invalid opcode then */
    int size = env->spr[SPR_BOOKE_MAS6] & MAS6_ISIZE_MASK;
    /* XXX implement MAV2 handling */
    bool mav2 = false;

    /* XXX missing LPID handling */
    /* flush by pid and ea */
    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);
            if (!tlb) {
                continue;
            }
            if ((ppcmas_tlb_check(env, tlb, NULL, address, pid) != 0) ||
                (tlb->mas1 & MAS1_IPROT) ||
                ((tlb->mas1 & MAS1_IND) != ind) ||
                ((tlb->mas8 & MAS8_TGS) != sgs)) {
                continue;
            }
            if (mav2 && ((tlb->mas1 & MAS1_TSIZE_MASK) != size)) {
                /* XXX only check when MMUCFG[TWC] || TLBnCFG[HES] */
                continue;
            }
            /* XXX e500mc doesn't match SAS, but other cores might */
            tlb->mas1 &= ~MAS1_VALID;
        }
    }
    tlb_flush(env_cpu(env));
}

void helper_booke206_tlbflush(CPUPPCState *env, target_ulong type)
{
    int flags = 0;

    if (type & 2) {
        flags |= BOOKE206_FLUSH_TLB1;
    }

    if (type & 4) {
        flags |= BOOKE206_FLUSH_TLB0;
    }

    booke206_flush_tlb(env, flags, 1);
}

void helper_check_tlb_flush_local(CPUPPCState *env)
{
    check_tlb_flush(env, false);
}

void helper_check_tlb_flush_global(CPUPPCState *env)
{
    check_tlb_flush(env, true);
}

/*****************************************************************************/

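/*
 * TCG TLB fill hook: translate the access and return true on success; for a
 * failed probe access return false, otherwise raise the exception recorded
 * in cs->exception_index / env->error_code by the translation code.
 */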
bool ppc_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
    CPUPPCState *env = &cpu->env;
    int ret;

    if (pcc->handle_mmu_fault) {
        ret = pcc->handle_mmu_fault(cpu, addr, access_type, mmu_idx);
    } else {
        ret = cpu_ppc_handle_mmu_fault(env, addr, access_type, mmu_idx);
    }
    if (unlikely(ret != 0)) {
        if (probe) {
            return false;
        }
        raise_exception_err_ra(env, cs->exception_index, env->error_code,