* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
+
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "cpu.h"
#include "exec/log.h"
#include "helper_regs.h"
#include "qemu/error-report.h"
+#include "qemu/main-loop.h"
+#include "qemu/qemu-print.h"
#include "mmu-book3s-v3.h"
#include "mmu-radix64.h"
-//#define DEBUG_MMU
-//#define DEBUG_BATS
-//#define DEBUG_SOFTWARE_TLB
-//#define DUMP_PAGE_TABLES
-//#define FLUSH_ALL_TLBS
+/* #define DEBUG_MMU */
+/* #define DEBUG_BATS */
+/* #define DEBUG_SOFTWARE_TLB */
+/* #define DUMP_PAGE_TABLES */
+/* #define FLUSH_ALL_TLBS */
#ifdef DEBUG_MMU
# define LOG_MMU_STATE(cpu) log_cpu_state_mask(CPU_LOG_MMU, (cpu), 0)
}
static inline int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0,
- target_ulong pte1, int h, int rw, int type)
+ target_ulong pte1, int h,
+ int rw, int type)
{
target_ulong ptem, mmask;
int access, ret, pteh, ptev, pp;
static inline void ppc6xx_tlb_invalidate_all(CPUPPCState *env)
{
- PowerPCCPU *cpu = ppc_env_get_cpu(env);
ppc6xx_tlb_t *tlb;
int nr, max;
tlb = &env->tlb.tlb6[nr];
pte_invalidate(&tlb->pte0);
}
- tlb_flush(CPU(cpu));
+ tlb_flush(env_cpu(env));
}
static inline void ppc6xx_tlb_invalidate_virt2(CPUPPCState *env,
int is_code, int match_epn)
{
#if !defined(FLUSH_ALL_TLBS)
- CPUState *cs = CPU(ppc_env_get_cpu(env));
+ CPUState *cs = env_cpu(env);
ppc6xx_tlb_t *tlb;
int way, nr;
pte_is_valid(tlb->pte0) ? "valid" : "inval",
tlb->EPN, eaddr, tlb->pte1,
rw ? 'S' : 'L', access_type == ACCESS_CODE ? 'I' : 'D');
- switch (ppc6xx_tlb_pte_check(ctx, tlb->pte0, tlb->pte1, 0, rw, access_type)) {
+ switch (ppc6xx_tlb_pte_check(ctx, tlb->pte0, tlb->pte1,
+ 0, rw, access_type)) {
case -3:
/* TLB inconsistency */
return -1;
break;
case 0:
/* access granted */
- /* XXX: we should go on looping to check all TLBs consistency
- * but we can speed-up the whole thing as the
- * result would be undefined if TLBs are not consistent.
+ /*
+ * XXX: we should go on looping to check the consistency of
+ * all TLBs, but we can speed up the whole thing as the
+ * result would be undefined if TLBs are not consistent.
+ */
ret = 0;
best = nr;
static inline int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
target_ulong eaddr, int rw, int type)
{
- PowerPCCPU *cpu = ppc_env_get_cpu(env);
+ PowerPCCPU *cpu = env_archcpu(env);
hwaddr hash;
target_ulong vsid;
int ds, pr, target_page_bits;
ret = ppc6xx_tlb_check(env, ctx, eaddr, rw, type);
#if defined(DUMP_PAGE_TABLES)
if (qemu_loglevel_mask(CPU_LOG_MMU)) {
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = env_cpu(env);
hwaddr curaddr;
uint32_t a0, a1, a2, a3;
qemu_log_mask(CPU_LOG_MMU, "direct store...\n");
/* Direct-store segment : absolutely *BUGGY* for now */
- /* Direct-store implies a 32-bit MMU.
+ /*
+ * Direct-store implies a 32-bit MMU.
* Check the Segment Register's bus unit ID (BUID).
*/
sr = env->sr[eaddr >> 28];
if ((sr & 0x1FF00000) >> 20 == 0x07f) {
- /* Memory-forced I/O controller interface access */
- /* If T=1 and BUID=x'07F', the 601 performs a memory access
- * to SR[28-31] LA[4-31], bypassing all protection mechanisms.
+ /*
+ * Memory-forced I/O controller interface access
+ *
+ * If T=1 and BUID=x'07F', the 601 performs a memory
+ * access to SR[28-31] LA[4-31], bypassing all protection
+ * mechanisms.
*/
ctx->raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF);
ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
/* lwarx, ldarx or srwcx. */
return -4;
case ACCESS_CACHE:
- /* dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi */
- /* Should make the instruction do no-op.
- * As it already do no-op, it's quite easy :-)
+ /*
+ * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi
+ *
+ * Should make the instruction do a no-op. As it already does
+ * a no-op, it's quite easy :-)
+ */
ctx->raddr = eaddr;
return 0;
/* Helpers specific to PowerPC 40x implementations */
static inline void ppc4xx_tlb_invalidate_all(CPUPPCState *env)
{
- PowerPCCPU *cpu = ppc_env_get_cpu(env);
ppcemb_tlb_t *tlb;
int i;
tlb = &env->tlb.tlbe[i];
tlb->prot &= ~PAGE_VALID;
}
- tlb_flush(CPU(cpu));
+ tlb_flush(env_cpu(env));
}
static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
void store_40x_sler(CPUPPCState *env, uint32_t val)
{
- PowerPCCPU *cpu = ppc_env_get_cpu(env);
-
/* XXX: TO BE FIXED */
if (val != 0x00000000) {
- cpu_abort(CPU(cpu), "Little-endian regions are not supported by now\n");
+ cpu_abort(env_cpu(env),
+ "Little-endian regions are not supported by now\n");
}
env->spr[SPR_405_SLER] = val;
}
static void booke206_flush_tlb(CPUPPCState *env, int flags,
const int check_iprot)
{
- PowerPCCPU *cpu = ppc_env_get_cpu(env);
int tlb_size;
int i, j;
ppcmas_tlb_t *tlb = env->tlb.tlbm;
tlb += booke206_tlb_size(env, i);
}
- tlb_flush(CPU(cpu));
+ tlb_flush(env_cpu(env));
}
static hwaddr booke206_tlb_to_page_size(CPUPPCState *env,
return esr;
}
-/* Get EPID register given the mmu_idx. If this is regular load,
- * construct the EPID access bits from current processor state */
-
-/* Get the effective AS and PR bits and the PID. The PID is returned only if
- * EPID load is requested, otherwise the caller must detect the correct EPID.
- * Return true if valid EPID is returned. */
+/*
+ * Get EPID register given the mmu_idx. If this is regular load,
+ * construct the EPID access bits from current processor state
+ *
+ * Get the effective AS and PR bits and the PID. The PID is returned
+ * only if EPID load is requested, otherwise the caller must detect
+ * the correct EPID. Return true if valid EPID is returned.
+ */
static bool mmubooke206_get_as(CPUPPCState *env,
int mmu_idx, uint32_t *epid_out,
bool *as_out, bool *pr_out)
"1T", "2T"
};
-static void mmubooke_dump_mmu(FILE *f, fprintf_function cpu_fprintf,
- CPUPPCState *env)
+static void mmubooke_dump_mmu(CPUPPCState *env)
{
ppcemb_tlb_t *entry;
int i;
if (kvm_enabled() && !env->kvm_sw_tlb) {
- cpu_fprintf(f, "Cannot access KVM TLB\n");
+ qemu_printf("Cannot access KVM TLB\n");
return;
}
- cpu_fprintf(f, "\nTLB:\n");
- cpu_fprintf(f, "Effective Physical Size PID Prot "
+ qemu_printf("\nTLB:\n");
+ qemu_printf("Effective Physical Size PID Prot "
"Attr\n");
entry = &env->tlb.tlbe[0];
} else {
snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "k", size / KiB);
}
- cpu_fprintf(f, "0x%016" PRIx64 " 0x%016" PRIx64 " %s %-5u %08x %08x\n",
+ qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %s %-5u %08x %08x\n",
(uint64_t)ea, (uint64_t)pa, size_buf, (uint32_t)entry->PID,
entry->prot, entry->attr);
}
}
-static void mmubooke206_dump_one_tlb(FILE *f, fprintf_function cpu_fprintf,
- CPUPPCState *env, int tlbn, int offset,
+static void mmubooke206_dump_one_tlb(CPUPPCState *env, int tlbn, int offset,
int tlbsize)
{
ppcmas_tlb_t *entry;
int i;
- cpu_fprintf(f, "\nTLB%d:\n", tlbn);
- cpu_fprintf(f, "Effective Physical Size TID TS SRWX"
+ qemu_printf("\nTLB%d:\n", tlbn);
+ qemu_printf("Effective Physical Size TID TS SRWX"
" URWX WIMGE U0123\n");
entry = &env->tlb.tlbm[offset];
ea = entry->mas2 & ~(size - 1);
pa = entry->mas7_3 & ~(size - 1);
- cpu_fprintf(f, "0x%016" PRIx64 " 0x%016" PRIx64 " %4s %-5u %1u S%c%c%c"
+ qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %4s %-5u %1u S%c%c%c"
"U%c%c%c %c%c%c%c%c U%c%c%c%c\n",
(uint64_t)ea, (uint64_t)pa,
book3e_tsize_to_str[tsize],
}
}
-static void mmubooke206_dump_mmu(FILE *f, fprintf_function cpu_fprintf,
- CPUPPCState *env)
+static void mmubooke206_dump_mmu(CPUPPCState *env)
{
int offset = 0;
int i;
if (kvm_enabled() && !env->kvm_sw_tlb) {
- cpu_fprintf(f, "Cannot access KVM TLB\n");
+ qemu_printf("Cannot access KVM TLB\n");
return;
}
continue;
}
- mmubooke206_dump_one_tlb(f, cpu_fprintf, env, i, offset, size);
+ mmubooke206_dump_one_tlb(env, i, offset, size);
offset += size;
}
}
-static void mmu6xx_dump_BATs(FILE *f, fprintf_function cpu_fprintf,
- CPUPPCState *env, int type)
+static void mmu6xx_dump_BATs(CPUPPCState *env, int type)
{
target_ulong *BATlt, *BATut, *BATu, *BATl;
target_ulong BEPIl, BEPIu, bl;
BEPIu = *BATu & 0xF0000000;
BEPIl = *BATu & 0x0FFE0000;
bl = (*BATu & 0x00001FFC) << 15;
- cpu_fprintf(f, "%s BAT%d BATu " TARGET_FMT_lx
+ qemu_printf("%s BAT%d BATu " TARGET_FMT_lx
" BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
TARGET_FMT_lx " " TARGET_FMT_lx "\n",
type == ACCESS_CODE ? "code" : "data", i,
}
}
-static void mmu6xx_dump_mmu(FILE *f, fprintf_function cpu_fprintf,
- CPUPPCState *env)
+static void mmu6xx_dump_mmu(CPUPPCState *env)
{
- PowerPCCPU *cpu = ppc_env_get_cpu(env);
+ PowerPCCPU *cpu = env_archcpu(env);
ppc6xx_tlb_t *tlb;
target_ulong sr;
int type, way, entry, i;
- cpu_fprintf(f, "HTAB base = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_base(cpu));
- cpu_fprintf(f, "HTAB mask = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_mask(cpu));
+ qemu_printf("HTAB base = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_base(cpu));
+ qemu_printf("HTAB mask = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_mask(cpu));
- cpu_fprintf(f, "\nSegment registers:\n");
+ qemu_printf("\nSegment registers:\n");
for (i = 0; i < 32; i++) {
sr = env->sr[i];
if (sr & 0x80000000) {
- cpu_fprintf(f, "%02d T=%d Ks=%d Kp=%d BUID=0x%03x "
+ qemu_printf("%02d T=%d Ks=%d Kp=%d BUID=0x%03x "
"CNTLR_SPEC=0x%05x\n", i,
sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0,
sr & 0x20000000 ? 1 : 0, (uint32_t)((sr >> 20) & 0x1FF),
(uint32_t)(sr & 0xFFFFF));
} else {
- cpu_fprintf(f, "%02d T=%d Ks=%d Kp=%d N=%d VSID=0x%06x\n", i,
+ qemu_printf("%02d T=%d Ks=%d Kp=%d N=%d VSID=0x%06x\n", i,
sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0,
sr & 0x20000000 ? 1 : 0, sr & 0x10000000 ? 1 : 0,
(uint32_t)(sr & 0x00FFFFFF));
}
}
- cpu_fprintf(f, "\nBATs:\n");
- mmu6xx_dump_BATs(f, cpu_fprintf, env, ACCESS_INT);
- mmu6xx_dump_BATs(f, cpu_fprintf, env, ACCESS_CODE);
+ qemu_printf("\nBATs:\n");
+ mmu6xx_dump_BATs(env, ACCESS_INT);
+ mmu6xx_dump_BATs(env, ACCESS_CODE);
if (env->id_tlbs != 1) {
- cpu_fprintf(f, "ERROR: 6xx MMU should have separated TLB"
+ qemu_printf("ERROR: 6xx MMU should have separated TLB"
" for code and data\n");
}
- cpu_fprintf(f, "\nTLBs [EPN EPN + SIZE]\n");
+ qemu_printf("\nTLBs [EPN EPN + SIZE]\n");
for (type = 0; type < 2; type++) {
for (way = 0; way < env->nb_ways; way++) {
entry++) {
tlb = &env->tlb.tlb6[entry];
- cpu_fprintf(f, "%s TLB %02d/%02d way:%d %s ["
+ qemu_printf("%s TLB %02d/%02d way:%d %s ["
TARGET_FMT_lx " " TARGET_FMT_lx "]\n",
type ? "code" : "data", entry % env->nb_tlb,
env->nb_tlb, way,
}
}
-void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUPPCState *env)
+void dump_mmu(CPUPPCState *env)
{
switch (env->mmu_model) {
case POWERPC_MMU_BOOKE:
- mmubooke_dump_mmu(f, cpu_fprintf, env);
+ mmubooke_dump_mmu(env);
break;
case POWERPC_MMU_BOOKE206:
- mmubooke206_dump_mmu(f, cpu_fprintf, env);
+ mmubooke206_dump_mmu(env);
break;
case POWERPC_MMU_SOFT_6xx:
case POWERPC_MMU_SOFT_74xx:
- mmu6xx_dump_mmu(f, cpu_fprintf, env);
+ mmu6xx_dump_mmu(env);
break;
#if defined(TARGET_PPC64)
case POWERPC_MMU_64B:
case POWERPC_MMU_2_03:
case POWERPC_MMU_2_06:
case POWERPC_MMU_2_07:
- dump_slb(f, cpu_fprintf, ppc_env_get_cpu(env));
+ dump_slb(env_archcpu(env));
break;
case POWERPC_MMU_3_00:
- if (ppc64_v3_radix(ppc_env_get_cpu(env))) {
+ if (ppc64_v3_radix(env_archcpu(env))) {
/* TODO - Unsupported */
} else {
- dump_slb(f, cpu_fprintf, ppc_env_get_cpu(env));
+ dump_slb(env_archcpu(env));
break;
}
#endif
case POWERPC_MMU_SOFT_4xx_Z:
if (unlikely(msr_pe != 0)) {
- /* 403 family add some particular protections,
- * using PBL/PBU registers for accesses with no translation.
+ /*
+ * The 403 family adds some particular protections, using
+ * PBL/PBU registers for accesses with no translation.
+ */
in_plb =
/* Check PLB validity */
target_ulong eaddr, int rw, int access_type,
int mmu_idx)
{
- PowerPCCPU *cpu = ppc_env_get_cpu(env);
int ret = -1;
bool real_mode = (access_type == ACCESS_CODE && msr_ir == 0)
|| (access_type != ACCESS_CODE && msr_dr == 0);
break;
case POWERPC_MMU_MPC8xx:
/* XXX: TODO */
- cpu_abort(CPU(cpu), "MPC8xx MMU model is not implemented\n");
+ cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n");
break;
case POWERPC_MMU_REAL:
if (real_mode) {
ret = check_physical(env, ctx, eaddr, rw);
} else {
- cpu_abort(CPU(cpu), "PowerPC in real mode do not do any translation\n");
+ cpu_abort(env_cpu(env),
+ "PowerPC in real mode do not do any translation\n");
}
return -1;
default:
- cpu_abort(CPU(cpu), "Unknown or invalid MMU model\n");
+ cpu_abort(env_cpu(env), "Unknown or invalid MMU model\n");
return -1;
}
if (unlikely(get_physical_address(env, &ctx, addr, 0, ACCESS_INT) != 0)) {
- /* Some MMUs have separate TLBs for code and data. If we only try an
- * ACCESS_INT, we may not be able to read instructions mapped by code
- * TLBs, so we also try a ACCESS_CODE.
+ /*
+ * Some MMUs have separate TLBs for code and data. If we only
+ * try an ACCESS_INT, we may not be able to read instructions
+ * mapped by code TLBs, so we also try an ACCESS_CODE.
*/
if (unlikely(get_physical_address(env, &ctx, addr, 0,
ACCESS_CODE) != 0)) {
static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address,
int rw, int mmu_idx)
{
- CPUState *cs = CPU(ppc_env_get_cpu(env));
+ CPUState *cs = env_cpu(env);
PowerPCCPU *cpu = POWERPC_CPU(cs);
mmu_ctx_t ctx;
int access_type;
static inline void do_invalidate_BAT(CPUPPCState *env, target_ulong BATu,
target_ulong mask)
{
- CPUState *cs = CPU(ppc_env_get_cpu(env));
+ CPUState *cs = env_cpu(env);
target_ulong base, end, page;
base = BATu & ~0x0001FFFF;
end = base + mask + 0x00020000;
+ if (((end - base) >> TARGET_PAGE_BITS) > 1024) {
+ /* Flushing 1024 4K pages is slower than a complete flush */
+ LOG_BATS("Flush all BATs\n");
+ tlb_flush(cs);
+ LOG_BATS("Flush done\n");
+ return;
+ }
LOG_BATS("Flush BAT from " TARGET_FMT_lx " to " TARGET_FMT_lx " ("
TARGET_FMT_lx ")\n", base, end, mask);
for (page = base; page != end; page += TARGET_PAGE_SIZE) {
{
target_ulong mask;
#if defined(FLUSH_ALL_TLBS)
- PowerPCCPU *cpu = ppc_env_get_cpu(env);
+ PowerPCCPU *cpu = env_archcpu(env);
#endif
dump_store_bat(env, 'I', 0, nr, value);
#if !defined(FLUSH_ALL_TLBS)
do_invalidate_BAT(env, env->IBAT[0][nr], mask);
#endif
- /* When storing valid upper BAT, mask BEPI and BRPN
- * and invalidate all TLBs covered by this BAT
+ /*
+ * When storing valid upper BAT, mask BEPI and BRPN and
+ * invalidate all TLBs covered by this BAT
*/
mask = (value << 15) & 0x0FFE0000UL;
env->IBAT[0][nr] = (value & 0x00001FFFUL) |
#if !defined(FLUSH_ALL_TLBS)
do_invalidate_BAT(env, env->IBAT[0][nr], mask);
#else
- tlb_flush(CPU(cpu));
+ tlb_flush(env_cpu(env));
#endif
}
}
{
target_ulong mask;
#if defined(FLUSH_ALL_TLBS)
- PowerPCCPU *cpu = ppc_env_get_cpu(env);
+ PowerPCCPU *cpu = env_archcpu(env);
#endif
dump_store_bat(env, 'D', 0, nr, value);
if (env->DBAT[0][nr] != value) {
- /* When storing valid upper BAT, mask BEPI and BRPN
- * and invalidate all TLBs covered by this BAT
+ /*
+ * When storing valid upper BAT, mask BEPI and BRPN and
+ * invalidate all TLBs covered by this BAT
*/
mask = (value << 15) & 0x0FFE0000UL;
#if !defined(FLUSH_ALL_TLBS)
#if !defined(FLUSH_ALL_TLBS)
do_invalidate_BAT(env, env->DBAT[0][nr], mask);
#else
- tlb_flush(CPU(cpu));
+ tlb_flush(env_cpu(env));
#endif
}
}
{
target_ulong mask;
#if defined(FLUSH_ALL_TLBS)
- PowerPCCPU *cpu = ppc_env_get_cpu(env);
+ PowerPCCPU *cpu = env_archcpu(env);
int do_inval;
#endif
do_inval = 1;
#endif
}
- /* When storing valid upper BAT, mask BEPI and BRPN
- * and invalidate all TLBs covered by this BAT
+ /*
+ * When storing valid upper BAT, mask BEPI and BRPN and
+ * invalidate all TLBs covered by this BAT
*/
env->IBAT[0][nr] = (value & 0x00001FFFUL) |
(value & ~0x0001FFFFUL & ~mask);
}
#if defined(FLUSH_ALL_TLBS)
if (do_inval) {
- tlb_flush(CPU(cpu));
+ tlb_flush(env_cpu(env));
}
#endif
}
#if !defined(FLUSH_ALL_TLBS)
target_ulong mask;
#else
- PowerPCCPU *cpu = ppc_env_get_cpu(env);
+ PowerPCCPU *cpu = env_archcpu(env);
int do_inval;
#endif
env->DBAT[1][nr] = value;
#if defined(FLUSH_ALL_TLBS)
if (do_inval) {
- tlb_flush(CPU(cpu));
+ tlb_flush(env_cpu(env));
}
#endif
}
/* TLB management */
void ppc_tlb_invalidate_all(CPUPPCState *env)
{
- PowerPCCPU *cpu = ppc_env_get_cpu(env);
-
#if defined(TARGET_PPC64)
if (env->mmu_model & POWERPC_MMU_64) {
env->tlb_need_flush = 0;
- tlb_flush(CPU(cpu));
+ tlb_flush(env_cpu(env));
} else
#endif /* defined(TARGET_PPC64) */
switch (env->mmu_model) {
ppc4xx_tlb_invalidate_all(env);
break;
case POWERPC_MMU_REAL:
- cpu_abort(CPU(cpu), "No TLB for PowerPC 4xx in real mode\n");
+ cpu_abort(env_cpu(env), "No TLB for PowerPC 4xx in real mode\n");
break;
case POWERPC_MMU_MPC8xx:
/* XXX: TODO */
- cpu_abort(CPU(cpu), "MPC8xx MMU model is not implemented\n");
+ cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n");
break;
case POWERPC_MMU_BOOKE:
- tlb_flush(CPU(cpu));
+ tlb_flush(env_cpu(env));
break;
case POWERPC_MMU_BOOKE206:
booke206_flush_tlb(env, -1, 0);
case POWERPC_MMU_32B:
case POWERPC_MMU_601:
env->tlb_need_flush = 0;
- tlb_flush(CPU(cpu));
+ tlb_flush(env_cpu(env));
break;
default:
/* XXX: TODO */
- cpu_abort(CPU(cpu), "Unknown MMU model %x\n", env->mmu_model);
+ cpu_abort(env_cpu(env), "Unknown MMU model %x\n", env->mmu_model);
break;
}
}
#if defined(TARGET_PPC64)
if (env->mmu_model & POWERPC_MMU_64) {
/* tlbie invalidate TLBs for all segments */
- /* XXX: given the fact that there are too many segments to invalidate,
+ /*
+ * XXX: given the fact that there are too many segments to invalidate,
* and we still don't have a tlb_flush_mask(env, n, mask) in QEMU,
* we just invalidate all TLBs
*/
break;
case POWERPC_MMU_32B:
case POWERPC_MMU_601:
- /* Actual CPUs invalidate entire congruence classes based on the
- * geometry of their TLBs and some OSes take that into account,
- * we just mark the TLB to be flushed later (context synchronizing
- * event or sync instruction on 32-bit).
+ /*
+ * Actual CPUs invalidate entire congruence classes based on
+ * the geometry of their TLBs and some OSes take that into
+ * account; we just mark the TLB to be flushed later (context
+ * synchronizing event or sync instruction on 32-bit).
+ */
env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
break;
/* Special registers manipulation */
void ppc_store_sdr1(CPUPPCState *env, target_ulong value)
{
- PowerPCCPU *cpu = ppc_env_get_cpu(env);
+ PowerPCCPU *cpu = env_archcpu(env);
qemu_log_mask(CPU_LOG_MMU, "%s: " TARGET_FMT_lx "\n", __func__, value);
assert(!cpu->vhyp);
#if defined(TARGET_PPC64)
#if defined(TARGET_PPC64)
void ppc_store_ptcr(CPUPPCState *env, target_ulong value)
{
- PowerPCCPU *cpu = ppc_env_get_cpu(env);
+ PowerPCCPU *cpu = env_archcpu(env);
target_ulong ptcr_mask = PTCR_PATB | PTCR_PATS;
target_ulong patbsize = value & PTCR_PATS;
(int)srnum, value, env->sr[srnum]);
#if defined(TARGET_PPC64)
if (env->mmu_model & POWERPC_MMU_64) {
- PowerPCCPU *cpu = ppc_env_get_cpu(env);
+ PowerPCCPU *cpu = env_archcpu(env);
uint64_t esid, vsid;
/* ESID = srnum */
#endif
if (env->sr[srnum] != value) {
env->sr[srnum] = value;
-/* Invalidating 256MB of virtual memory in 4kB pages is way longer than
- flusing the whole TLB. */
+ /*
+ * Invalidating 256MB of virtual memory in 4kB pages takes way
+ * longer than flushing the whole TLB.
+ */
#if !defined(FLUSH_ALL_TLBS) && 0
{
target_ulong page, end;
page = (16 << 20) * srnum;
end = page + (16 << 20);
for (; page != end; page += TARGET_PAGE_SIZE) {
- tlb_flush_page(CPU(cpu), page);
+ tlb_flush_page(env_cpu(env), page);
}
}
#else
void helper_tlbiva(CPUPPCState *env, target_ulong addr)
{
- PowerPCCPU *cpu = ppc_env_get_cpu(env);
-
/* tlbiva instruction only exists on BookE */
assert(env->mmu_model == POWERPC_MMU_BOOKE);
/* XXX: TODO */
- cpu_abort(CPU(cpu), "BookE MMU model is not implemented\n");
+ cpu_abort(env_cpu(env), "BookE MMU model is not implemented\n");
}
/* Software driven TLBs management */
int nb_BATs;
target_ulong ret = 0;
- /* We don't have to generate many instances of this instruction,
+ /*
+ * We don't have to generate many instances of this instruction,
* as rac is supervisor only.
+ *
+ * XXX: FIX THIS: Pretend we have no BAT
*/
- /* XXX: FIX THIS: Pretend we have no BAT */
nb_BATs = env->nb_BATs;
env->nb_BATs = 0;
if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0) {
void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry,
target_ulong val)
{
- PowerPCCPU *cpu = ppc_env_get_cpu(env);
- CPUState *cs = CPU(cpu);
+ CPUState *cs = env_cpu(env);
ppcemb_tlb_t *tlb;
target_ulong page, end;
}
tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
& PPC4XX_TLBHI_SIZE_MASK);
- /* We cannot handle TLB size < TARGET_PAGE_SIZE.
+ /*
+ * We cannot handle TLB size < TARGET_PAGE_SIZE.
* If this ever occurs, we should implement TARGET_PAGE_BITS_VARY
*/
if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
void helper_440_tlbwe(CPUPPCState *env, uint32_t word, target_ulong entry,
target_ulong value)
{
- PowerPCCPU *cpu = ppc_env_get_cpu(env);
ppcemb_tlb_t *tlb;
target_ulong EPN, RPN, size;
int do_flush_tlbs;
}
tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
if (do_flush_tlbs) {
- tlb_flush(CPU(cpu));
+ tlb_flush(env_cpu(env));
}
break;
case 1:
RPN = value & 0xFFFFFC0F;
if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN) {
- tlb_flush(CPU(cpu));
+ tlb_flush(env_cpu(env));
}
tlb->RPN = RPN;
break;
static ppcmas_tlb_t *booke206_cur_tlb(CPUPPCState *env)
{
- PowerPCCPU *cpu = ppc_env_get_cpu(env);
uint32_t tlbncfg = 0;
int esel = (env->spr[SPR_BOOKE_MAS0] & MAS0_ESEL_MASK) >> MAS0_ESEL_SHIFT;
int ea = (env->spr[SPR_BOOKE_MAS2] & MAS2_EPN_MASK);
tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlb];
if ((tlbncfg & TLBnCFG_HES) && (env->spr[SPR_BOOKE_MAS0] & MAS0_HES)) {
- cpu_abort(CPU(cpu), "we don't support HES yet\n");
+ cpu_abort(env_cpu(env), "we don't support HES yet\n");
}
return booke206_get_tlbm(env, tlb, ea, esel);
void helper_booke_setpid(CPUPPCState *env, uint32_t pidn, target_ulong pid)
{
- PowerPCCPU *cpu = ppc_env_get_cpu(env);
-
env->spr[pidn] = pid;
/* changing PIDs mean we're in a different address space now */
- tlb_flush(CPU(cpu));
+ tlb_flush(env_cpu(env));
}
void helper_booke_set_eplc(CPUPPCState *env, target_ulong val)
{
- PowerPCCPU *cpu = ppc_env_get_cpu(env);
env->spr[SPR_BOOKE_EPLC] = val & EPID_MASK;
- tlb_flush_by_mmuidx(CPU(cpu), 1 << PPC_TLB_EPID_LOAD);
+ tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_LOAD);
}
void helper_booke_set_epsc(CPUPPCState *env, target_ulong val)
{
- PowerPCCPU *cpu = ppc_env_get_cpu(env);
env->spr[SPR_BOOKE_EPSC] = val & EPID_MASK;
- tlb_flush_by_mmuidx(CPU(cpu), 1 << PPC_TLB_EPID_STORE);
+ tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_STORE);
}
static inline void flush_page(CPUPPCState *env, ppcmas_tlb_t *tlb)
{
- PowerPCCPU *cpu = ppc_env_get_cpu(env);
-
if (booke206_tlb_to_page_size(env, tlb) == TARGET_PAGE_SIZE) {
- tlb_flush_page(CPU(cpu), tlb->mas2 & MAS2_EPN_MASK);
+ tlb_flush_page(env_cpu(env), tlb->mas2 & MAS2_EPN_MASK);
} else {
- tlb_flush(CPU(cpu));
+ tlb_flush(env_cpu(env));
}
}
void helper_booke206_tlbwe(CPUPPCState *env)
{
- PowerPCCPU *cpu = ppc_env_get_cpu(env);
uint32_t tlbncfg, tlbn;
ppcmas_tlb_t *tlb;
uint32_t size_tlb, size_ps;
}
if (msr_gs) {
- cpu_abort(CPU(cpu), "missing HV implementation\n");
+ cpu_abort(env_cpu(env), "missing HV implementation\n");
}
if (tlb->mas1 & MAS1_VALID) {
- /* Invalidate the page in QEMU TLB if it was a valid entry.
+ /*
+ * Invalidate the page in QEMU TLB if it was a valid entry.
*
* In "PowerPC e500 Core Family Reference Manual, Rev. 1",
* Section "12.4.2 TLB Write Entry (tlbwe) Instruction":
* "Note that when an L2 TLB entry is written, it may be displacing an
* already valid entry in the same L2 TLB location (a victim). If a
* valid L1 TLB entry corresponds to the L2 MMU victim entry, that L1
- * TLB entry is automatically invalidated." */
+ * TLB entry is automatically invalidated."
+ */
flush_page(env, tlb);
}
mask |= MAS2_ACM | MAS2_VLE | MAS2_W | MAS2_I | MAS2_M | MAS2_G | MAS2_E;
if (!msr_cm) {
- /* Executing a tlbwe instruction in 32-bit mode will set
- * bits 0:31 of the TLB EPN field to zero.
+ /*
+ * Executing a tlbwe instruction in 32-bit mode will set bits
+ * 0:31 of the TLB EPN field to zero.
*/
mask &= 0xffffffff;
}
void helper_booke206_tlbilx1(CPUPPCState *env, target_ulong address)
{
- PowerPCCPU *cpu = ppc_env_get_cpu(env);
int i, j;
int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
ppcmas_tlb_t *tlb = env->tlb.tlbm;
}
tlb += booke206_tlb_size(env, i);
}
- tlb_flush(CPU(cpu));
+ tlb_flush(env_cpu(env));
}
void helper_booke206_tlbilx3(CPUPPCState *env, target_ulong address)
{
- PowerPCCPU *cpu = ppc_env_get_cpu(env);
int i, j;
ppcmas_tlb_t *tlb;
int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
tlb->mas1 &= ~MAS1_VALID;
}
}
- tlb_flush(CPU(cpu));
+ tlb_flush(env_cpu(env));
}
void helper_booke206_tlbflush(CPUPPCState *env, target_ulong type)
/*****************************************************************************/
-/* try to fill the TLB and return an exception if error. If retaddr is
- NULL, it means that the function was called in C code (i.e. not
- from generated code or from helper.c) */
-/* XXX: fix it to restore all registers */
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
+bool ppc_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
ret = cpu_ppc_handle_mmu_fault(env, addr, access_type, mmu_idx);
}
if (unlikely(ret != 0)) {
+ if (probe) {
+ return false;
+ }
raise_exception_err_ra(env, cs->exception_index, env->error_code,
retaddr);
}
+ return true;
}