"1T", "2T"
};
+static void mmubooke_dump_mmu(FILE *f, fprintf_function cpu_fprintf,
+                              CPUPPCState *env)
+{
+    ppcemb_tlb_t *entry;
+    int i;
+
+    if (kvm_enabled() && !env->kvm_sw_tlb) {
+        cpu_fprintf(f, "Cannot access KVM TLB\n");
+        return;
+    }
+
+    cpu_fprintf(f, "\nTLB:\n");
+    cpu_fprintf(f, "Effective          Physical           Size PID   Prot     "
+                "Attr\n");
+
+    entry = &env->tlb.tlbe[0];
+    for (i = 0; i < env->nb_tlb; i++, entry++) {
+        target_phys_addr_t ea, pa;
+        target_ulong mask;
+        uint64_t size = (uint64_t)entry->size;
+        char size_buf[20];
+
+        /* Check valid flag */
+        if (!(entry->prot & PAGE_VALID)) {
+            continue;
+        }
+
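+        /* sizes are powers of two; mask off the in-page offset bits */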
+        mask = ~(entry->size - 1);
+        ea = entry->EPN & mask;
+        pa = entry->RPN & mask;
+#if (TARGET_PHYS_ADDR_BITS >= 36)
+        /* Extend the physical address to 36 bits */
+        pa |= (target_phys_addr_t)(entry->RPN & 0xF) << 32;
+#endif
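+        /* print the size in KiB, or in MiB once it reaches 1 MiB */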
+        size /= 1024;
+        if (size >= 1024) {
+            snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "M", size / 1024);
+        } else {
+            snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "k", size);
+        }
+        cpu_fprintf(f, "0x%016" PRIx64 " 0x%016" PRIx64 " %s %-5u %08x %08x\n",
+                    (uint64_t)ea, (uint64_t)pa, size_buf, (uint32_t)entry->PID,
+                    entry->prot, entry->attr);
+    }
+}
+
static void mmubooke206_dump_one_tlb(FILE *f, fprintf_function cpu_fprintf,
                                     CPUPPCState *env, int tlbn, int offset,
                                     int tlbsize)
void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUPPCState *env)
{
    switch (env->mmu_model) {
+    case POWERPC_MMU_BOOKE:
+        mmubooke_dump_mmu(f, cpu_fprintf, env);
+        break;
    case POWERPC_MMU_BOOKE206:
        mmubooke206_dump_mmu(f, cpu_fprintf, env);
        break;
    if (asrr1 != -1)
        env->spr[asrr1] = env->spr[srr1];
    /* If we disactivated any translation, flush TLBs */
-    if (new_msr & ((1 << MSR_IR) | (1 << MSR_DR)))
+    if (msr & ((1 << MSR_IR) | (1 << MSR_DR)))
        tlb_flush(env, 1);
    if (msr_ile) {
void cpu_state_reset(CPUPPCState *env)
{
-    target_ulong msr;
-
-    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
-        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
-        log_cpu_state(env, 0);
-    }
-
-    msr = (target_ulong)0;
-    if (0) {
-        /* XXX: find a suitable condition to enable the hypervisor mode */
-        msr |= (target_ulong)MSR_HVB;
-    }
-    msr |= (target_ulong)0 << MSR_AP; /* TO BE CHECKED */
-    msr |= (target_ulong)0 << MSR_SA; /* TO BE CHECKED */
-    msr |= (target_ulong)1 << MSR_EP;
-#if defined (DO_SINGLE_STEP) && 0
-    /* Single step trace mode */
-    msr |= (target_ulong)1 << MSR_SE;
-    msr |= (target_ulong)1 << MSR_BE;
-#endif
-#if defined(CONFIG_USER_ONLY)
-    msr |= (target_ulong)1 << MSR_FP; /* Allow floating point usage */
-    msr |= (target_ulong)1 << MSR_VR; /* Allow altivec usage */
-    msr |= (target_ulong)1 << MSR_SPE; /* Allow SPE usage */
-    msr |= (target_ulong)1 << MSR_PR;
-#else
-    env->excp_prefix = env->hreset_excp_prefix;
-    env->nip = env->hreset_vector | env->excp_prefix;
-    if (env->mmu_model != POWERPC_MMU_REAL)
-        ppc_tlb_invalidate_all(env);
-#endif
-    env->msr = msr & env->msr_mask;
-#if defined(TARGET_PPC64)
-    if (env->mmu_model & POWERPC_MMU_64)
-        env->msr |= (1ULL << MSR_SF);
-#endif
-    hreg_compute_hflags(env);
-    env->reserve_addr = (target_ulong)-1ULL;
-    /* Be sure no exception or interrupt is pending */
-    env->pending_interrupts = 0;
-    env->exception_index = POWERPC_EXCP_NONE;
-    env->error_code = 0;
-    /* Flush all TLBs */
-    tlb_flush(env, 1);
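+    /* the reset logic now lives in the QOM CPU reset handler */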
+    cpu_reset(ENV_GET_CPU(env));
}
CPUPPCState *cpu_ppc_init (const char *cpu_model)
{
+    PowerPCCPU *cpu;
    CPUPPCState *env;
    const ppc_def_t *def;
    if (!def)
        return NULL;
-    env = g_malloc0(sizeof(CPUPPCState));
-    cpu_exec_init(env);
+    cpu = POWERPC_CPU(object_new(TYPE_POWERPC_CPU));
+    env = &cpu->env;
+
    if (tcg_enabled()) {
        ppc_translate_init();
    }
-    /* Adjust cpu index for SMT */
-#if !defined(CONFIG_USER_ONLY)
-    if (kvm_enabled()) {
-        int smt = kvmppc_smt_threads();
-        env->cpu_index = (env->cpu_index / smp_threads)*smt
-            + (env->cpu_index % smp_threads);
-    }
-#endif /* !CONFIG_USER_ONLY */
    env->cpu_model_str = cpu_model;
    cpu_ppc_register_internal(env, def);
    return env;
}
-
-void cpu_ppc_close (CPUPPCState *env)
-{
-    /* Should also remove all opcode tables... */
-    g_free(env);
-}