#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
+#include "exec/exec-all.h"
#include "tcg-op.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"
/* Code translation helpers */
/* global register indexes */
-static TCGv_ptr cpu_env;
+static TCGv_env cpu_env;
static char cpu_reg_names[10*3 + 22*4 /* GPR */
+ 10*4 + 22*5 /* SPE GPRh */
+ 10*4 + 22*5 /* FPR */
uint32_t exception;
/* Routine used to access memory */
bool pr, hv;
+ bool lazy_tlb_flush;
int mem_idx;
int access_type;
/* Translation flags */
/* isel (PowerPC 2.03 specification) */
static void gen_isel(DisasContext *ctx)
{
- TCGLabel *l1, *l2;
uint32_t bi = rC(ctx->opcode);
- uint32_t mask;
- TCGv_i32 t0;
+ uint32_t mask = 0x08 >> (bi & 0x03);
+ TCGv t0 = tcg_temp_new();
+ TCGv zr;
- l1 = gen_new_label();
- l2 = gen_new_label();
+ tcg_gen_extu_i32_tl(t0, cpu_crf[bi >> 2]);
+ tcg_gen_andi_tl(t0, t0, mask);
- mask = 0x08 >> (bi & 0x03);
- t0 = tcg_temp_new_i32();
- tcg_gen_andi_i32(t0, cpu_crf[bi >> 2], mask);
- tcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, l1);
- if (rA(ctx->opcode) == 0)
- tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], 0);
- else
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
- tcg_gen_br(l2);
- gen_set_label(l1);
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
- gen_set_label(l2);
- tcg_temp_free_i32(t0);
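+ /* Branchless select: if the tested CR bit in t0 is set, pick rA
+ * (or the constant zero when the RA field is 0), else pick rB.
+ */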
+ zr = tcg_const_tl(0);
+ tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr[rD(ctx->opcode)], t0, zr,
+ rA(ctx->opcode) ? cpu_gpr[rA(ctx->opcode)] : zr,
+ cpu_gpr[rB(ctx->opcode)]);
+ tcg_temp_free(zr);
+ tcg_temp_free(t0);
}
/* cmpb: PowerPC 2.05 specification */
/* nor & nor. */
GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER);
+#if defined(TARGET_PPC64)
+static void gen_pause(DisasContext *ctx)
+{
+ TCGv_i32 t0 = tcg_const_i32(0);
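+ /* cpu_env points at the CPUPPCState embedded in PowerPCCPU, so back
+ * up by its offset to reach the halted field of the enclosing
+ * CPUState.
+ */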
+ tcg_gen_st_i32(t0, cpu_env,
+ -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));
+ tcg_temp_free_i32(t0);
+
+ /* Stop translation, to give other CPUs a chance to run */
+ gen_exception_err(ctx, EXCP_HLT, 1);
+}
+#endif /* defined(TARGET_PPC64) */
+
/* or & or. */
static void gen_or(DisasContext *ctx)
{
}
break;
case 7:
- if (ctx->hv) {
+ if (ctx->hv && !ctx->pr) {
/* Set process priority to very high */
prio = 7;
}
tcg_gen_ori_tl(t0, t0, ((uint64_t)prio) << 50);
gen_store_spr(SPR_PPR, t0);
tcg_temp_free(t0);
+ /* Pause out of TCG, otherwise spin loops with smt_low
+ * eat too much CPU and the kernel hangs.
+ */
+ gen_pause(ctx);
}
#endif
}
target_ulong uimm = UIMM(ctx->opcode);
if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
- /* NOP */
- /* XXX: should handle special NOPs for POWER series */
return;
}
tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
/* rlwimi & rlwimi. */
static void gen_rlwimi(DisasContext *ctx)
{
- uint32_t mb, me, sh;
+ TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
+ TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
+ uint32_t sh = SH(ctx->opcode);
+ uint32_t mb = MB(ctx->opcode);
+ uint32_t me = ME(ctx->opcode);
- mb = MB(ctx->opcode);
- me = ME(ctx->opcode);
- sh = SH(ctx->opcode);
- if (likely(sh == (31-me) && mb <= me)) {
- tcg_gen_deposit_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
- cpu_gpr[rS(ctx->opcode)], sh, me - mb + 1);
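+ /* sh == 31 - me means the rotated field lands exactly at bit sh, so
+ * a contiguous mask (mb <= me) makes this a plain deposit.
+ */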
+ if (sh == (31-me) && mb <= me) {
+ tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
} else {
target_ulong mask;
+ TCGv_i32 t0;
TCGv t1;
- TCGv t0 = tcg_temp_new();
-#if defined(TARGET_PPC64)
- tcg_gen_deposit_i64(t0, cpu_gpr[rS(ctx->opcode)],
- cpu_gpr[rS(ctx->opcode)], 32, 32);
- tcg_gen_rotli_i64(t0, t0, sh);
-#else
- tcg_gen_rotli_i32(t0, cpu_gpr[rS(ctx->opcode)], sh);
-#endif
+
#if defined(TARGET_PPC64)
mb += 32;
me += 32;
#endif
mask = MASK(mb, me);
+
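+ /* With mb/me shifted into the low word on 64-bit targets, the mask
+ * covers only the low 32 bits, so a 32-bit rotate of the low half
+ * of rS is sufficient.
+ */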
+ t0 = tcg_temp_new_i32();
t1 = tcg_temp_new();
- tcg_gen_andi_tl(t0, t0, mask);
- tcg_gen_andi_tl(t1, cpu_gpr[rA(ctx->opcode)], ~mask);
- tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
- tcg_temp_free(t0);
+ tcg_gen_trunc_tl_i32(t0, t_rs);
+ tcg_gen_rotli_i32(t0, t0, sh);
+ tcg_gen_extu_i32_tl(t1, t0);
+ tcg_temp_free_i32(t0);
+
+ tcg_gen_andi_tl(t1, t1, mask);
+ tcg_gen_andi_tl(t_ra, t_ra, ~mask);
+ tcg_gen_or_tl(t_ra, t_ra, t1);
tcg_temp_free(t1);
}
- if (unlikely(Rc(ctx->opcode) != 0))
- gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, t_ra);
+ }
}
/* rlwinm & rlwinm. */
static void gen_rlwinm(DisasContext *ctx)
{
- uint32_t mb, me, sh;
-
- sh = SH(ctx->opcode);
- mb = MB(ctx->opcode);
- me = ME(ctx->opcode);
+ TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
+ TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
+ uint32_t sh = SH(ctx->opcode);
+ uint32_t mb = MB(ctx->opcode);
+ uint32_t me = ME(ctx->opcode);
- if (likely(mb == 0 && me == (31 - sh))) {
- if (likely(sh == 0)) {
- tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
- } else {
- TCGv t0 = tcg_temp_new();
- tcg_gen_ext32u_tl(t0, cpu_gpr[rS(ctx->opcode)]);
- tcg_gen_shli_tl(t0, t0, sh);
- tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], t0);
- tcg_temp_free(t0);
- }
- } else if (likely(sh != 0 && me == 31 && sh == (32 - mb))) {
- TCGv t0 = tcg_temp_new();
- tcg_gen_ext32u_tl(t0, cpu_gpr[rS(ctx->opcode)]);
- tcg_gen_shri_tl(t0, t0, mb);
- tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], t0);
- tcg_temp_free(t0);
- } else if (likely(mb == 0 && me == 31)) {
- TCGv_i32 t0 = tcg_temp_new_i32();
- tcg_gen_trunc_tl_i32(t0, cpu_gpr[rS(ctx->opcode)]);
- tcg_gen_rotli_i32(t0, t0, sh);
- tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t0);
- tcg_temp_free_i32(t0);
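+ /* mb == 0 && me == 31 - sh is a 32-bit shift left by sh;
+ * me == 31 && sh == 32 - mb is a 32-bit logical shift right by mb.
+ */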
+ if (mb == 0 && me == (31 - sh)) {
+ tcg_gen_shli_tl(t_ra, t_rs, sh);
+ tcg_gen_ext32u_tl(t_ra, t_ra);
+ } else if (sh != 0 && me == 31 && sh == (32 - mb)) {
+ tcg_gen_ext32u_tl(t_ra, t_rs);
+ tcg_gen_shri_tl(t_ra, t_ra, mb);
} else {
- TCGv t0 = tcg_temp_new();
-#if defined(TARGET_PPC64)
- tcg_gen_deposit_i64(t0, cpu_gpr[rS(ctx->opcode)],
- cpu_gpr[rS(ctx->opcode)], 32, 32);
- tcg_gen_rotli_i64(t0, t0, sh);
-#else
- tcg_gen_rotli_i32(t0, cpu_gpr[rS(ctx->opcode)], sh);
-#endif
#if defined(TARGET_PPC64)
mb += 32;
me += 32;
#endif
- tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
- tcg_temp_free(t0);
+ if (sh == 0) {
+ tcg_gen_andi_tl(t_ra, t_rs, MASK(mb, me));
+ } else {
+ TCGv_i32 t0 = tcg_temp_new_i32();
+
+ tcg_gen_trunc_tl_i32(t0, t_rs);
+ tcg_gen_rotli_i32(t0, t0, sh);
+ tcg_gen_andi_i32(t0, t0, MASK(mb, me));
+ tcg_gen_extu_i32_tl(t_ra, t0);
+ tcg_temp_free_i32(t0);
+ }
+ }
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, t_ra);
}
- if (unlikely(Rc(ctx->opcode) != 0))
- gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* rlwnm & rlwnm. */
static void gen_rlwnm(DisasContext *ctx)
{
- uint32_t mb, me;
- mb = MB(ctx->opcode);
- me = ME(ctx->opcode);
+ TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
+ TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
+ TCGv t_rb = cpu_gpr[rB(ctx->opcode)];
+ uint32_t mb = MB(ctx->opcode);
+ uint32_t me = ME(ctx->opcode);
+ TCGv_i32 t0, t1;
- if (likely(mb == 0 && me == 31)) {
- TCGv_i32 t0, t1;
- t0 = tcg_temp_new_i32();
- t1 = tcg_temp_new_i32();
- tcg_gen_trunc_tl_i32(t0, cpu_gpr[rB(ctx->opcode)]);
- tcg_gen_trunc_tl_i32(t1, cpu_gpr[rS(ctx->opcode)]);
- tcg_gen_andi_i32(t0, t0, 0x1f);
- tcg_gen_rotl_i32(t1, t1, t0);
- tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t1);
- tcg_temp_free_i32(t0);
- tcg_temp_free_i32(t1);
- } else {
- TCGv t0;
#if defined(TARGET_PPC64)
- TCGv t1;
+ mb += 32;
+ me += 32;
#endif
- t0 = tcg_temp_new();
- tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1f);
-#if defined(TARGET_PPC64)
- t1 = tcg_temp_new_i64();
- tcg_gen_deposit_i64(t1, cpu_gpr[rS(ctx->opcode)],
- cpu_gpr[rS(ctx->opcode)], 32, 32);
- tcg_gen_rotl_i64(t0, t1, t0);
- tcg_temp_free_i64(t1);
-#else
- tcg_gen_rotl_i32(t0, cpu_gpr[rS(ctx->opcode)], t0);
-#endif
- if (unlikely(mb != 0 || me != 31)) {
-#if defined(TARGET_PPC64)
- mb += 32;
- me += 32;
-#endif
- tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
- } else {
- tcg_gen_andi_tl(t0, t0, MASK(32, 63));
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
- }
- tcg_temp_free(t0);
+ t0 = tcg_temp_new_i32();
+ t1 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t0, t_rb);
+ tcg_gen_trunc_tl_i32(t1, t_rs);
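+ /* Only the low 5 bits of rB are used as the rotate count. */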
+ tcg_gen_andi_i32(t0, t0, 0x1f);
+ tcg_gen_rotl_i32(t1, t1, t0);
+ tcg_temp_free_i32(t0);
+
+ tcg_gen_andi_i32(t1, t1, MASK(mb, me));
+ tcg_gen_extu_i32_tl(t_ra, t1);
+ tcg_temp_free_i32(t1);
+
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, t_ra);
}
- if (unlikely(Rc(ctx->opcode) != 0))
- gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
#if defined(TARGET_PPC64)
gen_##name(ctx, 1, 1); \
}
-static inline void gen_rldinm(DisasContext *ctx, uint32_t mb, uint32_t me,
- uint32_t sh)
+static void gen_rldinm(DisasContext *ctx, int mb, int me, int sh)
{
- if (likely(sh != 0 && mb == 0 && me == (63 - sh))) {
- tcg_gen_shli_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], sh);
- } else if (likely(sh != 0 && me == 63 && sh == (64 - mb))) {
- tcg_gen_shri_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], mb);
+ TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
+ TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
+
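+ /* mb == 0 && me == 63 - sh is a shift left by sh;
+ * me == 63 && sh == 64 - mb is a logical shift right by mb.
+ */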
+ if (sh != 0 && mb == 0 && me == (63 - sh)) {
+ tcg_gen_shli_tl(t_ra, t_rs, sh);
+ } else if (sh != 0 && me == 63 && sh == (64 - mb)) {
+ tcg_gen_shri_tl(t_ra, t_rs, mb);
} else {
- TCGv t0 = tcg_temp_new();
- tcg_gen_rotli_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
- if (likely(mb == 0 && me == 63)) {
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
- } else {
- tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
- }
- tcg_temp_free(t0);
+ tcg_gen_rotli_tl(t_ra, t_rs, sh);
+ tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
+ }
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, t_ra);
}
- if (unlikely(Rc(ctx->opcode) != 0))
- gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
+
/* rldicl - rldicl. */
static inline void gen_rldicl(DisasContext *ctx, int mbn, int shn)
{
gen_rldinm(ctx, mb, 63, sh);
}
GEN_PPC64_R4(rldicl, 0x1E, 0x00);
+
/* rldicr - rldicr. */
static inline void gen_rldicr(DisasContext *ctx, int men, int shn)
{
gen_rldinm(ctx, 0, me, sh);
}
GEN_PPC64_R4(rldicr, 0x1E, 0x02);
+
/* rldic - rldic. */
static inline void gen_rldic(DisasContext *ctx, int mbn, int shn)
{
}
GEN_PPC64_R4(rldic, 0x1E, 0x04);
-static inline void gen_rldnm(DisasContext *ctx, uint32_t mb, uint32_t me)
+static void gen_rldnm(DisasContext *ctx, int mb, int me)
{
+ TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
+ TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
+ TCGv t_rb = cpu_gpr[rB(ctx->opcode)];
TCGv t0;
t0 = tcg_temp_new();
- tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3f);
- tcg_gen_rotl_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
- if (unlikely(mb != 0 || me != 63)) {
- tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
- } else {
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
- }
+ tcg_gen_andi_tl(t0, t_rb, 0x3f);
+ tcg_gen_rotl_tl(t_ra, t_rs, t0);
tcg_temp_free(t0);
- if (unlikely(Rc(ctx->opcode) != 0))
- gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+
+ tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, t_ra);
+ }
}
/* rldcl - rldcl. */
gen_rldnm(ctx, mb, 63);
}
GEN_PPC64_R2(rldcl, 0x1E, 0x08);
+
/* rldcr - rldcr. */
static inline void gen_rldcr(DisasContext *ctx, int men)
{
gen_rldnm(ctx, 0, me);
}
GEN_PPC64_R2(rldcr, 0x1E, 0x09);
+
/* rldimi - rldimi. */
-static inline void gen_rldimi(DisasContext *ctx, int mbn, int shn)
+static void gen_rldimi(DisasContext *ctx, int mbn, int shn)
{
- uint32_t sh, mb, me;
+ TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
+ TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
+ uint32_t sh = SH(ctx->opcode) | (shn << 5);
+ uint32_t mb = MB(ctx->opcode) | (mbn << 5);
+ uint32_t me = 63 - sh;
- sh = SH(ctx->opcode) | (shn << 5);
- mb = MB(ctx->opcode) | (mbn << 5);
- me = 63 - sh;
- if (unlikely(sh == 0 && mb == 0)) {
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
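+ /* me = 63 - sh, so mb <= me means the field from rS is inserted
+ * contiguously at bit sh and deposit applies directly.
+ */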
+ if (mb <= me) {
+ tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
} else {
- TCGv t0, t1;
- target_ulong mask;
+ target_ulong mask = MASK(mb, me);
+ TCGv t1 = tcg_temp_new();
- t0 = tcg_temp_new();
- tcg_gen_rotli_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
- t1 = tcg_temp_new();
- mask = MASK(mb, me);
- tcg_gen_andi_tl(t0, t0, mask);
- tcg_gen_andi_tl(t1, cpu_gpr[rA(ctx->opcode)], ~mask);
- tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
- tcg_temp_free(t0);
+ tcg_gen_rotli_tl(t1, t_rs, sh);
+ tcg_gen_andi_tl(t1, t1, mask);
+ tcg_gen_andi_tl(t_ra, t_ra, ~mask);
+ tcg_gen_or_tl(t_ra, t_ra, t1);
tcg_temp_free(t1);
}
- if (unlikely(Rc(ctx->opcode) != 0))
- gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, t_ra);
+ }
}
GEN_PPC64_R4(rldimi, 0x1E, 0x06);
#endif
rs = rS(ctx->opcode);
if ((ctx->opcode & 0x3) == 0x2) { /* stq */
-
bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
bool le_is_supported = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
+ if (!(ctx->insns_flags & PPC_64BX)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ }
+
if (!legal_in_user_mode && ctx->pr) {
gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
if (nb == 0)
nb = 32;
- nr = nb / 4;
- if (unlikely(((start + nr) > 32 &&
- start <= ra && (start + nr - 32) > ra) ||
- ((start + nr) <= 32 && start <= ra && (start + nr) > ra))) {
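+ /* Round up: a partial final word still consumes a register. */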
+ nr = (nb + 3) / 4;
+ if (unlikely(lsw_reg_in_range(start, nr, ra))) {
gen_inval_exception(ctx, POWERPC_EXCP_INVAL_LSWX);
return;
}
{
}
+#if !defined(CONFIG_USER_ONLY)
+static inline void gen_check_tlb_flush(DisasContext *ctx)
+{
+ TCGv_i32 t;
+ TCGLabel *l;
+
+ if (!ctx->lazy_tlb_flush) {
+ return;
+ }
+ l = gen_new_label();
+ t = tcg_temp_new_i32();
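+ /* Skip the helper entirely unless a flush is actually pending. */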
+ tcg_gen_ld_i32(t, cpu_env, offsetof(CPUPPCState, tlb_need_flush));
+ tcg_gen_brcondi_i32(TCG_COND_EQ, t, 0, l);
+ gen_helper_check_tlb_flush(cpu_env);
+ gen_set_label(l);
+ tcg_temp_free_i32(t);
+}
+#else
+static inline void gen_check_tlb_flush(DisasContext *ctx) { }
+#endif
+
/* isync */
static void gen_isync(DisasContext *ctx)
{
+ /*
+ * We need to check for a pending TLB flush. This can only happen
+ * in kernel mode, so check MSR_PR.
+ */
+ if (!ctx->pr) {
+ gen_check_tlb_flush(ctx);
+ }
gen_stop_exception(ctx);
}
/* sync */
static void gen_sync(DisasContext *ctx)
{
+ uint32_t l = (ctx->opcode >> 21) & 3;
+
+ /*
+ * We may need to check for a pending TLB flush.
+ *
+ * We do this on ptesync (l == 2) on ppc64 and on any sync on ppc32.
+ *
+ * Additionally, this can only happen in kernel mode, so check
+ * MSR_PR as well.
+ */
+ if (((l == 2) || !(ctx->insns_flags & PPC_64B)) && !ctx->pr) {
+ gen_check_tlb_flush(ctx);
+ }
}
/* wait */
#endif
}
+static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
+{
+ if (unlikely(ctx->singlestep_enabled)) {
+ return false;
+ }
+
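+ /* Chaining TBs directly is only safe while staying within the same
+ * guest page in system mode, as the mapping of other pages may
+ * change under us.
+ */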
+#ifndef CONFIG_USER_ONLY
+ return (ctx->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
+#else
+ return true;
+#endif
+}
+
/*** Branch ***/
static inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
- TranslationBlock *tb;
- tb = ctx->tb;
if (NARROW_MODE(ctx)) {
dest = (uint32_t) dest;
}
- if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
- likely(!ctx->singlestep_enabled)) {
+ if (use_goto_tb(ctx, dest)) {
tcg_gen_goto_tb(n);
tcg_gen_movi_tl(cpu_nip, dest & ~3);
- tcg_gen_exit_tb((uintptr_t)tb + n);
+ tcg_gen_exit_tb((uintptr_t)ctx->tb + n);
} else {
tcg_gen_movi_tl(cpu_nip, dest & ~3);
if (unlikely(ctx->singlestep_enabled)) {
gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
/* Restore CPU state */
- if (unlikely(!ctx->hv)) {
+ if (unlikely(ctx->pr || !ctx->hv)) {
gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
void (*read_cb)(DisasContext *ctx, int gprn, int sprn);
uint32_t sprn = SPR(ctx->opcode);
-#if !defined(CONFIG_USER_ONLY)
- if (ctx->hv)
+#if defined(CONFIG_USER_ONLY)
+ read_cb = ctx->spr_cb[sprn].uea_read;
+#else
+ if (ctx->pr) {
+ read_cb = ctx->spr_cb[sprn].uea_read;
+ } else if (ctx->hv) {
read_cb = ctx->spr_cb[sprn].hea_read;
- else if (!ctx->pr)
+ } else {
read_cb = ctx->spr_cb[sprn].oea_read;
- else
+ }
#endif
- read_cb = ctx->spr_cb[sprn].uea_read;
if (likely(read_cb != NULL)) {
if (likely(read_cb != SPR_NOACCESS)) {
(*read_cb)(ctx, rD(ctx->opcode), sprn);
qemu_log("Trying to read invalid spr %d (0x%03x) at "
TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4);
}
- gen_inval_exception(ctx, POWERPC_EXCP_INVAL_SPR);
+ /* Only generate an exception in user space; otherwise this is a nop */
+ if (ctx->pr) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_SPR);
+ }
}
}
/* Special form that does not need any synchronisation */
TCGv t0 = tcg_temp_new();
tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], (1 << MSR_RI) | (1 << MSR_EE));
- tcg_gen_andi_tl(cpu_msr, cpu_msr, ~((1 << MSR_RI) | (1 << MSR_EE)));
+ tcg_gen_andi_tl(cpu_msr, cpu_msr,
+ ~(target_ulong)((1 << MSR_RI) | (1 << MSR_EE)));
tcg_gen_or_tl(cpu_msr, cpu_msr, t0);
tcg_temp_free(t0);
} else {
/* Special form that does not need any synchronisation */
TCGv t0 = tcg_temp_new();
tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], (1 << MSR_RI) | (1 << MSR_EE));
- tcg_gen_andi_tl(cpu_msr, cpu_msr, ~((1 << MSR_RI) | (1 << MSR_EE)));
+ tcg_gen_andi_tl(cpu_msr, cpu_msr,
+ ~(target_ulong)((1 << MSR_RI) | (1 << MSR_EE)));
tcg_gen_or_tl(cpu_msr, cpu_msr, t0);
tcg_temp_free(t0);
} else {
void (*write_cb)(DisasContext *ctx, int sprn, int gprn);
uint32_t sprn = SPR(ctx->opcode);
-#if !defined(CONFIG_USER_ONLY)
- if (ctx->hv)
+#if defined(CONFIG_USER_ONLY)
+ write_cb = ctx->spr_cb[sprn].uea_write;
+#else
+ if (ctx->pr) {
+ write_cb = ctx->spr_cb[sprn].uea_write;
+ } else if (ctx->hv) {
write_cb = ctx->spr_cb[sprn].hea_write;
- else if (!ctx->pr)
+ } else {
write_cb = ctx->spr_cb[sprn].oea_write;
- else
+ }
#endif
- write_cb = ctx->spr_cb[sprn].uea_write;
if (likely(write_cb != NULL)) {
if (likely(write_cb != SPR_NOACCESS)) {
(*write_cb)(ctx, sprn, rS(ctx->opcode));
}
fprintf(stderr, "Trying to write invalid spr %d (0x%03x) at "
TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4);
- gen_inval_exception(ctx, POWERPC_EXCP_INVAL_SPR);
+
+ /* Only generate an exception in user space; otherwise this is a nop */
+ if (ctx->pr) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_SPR);
+ }
}
}
cpu_gpr[rB(ctx->opcode)]);
#endif
}
+
+static void gen_slbfee_(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+#else
+ TCGLabel *l1, *l2;
+
+ if (unlikely(ctx->pr)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+ return;
+ }
+ gen_helper_find_slb_vsid(cpu_gpr[rS(ctx->opcode)], cpu_env,
+ cpu_gpr[rB(ctx->opcode)]);
+ l1 = gen_new_label();
+ l2 = gen_new_label();
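+ /* Set CR0 to SO, then set CR0[EQ] if the helper found an SLB
+ * entry; on failure (-1) clear rS instead.
+ */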
+ tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[rS(ctx->opcode)], -1, l1);
+ tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 1 << CRF_EQ);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_movi_tl(cpu_gpr[rS(ctx->opcode)], 0);
+ gen_set_label(l2);
+#endif
+}
#endif /* defined(TARGET_PPC64) */
/*** Lookaside buffer management ***/
#if defined(CONFIG_USER_ONLY)
gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
- if (unlikely(ctx->pr)) {
+ if (unlikely(ctx->pr || !ctx->hv)) {
gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
#if defined(CONFIG_USER_ONLY)
gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
- if (unlikely(ctx->pr)) {
+ if (unlikely(ctx->pr || !ctx->hv)) {
gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
#if defined(CONFIG_USER_ONLY)
gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
- if (unlikely(ctx->pr)) {
+ if (unlikely(ctx->pr || !ctx->hv)) {
gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
- /* This has no effect: it should ensure that all previous
- * tlbie have completed
+ /* tlbsync is a nop for server; ptesync handles the delayed TLB flush.
+ * Embedded, however, needs to deal with tlbsync. We don't try to be
+ * fancy and swallow the overhead of checking for both.
*/
- gen_stop_exception(ctx);
+ gen_check_tlb_flush(ctx);
#endif
}
#if defined(TARGET_PPC64)
GEN_HANDLER(mtmsrd, 0x1F, 0x12, 0x05, 0x001EF801, PPC_64B),
#endif
-GEN_HANDLER(mtmsr, 0x1F, 0x12, 0x04, 0x001FF801, PPC_MISC),
+GEN_HANDLER(mtmsr, 0x1F, 0x12, 0x04, 0x001EF801, PPC_MISC),
GEN_HANDLER(mtspr, 0x1F, 0x13, 0x0E, 0x00000000, PPC_MISC),
GEN_HANDLER(dcbf, 0x1F, 0x16, 0x02, 0x03C00001, PPC_CACHE),
GEN_HANDLER(dcbi, 0x1F, 0x16, 0x0E, 0x03E00001, PPC_CACHE),
GEN_HANDLER2(slbmte, "slbmte", 0x1F, 0x12, 0x0C, 0x001F0001, PPC_SEGMENT_64B),
GEN_HANDLER2(slbmfee, "slbmfee", 0x1F, 0x13, 0x1C, 0x001F0001, PPC_SEGMENT_64B),
GEN_HANDLER2(slbmfev, "slbmfev", 0x1F, 0x13, 0x1A, 0x001F0001, PPC_SEGMENT_64B),
+GEN_HANDLER2(slbfee_, "slbfee.", 0x1F, 0x13, 0x1E, 0x001F0000, PPC_SEGMENT_64B),
#endif
GEN_HANDLER(tlbia, 0x1F, 0x12, 0x0B, 0x03FFFC01, PPC_MEM_TLBIA),
-GEN_HANDLER(tlbiel, 0x1F, 0x12, 0x08, 0x03FF0001, PPC_MEM_TLBIE),
-GEN_HANDLER(tlbie, 0x1F, 0x12, 0x09, 0x03FF0001, PPC_MEM_TLBIE),
+/* XXX These instructions will need to be handled differently for
+ * different ISA versions */
+GEN_HANDLER(tlbiel, 0x1F, 0x12, 0x08, 0x001F0001, PPC_MEM_TLBIE),
+GEN_HANDLER(tlbie, 0x1F, 0x12, 0x09, 0x001F0001, PPC_MEM_TLBIE),
GEN_HANDLER(tlbsync, 0x1F, 0x16, 0x11, 0x03FFF801, PPC_MEM_TLBSYNC),
#if defined(TARGET_PPC64)
-GEN_HANDLER(slbia, 0x1F, 0x12, 0x0F, 0x03FFFC01, PPC_SLBI),
+GEN_HANDLER(slbia, 0x1F, 0x12, 0x0F, 0x031FFC01, PPC_SLBI),
GEN_HANDLER(slbie, 0x1F, 0x12, 0x0D, 0x03FF0001, PPC_SLBI),
#endif
GEN_HANDLER(eciwx, 0x1F, 0x16, 0x0D, 0x00000001, PPC_EXTERN),
env->nip, env->lr, env->ctr, cpu_read_xer(env),
cs->cpu_index);
cpu_fprintf(f, "MSR " TARGET_FMT_lx " HID0 " TARGET_FMT_lx " HF "
- TARGET_FMT_lx " idx %d\n", env->msr, env->spr[SPR_HID0],
- env->hflags, env->mmu_idx);
+ TARGET_FMT_lx " iidx %d didx %d\n",
+ env->msr, env->spr[SPR_HID0],
+ env->hflags, env->immu_idx, env->dmmu_idx);
#if !defined(NO_TIMER_DUMP)
cpu_fprintf(f, "TB %08" PRIu32 " %08" PRIu64
#if !defined(CONFIG_USER_ONLY)
ctx.exception = POWERPC_EXCP_NONE;
ctx.spr_cb = env->spr_cb;
ctx.pr = msr_pr;
- ctx.hv = !msr_pr && msr_hv;
- ctx.mem_idx = env->mmu_idx;
+ ctx.mem_idx = env->dmmu_idx;
+#if !defined(CONFIG_USER_ONLY)
+ ctx.hv = msr_hv || !env->has_hv_mode;
+#endif
ctx.insns_flags = env->insns_flags;
ctx.insns_flags2 = env->insns_flags2;
ctx.access_type = -1;
ctx.sf_mode = msr_is_64bit(env, env->msr);
ctx.has_cfar = !!(env->flags & POWERPC_FLAG_CFAR);
#endif
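+ /* Hash MMU targets can defer TLB flushes until the next
+ * synchronising instruction (see gen_check_tlb_flush).
+ */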
+ if (env->mmu_model == POWERPC_MMU_32B ||
+ env->mmu_model == POWERPC_MMU_601 ||
+ (env->mmu_model & POWERPC_MMU_64B)) {
+ ctx.lazy_tlb_flush = true;
+ }
+
ctx.fpu_enabled = msr_fp;
if ((env->flags & POWERPC_FLAG_SPE) && msr_spe)
ctx.spe_enabled = msr_spe;
tb->icount = num_insns;
#if defined(DEBUG_DISAS)
- if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
+ if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
+ && qemu_log_in_addr_range(pc_start)) {
int flags;
flags = env->bfd_mach;
flags |= ctx.le_mode << 16;