//#define DO_SINGLE_STEP
//#define PPC_DEBUG_DISAS
//#define DO_PPC_STATISTICS
-//#define OPTIMIZE_FPRF_UPDATE
/*****************************************************************************/
/* Code translation helpers */
static TCGv_i64 cpu_avrh[32], cpu_avrl[32];
static TCGv_i32 cpu_crf[8];
static TCGv cpu_nip;
+static TCGv cpu_msr;
static TCGv cpu_ctr;
static TCGv cpu_lr;
static TCGv cpu_xer;
+static TCGv cpu_reserve;
static TCGv_i32 cpu_fpscr;
static TCGv_i32 cpu_access_type;
-/* dyngen register indexes */
-static TCGv cpu_T[3];
-
#include "gen-icount.h"
void ppc_translate_init(void)
return;
cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
-#if TARGET_LONG_BITS > HOST_LONG_BITS
- cpu_T[0] = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, t0), "T0");
- cpu_T[1] = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, t1), "T1");
- cpu_T[2] = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, t2), "T2");
-#else
- cpu_T[0] = tcg_global_reg_new(TCG_AREG1, "T0");
- cpu_T[1] = tcg_global_reg_new(TCG_AREG2, "T1");
-#ifdef HOST_I386
- /* XXX: This is a temporary workaround for i386.
- * On i386 qemu_st32 runs out of registers.
- * The proper fix is to remove cpu_T.
- */
- cpu_T[2] = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, t2), "T2");
-#else
- cpu_T[2] = tcg_global_reg_new(TCG_AREG3, "T2");
-#endif
-#endif
p = cpu_reg_names;
cpu_nip = tcg_global_mem_new(TCG_AREG0,
offsetof(CPUState, nip), "nip");
+ cpu_msr = tcg_global_mem_new(TCG_AREG0,
+ offsetof(CPUState, msr), "msr");
+
cpu_ctr = tcg_global_mem_new(TCG_AREG0,
offsetof(CPUState, ctr), "ctr");
cpu_xer = tcg_global_mem_new(TCG_AREG0,
offsetof(CPUState, xer), "xer");
+ cpu_reserve = tcg_global_mem_new(TCG_AREG0,
+ offsetof(CPUState, reserve), "reserve");
+
cpu_fpscr = tcg_global_mem_new_i32(TCG_AREG0,
offsetof(CPUState, fpscr), "fpscr");
done_init = 1;
}
-#if defined(OPTIMIZE_FPRF_UPDATE)
-static uint16_t *gen_fprf_buf[OPC_BUF_SIZE];
-static uint16_t **gen_fprf_ptr;
-#endif
-
/* internal defines */
typedef struct DisasContext {
struct TranslationBlock *tb;
uint32_t exception;
/* Routine used to access memory */
int mem_idx;
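+    /* Value currently held in cpu_access_type, cached so redundant updates can be skipped */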
+ int access_type;
/* Translation flags */
-#if !defined(CONFIG_USER_ONLY)
- int supervisor;
-#endif
+ int le_mode;
#if defined(TARGET_PPC64)
int sf_mode;
#endif
static always_inline void gen_reset_fpstatus (void)
{
#ifdef CONFIG_SOFTFLOAT
- gen_op_reset_fpstatus();
+ gen_helper_reset_fpstatus();
#endif
}
if (set_fprf != 0) {
/* This case might be optimized later */
-#if defined(OPTIMIZE_FPRF_UPDATE)
- *gen_fprf_ptr++ = gen_opc_ptr;
-#endif
tcg_gen_movi_i32(t0, 1);
gen_helper_compute_fprf(t0, arg, t0);
if (unlikely(set_rc)) {
tcg_gen_movi_i32(t0, 0);
gen_helper_compute_fprf(t0, arg, t0);
tcg_gen_mov_i32(cpu_crf[1], t0);
- if (set_fprf)
- gen_helper_float_check_status();
}
tcg_temp_free_i32(t0);
}
-static always_inline void gen_optimize_fprf (void)
-{
-#if defined(OPTIMIZE_FPRF_UPDATE)
- uint16_t **ptr;
-
- for (ptr = gen_fprf_buf; ptr != (gen_fprf_ptr - 1); ptr++)
- *ptr = INDEX_op_nop1;
- gen_fprf_ptr = gen_fprf_buf;
-#endif
-}
-
-static always_inline void gen_set_access_type(int access_type)
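+/* Only emit the TCG store when the access type actually changes; the last value
+ * written to cpu_access_type is cached in the DisasContext. */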
+static always_inline void gen_set_access_type (DisasContext *ctx, int access_type)
{
- tcg_gen_movi_i32(cpu_access_type, access_type);
+ if (ctx->access_type != access_type) {
+ tcg_gen_movi_i32(cpu_access_type, access_type);
+ ctx->access_type = access_type;
+ }
}
static always_inline void gen_update_nip (DisasContext *ctx, target_ulong nip)
tcg_gen_movi_tl(cpu_nip, (uint32_t)nip);
}
-#define GEN_EXCP(ctx, excp, error) \
-do { \
- TCGv_i32 t0 = tcg_const_i32(excp); \
- TCGv_i32 t1 = tcg_const_i32(error); \
- if ((ctx)->exception == POWERPC_EXCP_NONE) { \
- gen_update_nip(ctx, (ctx)->nip); \
- } \
- gen_helper_raise_exception_err(t0, t1); \
- tcg_temp_free_i32(t0); \
- tcg_temp_free_i32(t1); \
- ctx->exception = (excp); \
-} while (0)
-
-#define GEN_EXCP_INVAL(ctx) \
-GEN_EXCP((ctx), POWERPC_EXCP_PROGRAM, \
- POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL)
-
-#define GEN_EXCP_PRIVOPC(ctx) \
-GEN_EXCP((ctx), POWERPC_EXCP_PROGRAM, \
- POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_OPC)
-
-#define GEN_EXCP_PRIVREG(ctx) \
-GEN_EXCP((ctx), POWERPC_EXCP_PROGRAM, \
- POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG)
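+/* Raise an exception (optionally with an error code). The NIP is updated first
+ * unless an exception is already pending for the current instruction. */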
+static always_inline void gen_exception_err (DisasContext *ctx, uint32_t excp, uint32_t error)
+{
+ TCGv_i32 t0, t1;
+ if (ctx->exception == POWERPC_EXCP_NONE) {
+ gen_update_nip(ctx, ctx->nip);
+ }
+ t0 = tcg_const_i32(excp);
+ t1 = tcg_const_i32(error);
+ gen_helper_raise_exception_err(t0, t1);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+ ctx->exception = (excp);
+}
-#define GEN_EXCP_NO_FP(ctx) \
-GEN_EXCP(ctx, POWERPC_EXCP_FPU, 0)
+static always_inline void gen_exception (DisasContext *ctx, uint32_t excp)
+{
+ TCGv_i32 t0;
+ if (ctx->exception == POWERPC_EXCP_NONE) {
+ gen_update_nip(ctx, ctx->nip);
+ }
+ t0 = tcg_const_i32(excp);
+ gen_helper_raise_exception(t0);
+ tcg_temp_free_i32(t0);
+ ctx->exception = (excp);
+}
-#define GEN_EXCP_NO_AP(ctx) \
-GEN_EXCP(ctx, POWERPC_EXCP_APU, 0)
+static always_inline void gen_debug_exception (DisasContext *ctx)
+{
+ TCGv_i32 t0;
+ gen_update_nip(ctx, ctx->nip);
+ t0 = tcg_const_i32(EXCP_DEBUG);
+ gen_helper_raise_exception(t0);
+ tcg_temp_free_i32(t0);
+}
-#define GEN_EXCP_NO_VR(ctx) \
-GEN_EXCP(ctx, POWERPC_EXCP_VPU, 0)
+static always_inline void gen_inval_exception (DisasContext *ctx, uint32_t error)
+{
+ gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL | error);
+}
/* Stop translation */
-static always_inline void GEN_STOP (DisasContext *ctx)
+static always_inline void gen_stop_exception (DisasContext *ctx)
{
gen_update_nip(ctx, ctx->nip);
ctx->exception = POWERPC_EXCP_STOP;
}
/* No need to update nip here, as execution flow will change */
-static always_inline void GEN_SYNC (DisasContext *ctx)
+static always_inline void gen_sync_exception (DisasContext *ctx)
{
ctx->exception = POWERPC_EXCP_SYNC;
}
.oname = stringify(name), \
}
+/* SPR load/store helpers */
+static always_inline void gen_load_spr(TCGv t, int reg)
+{
+ tcg_gen_ld_tl(t, cpu_env, offsetof(CPUState, spr[reg]));
+}
+
+static always_inline void gen_store_spr(int reg, TCGv t)
+{
+ tcg_gen_st_tl(t, cpu_env, offsetof(CPUState, spr[reg]));
+}
+
/* Start opcode list */
GEN_OPCODE_MARK(start);
/* Invalid instruction */
GEN_HANDLER(invalid, 0x00, 0x00, 0x00, 0xFFFFFFFF, PPC_NONE)
{
- GEN_EXCP_INVAL(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
}
static opc_handler_t invalid_handler = {
break;
#if !defined(CONFIG_USER_ONLY)
case 31:
- if (ctx->supervisor > 0) {
+ if (ctx->mem_idx > 0) {
/* Set process priority to very low */
prio = 1;
}
break;
case 5:
- if (ctx->supervisor > 0) {
+ if (ctx->mem_idx > 0) {
            /* Set process priority to medium-high */
prio = 5;
}
break;
case 3:
- if (ctx->supervisor > 0) {
+ if (ctx->mem_idx > 0) {
/* Set process priority to high */
prio = 6;
}
break;
case 7:
- if (ctx->supervisor > 1) {
+ if (ctx->mem_idx > 1) {
/* Set process priority to very high */
prio = 7;
}
}
if (prio) {
TCGv t0 = tcg_temp_new();
- tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, spr[SPR_PPR]));
+ gen_load_spr(t0, SPR_PPR);
tcg_gen_andi_tl(t0, t0, ~0x001C000000000000ULL);
tcg_gen_ori_tl(t0, t0, ((uint64_t)prio) << 50);
- tcg_gen_st_tl(t0, cpu_env, offsetof(CPUState, spr[SPR_PPR]));
+ gen_store_spr(SPR_PPR, t0);
tcg_temp_free(t0);
}
#endif
GEN_HANDLER(f##name, op1, op2, 0xFF, 0x00000000, type) \
{ \
if (unlikely(!ctx->fpu_enabled)) { \
- GEN_EXCP_NO_FP(ctx); \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
return; \
} \
gen_reset_fpstatus(); \
GEN_HANDLER(f##name, op1, op2, 0xFF, inval, type) \
{ \
if (unlikely(!ctx->fpu_enabled)) { \
- GEN_EXCP_NO_FP(ctx); \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
return; \
} \
gen_reset_fpstatus(); \
GEN_HANDLER(f##name, op1, op2, 0xFF, inval, type) \
{ \
if (unlikely(!ctx->fpu_enabled)) { \
- GEN_EXCP_NO_FP(ctx); \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
return; \
} \
gen_reset_fpstatus(); \
GEN_HANDLER(f##name, 0x3F, op2, op3, 0x001F0000, type) \
{ \
if (unlikely(!ctx->fpu_enabled)) { \
- GEN_EXCP_NO_FP(ctx); \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
return; \
} \
gen_reset_fpstatus(); \
GEN_HANDLER(f##name, op1, op2, 0xFF, 0x001F07C0, type) \
{ \
if (unlikely(!ctx->fpu_enabled)) { \
- GEN_EXCP_NO_FP(ctx); \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
return; \
} \
gen_reset_fpstatus(); \
GEN_HANDLER(frsqrtes, 0x3B, 0x1A, 0xFF, 0x001F07C0, PPC_FLOAT_FRSQRTES)
{
if (unlikely(!ctx->fpu_enabled)) {
- GEN_EXCP_NO_FP(ctx);
+ gen_exception(ctx, POWERPC_EXCP_FPU);
return;
}
gen_reset_fpstatus();
GEN_HANDLER(fsqrt, 0x3F, 0x16, 0xFF, 0x001F07C0, PPC_FLOAT_FSQRT)
{
if (unlikely(!ctx->fpu_enabled)) {
- GEN_EXCP_NO_FP(ctx);
+ gen_exception(ctx, POWERPC_EXCP_FPU);
return;
}
gen_reset_fpstatus();
GEN_HANDLER(fsqrts, 0x3B, 0x16, 0xFF, 0x001F07C0, PPC_FLOAT_FSQRT)
{
if (unlikely(!ctx->fpu_enabled)) {
- GEN_EXCP_NO_FP(ctx);
+ gen_exception(ctx, POWERPC_EXCP_FPU);
return;
}
gen_reset_fpstatus();
GEN_HANDLER(fcmpo, 0x3F, 0x00, 0x01, 0x00600001, PPC_FLOAT)
{
if (unlikely(!ctx->fpu_enabled)) {
- GEN_EXCP_NO_FP(ctx);
+ gen_exception(ctx, POWERPC_EXCP_FPU);
return;
}
gen_reset_fpstatus();
GEN_HANDLER(fcmpu, 0x3F, 0x00, 0x00, 0x00600001, PPC_FLOAT)
{
if (unlikely(!ctx->fpu_enabled)) {
- GEN_EXCP_NO_FP(ctx);
+ gen_exception(ctx, POWERPC_EXCP_FPU);
return;
}
gen_reset_fpstatus();
GEN_HANDLER(fmr, 0x3F, 0x08, 0x02, 0x001F0000, PPC_FLOAT)
{
if (unlikely(!ctx->fpu_enabled)) {
- GEN_EXCP_NO_FP(ctx);
+ gen_exception(ctx, POWERPC_EXCP_FPU);
return;
}
tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
int bfa;
if (unlikely(!ctx->fpu_enabled)) {
- GEN_EXCP_NO_FP(ctx);
+ gen_exception(ctx, POWERPC_EXCP_FPU);
return;
}
- gen_optimize_fprf();
bfa = 4 * (7 - crfS(ctx->opcode));
tcg_gen_shri_i32(cpu_crf[crfD(ctx->opcode)], cpu_fpscr, bfa);
tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], 0xf);
GEN_HANDLER(mffs, 0x3F, 0x07, 0x12, 0x001FF800, PPC_FLOAT)
{
if (unlikely(!ctx->fpu_enabled)) {
- GEN_EXCP_NO_FP(ctx);
+ gen_exception(ctx, POWERPC_EXCP_FPU);
return;
}
- gen_optimize_fprf();
gen_reset_fpstatus();
tcg_gen_extu_i32_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpscr);
gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 0, Rc(ctx->opcode) != 0);
uint8_t crb;
if (unlikely(!ctx->fpu_enabled)) {
- GEN_EXCP_NO_FP(ctx);
+ gen_exception(ctx, POWERPC_EXCP_FPU);
return;
}
- crb = 32 - (crbD(ctx->opcode) >> 2);
- gen_optimize_fprf();
+ crb = 31 - crbD(ctx->opcode);
gen_reset_fpstatus();
- if (likely(crb != 30 && crb != 29))
- tcg_gen_andi_i32(cpu_fpscr, cpu_fpscr, ~(1 << crb));
+ if (likely(crb != FPSCR_FEX && crb != FPSCR_VX)) {
+ TCGv_i32 t0 = tcg_const_i32(crb);
+ gen_helper_fpscr_clrbit(t0);
+ tcg_temp_free_i32(t0);
+ }
if (unlikely(Rc(ctx->opcode) != 0)) {
tcg_gen_shri_i32(cpu_crf[1], cpu_fpscr, FPSCR_OX);
}
uint8_t crb;
if (unlikely(!ctx->fpu_enabled)) {
- GEN_EXCP_NO_FP(ctx);
+ gen_exception(ctx, POWERPC_EXCP_FPU);
return;
}
- crb = 32 - (crbD(ctx->opcode) >> 2);
- gen_optimize_fprf();
+ crb = 31 - crbD(ctx->opcode);
gen_reset_fpstatus();
/* XXX: we pretend we can only do IEEE floating-point computations */
if (likely(crb != FPSCR_FEX && crb != FPSCR_VX && crb != FPSCR_NI)) {
TCGv_i32 t0;
if (unlikely(!ctx->fpu_enabled)) {
- GEN_EXCP_NO_FP(ctx);
+ gen_exception(ctx, POWERPC_EXCP_FPU);
return;
}
- gen_optimize_fprf();
gen_reset_fpstatus();
t0 = tcg_const_i32(FM(ctx->opcode));
gen_helper_store_fpscr(cpu_fpr[rB(ctx->opcode)], t0);
TCGv_i32 t1;
if (unlikely(!ctx->fpu_enabled)) {
- GEN_EXCP_NO_FP(ctx);
+ gen_exception(ctx, POWERPC_EXCP_FPU);
return;
}
bf = crbD(ctx->opcode) >> 2;
sh = 7 - bf;
- gen_optimize_fprf();
gen_reset_fpstatus();
t0 = tcg_const_i64(FPIMM(ctx->opcode) << (4 * sh));
t1 = tcg_const_i32(1 << sh);
/*** Addressing modes ***/
/* Register indirect with immediate index : EA = (rA|0) + SIMM */
-static always_inline void gen_addr_imm_index (TCGv EA,
- DisasContext *ctx,
- target_long maskl)
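+/* On 64-bit CPUs running in 32-bit (!sf_mode) context the effective address is
+ * truncated to 32 bits, hence the 32-bit movi/ext32u variants below. */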
+static always_inline void gen_addr_imm_index (DisasContext *ctx, TCGv EA, target_long maskl)
{
target_long simm = SIMM(ctx->opcode);
simm &= ~maskl;
- if (rA(ctx->opcode) == 0)
+ if (rA(ctx->opcode) == 0) {
+#if defined(TARGET_PPC64)
+ if (!ctx->sf_mode) {
+ tcg_gen_movi_tl(EA, (uint32_t)simm);
+ } else
+#endif
tcg_gen_movi_tl(EA, simm);
- else if (likely(simm != 0))
+ } else if (likely(simm != 0)) {
tcg_gen_addi_tl(EA, cpu_gpr[rA(ctx->opcode)], simm);
- else
+#if defined(TARGET_PPC64)
+ if (!ctx->sf_mode) {
+ tcg_gen_ext32u_tl(EA, EA);
+ }
+#endif
+ } else {
+#if defined(TARGET_PPC64)
+ if (!ctx->sf_mode) {
+ tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
+ } else
+#endif
tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
+ }
}
-static always_inline void gen_addr_reg_index (TCGv EA,
- DisasContext *ctx)
+static always_inline void gen_addr_reg_index (DisasContext *ctx, TCGv EA)
{
- if (rA(ctx->opcode) == 0)
+ if (rA(ctx->opcode) == 0) {
+#if defined(TARGET_PPC64)
+ if (!ctx->sf_mode) {
+ tcg_gen_ext32u_tl(EA, cpu_gpr[rB(ctx->opcode)]);
+ } else
+#endif
tcg_gen_mov_tl(EA, cpu_gpr[rB(ctx->opcode)]);
- else
+ } else {
tcg_gen_add_tl(EA, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+#if defined(TARGET_PPC64)
+ if (!ctx->sf_mode) {
+ tcg_gen_ext32u_tl(EA, EA);
+ }
+#endif
+ }
}
-static always_inline void gen_addr_register (TCGv EA,
- DisasContext *ctx)
+static always_inline void gen_addr_register (DisasContext *ctx, TCGv EA)
{
- if (rA(ctx->opcode) == 0)
+ if (rA(ctx->opcode) == 0) {
tcg_gen_movi_tl(EA, 0);
- else
- tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
-}
-
-#if defined(TARGET_PPC64)
-#define _GEN_MEM_FUNCS(name, mode) \
- &gen_op_##name##_##mode, \
- &gen_op_##name##_le_##mode, \
- &gen_op_##name##_64_##mode, \
- &gen_op_##name##_le_64_##mode
-#else
-#define _GEN_MEM_FUNCS(name, mode) \
- &gen_op_##name##_##mode, \
- &gen_op_##name##_le_##mode
-#endif
-#if defined(CONFIG_USER_ONLY)
+ } else {
#if defined(TARGET_PPC64)
-#define NB_MEM_FUNCS 4
-#else
-#define NB_MEM_FUNCS 2
+ if (!ctx->sf_mode) {
+ tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
+ } else
#endif
-#define GEN_MEM_FUNCS(name) \
- _GEN_MEM_FUNCS(name, raw)
-#else
+ tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+
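+/* Add a constant offset to an address, truncating the result to 32 bits when not
+ * in 64-bit (sf) mode; used e.g. for the second doubleword of lq/stq. */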
+static always_inline void gen_addr_add (DisasContext *ctx, TCGv ret, TCGv arg1, target_long val)
+{
+ tcg_gen_addi_tl(ret, arg1, val);
#if defined(TARGET_PPC64)
-#define NB_MEM_FUNCS 12
-#else
-#define NB_MEM_FUNCS 6
-#endif
-#define GEN_MEM_FUNCS(name) \
- _GEN_MEM_FUNCS(name, user), \
- _GEN_MEM_FUNCS(name, kernel), \
- _GEN_MEM_FUNCS(name, hypv)
+ if (!ctx->sf_mode) {
+ tcg_gen_ext32u_tl(ret, ret);
+ }
#endif
+}
+
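+/* Check EA against an alignment mask and raise POWERPC_EXCP_ALIGN if any masked
+ * bit is set; NIP is set back to the current instruction first because the
+ * exception is raised from a helper. */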
+static always_inline void gen_check_align (DisasContext *ctx, TCGv EA, int mask)
+{
+ int l1 = gen_new_label();
+ TCGv t0 = tcg_temp_new();
+ TCGv_i32 t1, t2;
+    /* NIP cannot be restored if the memory exception comes from a helper */
+ gen_update_nip(ctx, ctx->nip - 4);
+ tcg_gen_andi_tl(t0, EA, mask);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
+ t1 = tcg_const_i32(POWERPC_EXCP_ALIGN);
+ t2 = tcg_const_i32(0);
+ gen_helper_raise_exception_err(t1, t2);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+ gen_set_label(l1);
+ tcg_temp_free(t0);
+}
/*** Integer load ***/
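+/* The gen_qemu_* accessors below take the DisasContext so they can use
+ * ctx->mem_idx for the access and byte-swap the value when ctx->le_mode is set. */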
+static always_inline void gen_qemu_ld8u(DisasContext *ctx, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_qemu_ld8u(arg1, arg2, ctx->mem_idx);
+}
+
+static always_inline void gen_qemu_ld8s(DisasContext *ctx, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_qemu_ld8s(arg1, arg2, ctx->mem_idx);
+}
+
+static always_inline void gen_qemu_ld16u(DisasContext *ctx, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_qemu_ld16u(arg1, arg2, ctx->mem_idx);
+ if (unlikely(ctx->le_mode)) {
#if defined(TARGET_PPC64)
-#define GEN_QEMU_LD_PPC64(width) \
-static always_inline void gen_qemu_ld##width##_ppc64(TCGv t0, TCGv t1, int flags)\
-{ \
- if (likely(flags & 2)) \
- tcg_gen_qemu_ld##width(t0, t1, flags >> 2); \
- else { \
- TCGv addr = tcg_temp_new(); \
- tcg_gen_ext32u_tl(addr, t1); \
- tcg_gen_qemu_ld##width(t0, addr, flags >> 2); \
- tcg_temp_free(addr); \
- } \
-}
-GEN_QEMU_LD_PPC64(8u)
-GEN_QEMU_LD_PPC64(8s)
-GEN_QEMU_LD_PPC64(16u)
-GEN_QEMU_LD_PPC64(16s)
-GEN_QEMU_LD_PPC64(32u)
-GEN_QEMU_LD_PPC64(32s)
-GEN_QEMU_LD_PPC64(64)
-
-#define GEN_QEMU_ST_PPC64(width) \
-static always_inline void gen_qemu_st##width##_ppc64(TCGv t0, TCGv t1, int flags)\
-{ \
- if (likely(flags & 2)) \
- tcg_gen_qemu_st##width(t0, t1, flags >> 2); \
- else { \
- TCGv addr = tcg_temp_new(); \
- tcg_gen_ext32u_tl(addr, t1); \
- tcg_gen_qemu_st##width(t0, addr, flags >> 2); \
- tcg_temp_free(addr); \
- } \
-}
-GEN_QEMU_ST_PPC64(8)
-GEN_QEMU_ST_PPC64(16)
-GEN_QEMU_ST_PPC64(32)
-GEN_QEMU_ST_PPC64(64)
-
-static always_inline void gen_qemu_ld8u(TCGv arg0, TCGv arg1, int flags)
-{
- gen_qemu_ld8u_ppc64(arg0, arg1, flags);
-}
-
-static always_inline void gen_qemu_ld8s(TCGv arg0, TCGv arg1, int flags)
-{
- gen_qemu_ld8s_ppc64(arg0, arg1, flags);
-}
-
-static always_inline void gen_qemu_ld16u(TCGv arg0, TCGv arg1, int flags)
-{
- if (unlikely(flags & 1)) {
- TCGv_i32 t0;
- gen_qemu_ld16u_ppc64(arg0, arg1, flags);
- t0 = tcg_temp_new_i32();
- tcg_gen_trunc_tl_i32(t0, arg0);
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t0, arg1);
tcg_gen_bswap16_i32(t0, t0);
- tcg_gen_extu_i32_tl(arg0, t0);
+ tcg_gen_extu_i32_tl(arg1, t0);
tcg_temp_free_i32(t0);
- } else
- gen_qemu_ld16u_ppc64(arg0, arg1, flags);
+#else
+ tcg_gen_bswap16_i32(arg1, arg1);
+#endif
+ }
}
-static always_inline void gen_qemu_ld16s(TCGv arg0, TCGv arg1, int flags)
+static always_inline void gen_qemu_ld16s(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
- if (unlikely(flags & 1)) {
+ if (unlikely(ctx->le_mode)) {
+#if defined(TARGET_PPC64)
TCGv_i32 t0;
- gen_qemu_ld16u_ppc64(arg0, arg1, flags);
+ tcg_gen_qemu_ld16u(arg1, arg2, ctx->mem_idx);
t0 = tcg_temp_new_i32();
- tcg_gen_trunc_tl_i32(t0, arg0);
+ tcg_gen_trunc_tl_i32(t0, arg1);
tcg_gen_bswap16_i32(t0, t0);
- tcg_gen_extu_i32_tl(arg0, t0);
- tcg_gen_ext16s_tl(arg0, arg0);
+ tcg_gen_extu_i32_tl(arg1, t0);
+ tcg_gen_ext16s_tl(arg1, arg1);
tcg_temp_free_i32(t0);
- } else
- gen_qemu_ld16s_ppc64(arg0, arg1, flags);
+#else
+ tcg_gen_qemu_ld16u(arg1, arg2, ctx->mem_idx);
+ tcg_gen_bswap16_i32(arg1, arg1);
+ tcg_gen_ext16s_i32(arg1, arg1);
+#endif
+ } else {
+ tcg_gen_qemu_ld16s(arg1, arg2, ctx->mem_idx);
+ }
}
-static always_inline void gen_qemu_ld32u(TCGv arg0, TCGv arg1, int flags)
+static always_inline void gen_qemu_ld32u(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
- if (unlikely(flags & 1)) {
- TCGv_i32 t0;
- gen_qemu_ld32u_ppc64(arg0, arg1, flags);
- t0 = tcg_temp_new_i32();
- tcg_gen_trunc_tl_i32(t0, arg0);
+ tcg_gen_qemu_ld32u(arg1, arg2, ctx->mem_idx);
+ if (unlikely(ctx->le_mode)) {
+#if defined(TARGET_PPC64)
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t0, arg1);
tcg_gen_bswap_i32(t0, t0);
- tcg_gen_extu_i32_tl(arg0, t0);
+ tcg_gen_extu_i32_tl(arg1, t0);
tcg_temp_free_i32(t0);
- } else
- gen_qemu_ld32u_ppc64(arg0, arg1, flags);
+#else
+ tcg_gen_bswap_i32(arg1, arg1);
+#endif
+ }
}
-static always_inline void gen_qemu_ld32s(TCGv arg0, TCGv arg1, int flags)
+#if defined(TARGET_PPC64)
+static always_inline void gen_qemu_ld32s(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
- if (unlikely(flags & 1)) {
+    if (unlikely(ctx->le_mode)) {
TCGv_i32 t0;
- gen_qemu_ld32u_ppc64(arg0, arg1, flags);
+ tcg_gen_qemu_ld32u(arg1, arg2, ctx->mem_idx);
t0 = tcg_temp_new_i32();
- tcg_gen_trunc_tl_i32(t0, arg0);
+ tcg_gen_trunc_tl_i32(t0, arg1);
tcg_gen_bswap_i32(t0, t0);
- tcg_gen_ext_i32_tl(arg0, t0);
+ tcg_gen_ext_i32_tl(arg1, t0);
tcg_temp_free_i32(t0);
} else
- gen_qemu_ld32s_ppc64(arg0, arg1, flags);
+ tcg_gen_qemu_ld32s(arg1, arg2, ctx->mem_idx);
}
+#endif
-static always_inline void gen_qemu_ld64(TCGv arg0, TCGv arg1, int flags)
+static always_inline void gen_qemu_ld64(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
{
- gen_qemu_ld64_ppc64(arg0, arg1, flags);
- if (unlikely(flags & 1))
- tcg_gen_bswap_i64(arg0, arg0);
+ tcg_gen_qemu_ld64(arg1, arg2, ctx->mem_idx);
+ if (unlikely(ctx->le_mode)) {
+ tcg_gen_bswap_i64(arg1, arg1);
+ }
}
-static always_inline void gen_qemu_st8(TCGv arg0, TCGv arg1, int flags)
+static always_inline void gen_qemu_st8(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
- gen_qemu_st8_ppc64(arg0, arg1, flags);
+ tcg_gen_qemu_st8(arg1, arg2, ctx->mem_idx);
}
-static always_inline void gen_qemu_st16(TCGv arg0, TCGv arg1, int flags)
+static always_inline void gen_qemu_st16(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
- if (unlikely(flags & 1)) {
+ if (unlikely(ctx->le_mode)) {
+#if defined(TARGET_PPC64)
TCGv_i32 t0;
- TCGv_i64 t1;
+ TCGv t1;
t0 = tcg_temp_new_i32();
- tcg_gen_trunc_tl_i32(t0, arg0);
+ tcg_gen_trunc_tl_i32(t0, arg1);
tcg_gen_ext16u_i32(t0, t0);
tcg_gen_bswap16_i32(t0, t0);
- t1 = tcg_temp_new_i64();
+ t1 = tcg_temp_new();
tcg_gen_extu_i32_tl(t1, t0);
tcg_temp_free_i32(t0);
- gen_qemu_st16_ppc64(t1, arg1, flags);
- tcg_temp_free_i64(t1);
- } else
- gen_qemu_st16_ppc64(arg0, arg1, flags);
+ tcg_gen_qemu_st16(t1, arg2, ctx->mem_idx);
+ tcg_temp_free(t1);
+#else
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_ext16u_tl(t0, arg1);
+ tcg_gen_bswap16_i32(t0, t0);
+ tcg_gen_qemu_st16(t0, arg2, ctx->mem_idx);
+ tcg_temp_free(t0);
+#endif
+ } else {
+ tcg_gen_qemu_st16(arg1, arg2, ctx->mem_idx);
+ }
}
-static always_inline void gen_qemu_st32(TCGv arg0, TCGv arg1, int flags)
+static always_inline void gen_qemu_st32(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
- if (unlikely(flags & 1)) {
+ if (unlikely(ctx->le_mode)) {
+#if defined(TARGET_PPC64)
TCGv_i32 t0;
- TCGv_i64 t1;
+ TCGv t1;
t0 = tcg_temp_new_i32();
- tcg_gen_trunc_tl_i32(t0, arg0);
+ tcg_gen_trunc_tl_i32(t0, arg1);
tcg_gen_bswap_i32(t0, t0);
- t1 = tcg_temp_new_i64();
+ t1 = tcg_temp_new();
tcg_gen_extu_i32_tl(t1, t0);
tcg_temp_free_i32(t0);
- gen_qemu_st32_ppc64(t1, arg1, flags);
- tcg_temp_free_i64(t1);
- } else
- gen_qemu_st32_ppc64(arg0, arg1, flags);
+ tcg_gen_qemu_st32(t1, arg2, ctx->mem_idx);
+ tcg_temp_free(t1);
+#else
+ TCGv t0 = tcg_temp_new_i32();
+ tcg_gen_bswap_i32(t0, arg1);
+ tcg_gen_qemu_st32(t0, arg2, ctx->mem_idx);
+ tcg_temp_free(t0);
+#endif
+ } else {
+ tcg_gen_qemu_st32(arg1, arg2, ctx->mem_idx);
+ }
}
-static always_inline void gen_qemu_st64(TCGv arg0, TCGv arg1, int flags)
+static always_inline void gen_qemu_st64(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
{
- if (unlikely(flags & 1)) {
+ if (unlikely(ctx->le_mode)) {
TCGv_i64 t0 = tcg_temp_new_i64();
- tcg_gen_bswap_i64(t0, arg0);
- gen_qemu_st64_ppc64(t0, arg1, flags);
+ tcg_gen_bswap_i64(t0, arg1);
+ tcg_gen_qemu_st64(t0, arg2, ctx->mem_idx);
tcg_temp_free_i64(t0);
} else
- gen_qemu_st64_ppc64(arg0, arg1, flags);
-}
-
-
-#else /* defined(TARGET_PPC64) */
-#define GEN_QEMU_LD_PPC32(width) \
-static always_inline void gen_qemu_ld##width##_ppc32(TCGv arg0, TCGv arg1, int flags) \
-{ \
- tcg_gen_qemu_ld##width(arg0, arg1, flags >> 1); \
-}
-GEN_QEMU_LD_PPC32(8u)
-GEN_QEMU_LD_PPC32(8s)
-GEN_QEMU_LD_PPC32(16u)
-GEN_QEMU_LD_PPC32(16s)
-GEN_QEMU_LD_PPC32(32u)
-GEN_QEMU_LD_PPC32(32s)
-static always_inline void gen_qemu_ld64_ppc32(TCGv_i64 arg0, TCGv arg1, int flags)
-{
- tcg_gen_qemu_ld64(arg0, arg1, flags >> 1);
-}
-
-#define GEN_QEMU_ST_PPC32(width) \
-static always_inline void gen_qemu_st##width##_ppc32(TCGv arg0, TCGv arg1, int flags) \
-{ \
- tcg_gen_qemu_st##width(arg0, arg1, flags >> 1); \
-}
-GEN_QEMU_ST_PPC32(8)
-GEN_QEMU_ST_PPC32(16)
-GEN_QEMU_ST_PPC32(32)
-static always_inline void gen_qemu_st64_ppc32(TCGv_i64 arg0, TCGv arg1, int flags)
-{
- tcg_gen_qemu_st64(arg0, arg1, flags >> 1);
-}
-
-static always_inline void gen_qemu_ld8u(TCGv arg0, TCGv arg1, int flags)
-{
- gen_qemu_ld8u_ppc32(arg0, arg1, flags >> 1);
-}
-
-static always_inline void gen_qemu_ld8s(TCGv arg0, TCGv arg1, int flags)
-{
- gen_qemu_ld8s_ppc32(arg0, arg1, flags >> 1);
-}
-
-static always_inline void gen_qemu_ld16u(TCGv arg0, TCGv arg1, int flags)
-{
- gen_qemu_ld16u_ppc32(arg0, arg1, flags >> 1);
- if (unlikely(flags & 1))
- tcg_gen_bswap16_i32(arg0, arg0);
-}
-
-static always_inline void gen_qemu_ld16s(TCGv arg0, TCGv arg1, int flags)
-{
- if (unlikely(flags & 1)) {
- gen_qemu_ld16u_ppc32(arg0, arg1, flags);
- tcg_gen_bswap16_i32(arg0, arg0);
- tcg_gen_ext16s_i32(arg0, arg0);
- } else
- gen_qemu_ld16s_ppc32(arg0, arg1, flags);
-}
-
-static always_inline void gen_qemu_ld32u(TCGv arg0, TCGv arg1, int flags)
-{
- gen_qemu_ld32u_ppc32(arg0, arg1, flags);
- if (unlikely(flags & 1))
- tcg_gen_bswap_i32(arg0, arg0);
-}
-
-static always_inline void gen_qemu_ld64(TCGv_i64 arg0, TCGv arg1, int flags)
-{
- gen_qemu_ld64_ppc32(arg0, arg1, flags);
- if (unlikely(flags & 1))
- tcg_gen_bswap_i64(arg0, arg0);
-}
-
-static always_inline void gen_qemu_st8(TCGv arg0, TCGv arg1, int flags)
-{
- gen_qemu_st8_ppc32(arg0, arg1, flags);
-}
-
-static always_inline void gen_qemu_st16(TCGv arg0, TCGv arg1, int flags)
-{
- if (unlikely(flags & 1)) {
- TCGv_i32 temp = tcg_temp_new_i32();
- tcg_gen_ext16u_i32(temp, arg0);
- tcg_gen_bswap16_i32(temp, temp);
- gen_qemu_st16_ppc32(temp, arg1, flags);
- tcg_temp_free_i32(temp);
- } else
- gen_qemu_st16_ppc32(arg0, arg1, flags);
-}
-
-static always_inline void gen_qemu_st32(TCGv arg0, TCGv arg1, int flags)
-{
- if (unlikely(flags & 1)) {
- TCGv_i32 temp = tcg_temp_new_i32();
- tcg_gen_bswap_i32(temp, arg0);
- gen_qemu_st32_ppc32(temp, arg1, flags);
- tcg_temp_free_i32(temp);
- } else
- gen_qemu_st32_ppc32(arg0, arg1, flags);
-}
-
-static always_inline void gen_qemu_st64(TCGv_i64 arg0, TCGv arg1, int flags)
-{
- if (unlikely(flags & 1)) {
- TCGv_i64 temp = tcg_temp_new_i64();
- tcg_gen_bswap_i64(temp, arg0);
- gen_qemu_st64_ppc32(temp, arg1, flags);
- tcg_temp_free_i64(temp);
- } else
- gen_qemu_st64_ppc32(arg0, arg1, flags);
+ tcg_gen_qemu_st64(arg1, arg2, ctx->mem_idx);
}
-#endif
#define GEN_LD(name, ldop, opc, type) \
GEN_HANDLER(name, opc, 0xFF, 0xFF, 0x00000000, type) \
{ \
- TCGv EA = tcg_temp_new(); \
- gen_set_access_type(ACCESS_INT); \
- gen_addr_imm_index(EA, ctx, 0); \
- gen_qemu_##ldop(cpu_gpr[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ TCGv EA; \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_imm_index(ctx, EA, 0); \
+ gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
tcg_temp_free(EA); \
}
TCGv EA; \
if (unlikely(rA(ctx->opcode) == 0 || \
rA(ctx->opcode) == rD(ctx->opcode))) { \
- GEN_EXCP_INVAL(ctx); \
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
return; \
} \
+ gen_set_access_type(ctx, ACCESS_INT); \
EA = tcg_temp_new(); \
- gen_set_access_type(ACCESS_INT); \
if (type == PPC_64B) \
- gen_addr_imm_index(EA, ctx, 0x03); \
+ gen_addr_imm_index(ctx, EA, 0x03); \
else \
- gen_addr_imm_index(EA, ctx, 0); \
- gen_qemu_##ldop(cpu_gpr[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_addr_imm_index(ctx, EA, 0); \
+ gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
tcg_temp_free(EA); \
}
TCGv EA; \
if (unlikely(rA(ctx->opcode) == 0 || \
rA(ctx->opcode) == rD(ctx->opcode))) { \
- GEN_EXCP_INVAL(ctx); \
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
return; \
} \
+ gen_set_access_type(ctx, ACCESS_INT); \
EA = tcg_temp_new(); \
- gen_set_access_type(ACCESS_INT); \
- gen_addr_reg_index(EA, ctx); \
- gen_qemu_##ldop(cpu_gpr[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
tcg_temp_free(EA); \
}
#define GEN_LDX(name, ldop, opc2, opc3, type) \
GEN_HANDLER(name##x, 0x1F, opc2, opc3, 0x00000001, type) \
{ \
- TCGv EA = tcg_temp_new(); \
- gen_set_access_type(ACCESS_INT); \
- gen_addr_reg_index(EA, ctx); \
- gen_qemu_##ldop(cpu_gpr[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ TCGv EA; \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
tcg_temp_free(EA); \
}
if (Rc(ctx->opcode)) {
if (unlikely(rA(ctx->opcode) == 0 ||
rA(ctx->opcode) == rD(ctx->opcode))) {
- GEN_EXCP_INVAL(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
return;
}
}
+ gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
- gen_set_access_type(ACCESS_INT);
- gen_addr_imm_index(EA, ctx, 0x03);
+ gen_addr_imm_index(ctx, EA, 0x03);
if (ctx->opcode & 0x02) {
/* lwa (lwau is undefined) */
- gen_qemu_ld32s(cpu_gpr[rD(ctx->opcode)], EA, ctx->mem_idx);
+ gen_qemu_ld32s(ctx, cpu_gpr[rD(ctx->opcode)], EA);
} else {
/* ld - ldu */
- gen_qemu_ld64(cpu_gpr[rD(ctx->opcode)], EA, ctx->mem_idx);
+ gen_qemu_ld64(ctx, cpu_gpr[rD(ctx->opcode)], EA);
}
if (Rc(ctx->opcode))
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
GEN_HANDLER(lq, 0x38, 0xFF, 0xFF, 0x00000000, PPC_64BX)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVOPC(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
int ra, rd;
TCGv EA;
/* Restore CPU state */
- if (unlikely(ctx->supervisor == 0)) {
- GEN_EXCP_PRIVOPC(ctx);
+ if (unlikely(ctx->mem_idx == 0)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
ra = rA(ctx->opcode);
rd = rD(ctx->opcode);
if (unlikely((rd & 1) || rd == ra)) {
- GEN_EXCP_INVAL(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
return;
}
- if (unlikely(ctx->mem_idx & 1)) {
+ if (unlikely(ctx->le_mode)) {
/* Little-endian mode is not handled */
- GEN_EXCP(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_LE);
+ gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_LE);
return;
}
+ gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
- gen_set_access_type(ACCESS_INT);
- gen_addr_imm_index(EA, ctx, 0x0F);
- gen_qemu_ld64(cpu_gpr[rd], EA, ctx->mem_idx);
- tcg_gen_addi_tl(EA, EA, 8);
- gen_qemu_ld64(cpu_gpr[rd+1], EA, ctx->mem_idx);
+ gen_addr_imm_index(ctx, EA, 0x0F);
+ gen_qemu_ld64(ctx, cpu_gpr[rd], EA);
+ gen_addr_add(ctx, EA, EA, 8);
+ gen_qemu_ld64(ctx, cpu_gpr[rd+1], EA);
tcg_temp_free(EA);
#endif
}
#define GEN_ST(name, stop, opc, type) \
GEN_HANDLER(name, opc, 0xFF, 0xFF, 0x00000000, type) \
{ \
- TCGv EA = tcg_temp_new(); \
- gen_set_access_type(ACCESS_INT); \
- gen_addr_imm_index(EA, ctx, 0); \
- gen_qemu_##stop(cpu_gpr[rS(ctx->opcode)], EA, ctx->mem_idx); \
+ TCGv EA; \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_imm_index(ctx, EA, 0); \
+ gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
tcg_temp_free(EA); \
}
{ \
TCGv EA; \
if (unlikely(rA(ctx->opcode) == 0)) { \
- GEN_EXCP_INVAL(ctx); \
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
return; \
} \
+ gen_set_access_type(ctx, ACCESS_INT); \
EA = tcg_temp_new(); \
- gen_set_access_type(ACCESS_INT); \
if (type == PPC_64B) \
- gen_addr_imm_index(EA, ctx, 0x03); \
+ gen_addr_imm_index(ctx, EA, 0x03); \
else \
- gen_addr_imm_index(EA, ctx, 0); \
- gen_qemu_##stop(cpu_gpr[rS(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_addr_imm_index(ctx, EA, 0); \
+ gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
tcg_temp_free(EA); \
}
{ \
TCGv EA; \
if (unlikely(rA(ctx->opcode) == 0)) { \
- GEN_EXCP_INVAL(ctx); \
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
return; \
} \
+ gen_set_access_type(ctx, ACCESS_INT); \
EA = tcg_temp_new(); \
- gen_set_access_type(ACCESS_INT); \
- gen_addr_reg_index(EA, ctx); \
- gen_qemu_##stop(cpu_gpr[rS(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
tcg_temp_free(EA); \
}
#define GEN_STX(name, stop, opc2, opc3, type) \
GEN_HANDLER(name##x, 0x1F, opc2, opc3, 0x00000001, type) \
{ \
- TCGv EA = tcg_temp_new(); \
- gen_set_access_type(ACCESS_INT); \
- gen_addr_reg_index(EA, ctx); \
- gen_qemu_##stop(cpu_gpr[rS(ctx->opcode)], EA, ctx->mem_idx); \
+ TCGv EA; \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
tcg_temp_free(EA); \
}
rs = rS(ctx->opcode);
if ((ctx->opcode & 0x3) == 0x2) {
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVOPC(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
/* stq */
- if (unlikely(ctx->supervisor == 0)) {
- GEN_EXCP_PRIVOPC(ctx);
+ if (unlikely(ctx->mem_idx == 0)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
if (unlikely(rs & 1)) {
- GEN_EXCP_INVAL(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
return;
}
- if (unlikely(ctx->mem_idx & 1)) {
+ if (unlikely(ctx->le_mode)) {
/* Little-endian mode is not handled */
- GEN_EXCP(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_LE);
+ gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_LE);
return;
}
+ gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
- gen_set_access_type(ACCESS_INT);
- gen_addr_imm_index(EA, ctx, 0x03);
- gen_qemu_st64(cpu_gpr[rs], EA, ctx->mem_idx);
- tcg_gen_addi_tl(EA, EA, 8);
- gen_qemu_st64(cpu_gpr[rs+1], EA, ctx->mem_idx);
+ gen_addr_imm_index(ctx, EA, 0x03);
+ gen_qemu_st64(ctx, cpu_gpr[rs], EA);
+ gen_addr_add(ctx, EA, EA, 8);
+ gen_qemu_st64(ctx, cpu_gpr[rs+1], EA);
tcg_temp_free(EA);
#endif
} else {
/* std / stdu */
if (Rc(ctx->opcode)) {
if (unlikely(rA(ctx->opcode) == 0)) {
- GEN_EXCP_INVAL(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
return;
}
}
+ gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
- gen_set_access_type(ACCESS_INT);
- gen_addr_imm_index(EA, ctx, 0x03);
- gen_qemu_st64(cpu_gpr[rs], EA, ctx->mem_idx);
+ gen_addr_imm_index(ctx, EA, 0x03);
+ gen_qemu_st64(ctx, cpu_gpr[rs], EA);
if (Rc(ctx->opcode))
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
tcg_temp_free(EA);
#endif
/*** Integer load and store with byte reverse ***/
/* lhbrx */
-void always_inline gen_qemu_ld16ur(TCGv t0, TCGv t1, int flags)
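+/* Byte-reversed access: note that the swap condition is inverted with respect to
+ * the normal accessors, the value is swapped when the CPU is *not* in
+ * little-endian mode. */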
+static always_inline void gen_qemu_ld16ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
- TCGv_i32 temp = tcg_temp_new_i32();
- gen_qemu_ld16u(t0, t1, flags);
- tcg_gen_trunc_tl_i32(temp, t0);
- tcg_gen_bswap16_i32(temp, temp);
- tcg_gen_extu_i32_tl(t0, temp);
- tcg_temp_free_i32(temp);
+ tcg_gen_qemu_ld16u(arg1, arg2, ctx->mem_idx);
+ if (likely(!ctx->le_mode)) {
+#if defined(TARGET_PPC64)
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t0, arg1);
+ tcg_gen_bswap16_i32(t0, t0);
+ tcg_gen_extu_i32_tl(arg1, t0);
+ tcg_temp_free_i32(t0);
+#else
+ tcg_gen_bswap16_i32(arg1, arg1);
+#endif
+ }
}
GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER);
/* lwbrx */
-void always_inline gen_qemu_ld32ur(TCGv t0, TCGv t1, int flags)
+static always_inline void gen_qemu_ld32ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
- TCGv_i32 temp = tcg_temp_new_i32();
- gen_qemu_ld32u(t0, t1, flags);
- tcg_gen_trunc_tl_i32(temp, t0);
- tcg_gen_bswap_i32(temp, temp);
- tcg_gen_extu_i32_tl(t0, temp);
- tcg_temp_free_i32(temp);
+ tcg_gen_qemu_ld32u(arg1, arg2, ctx->mem_idx);
+ if (likely(!ctx->le_mode)) {
+#if defined(TARGET_PPC64)
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t0, arg1);
+ tcg_gen_bswap_i32(t0, t0);
+ tcg_gen_extu_i32_tl(arg1, t0);
+ tcg_temp_free_i32(t0);
+#else
+ tcg_gen_bswap_i32(arg1, arg1);
+#endif
+ }
}
GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER);
/* sthbrx */
-void always_inline gen_qemu_st16r(TCGv t0, TCGv t1, int flags)
+static always_inline void gen_qemu_st16r(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
- TCGv_i32 temp = tcg_temp_new_i32();
- TCGv t2 = tcg_temp_new();
- tcg_gen_trunc_tl_i32(temp, t0);
- tcg_gen_ext16u_i32(temp, temp);
- tcg_gen_bswap16_i32(temp, temp);
- tcg_gen_extu_i32_tl(t2, temp);
- tcg_temp_free_i32(temp);
- gen_qemu_st16(t2, t1, flags);
- tcg_temp_free(t2);
+ if (likely(!ctx->le_mode)) {
+#if defined(TARGET_PPC64)
+ TCGv_i32 t0;
+ TCGv t1;
+ t0 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t0, arg1);
+ tcg_gen_ext16u_i32(t0, t0);
+ tcg_gen_bswap16_i32(t0, t0);
+ t1 = tcg_temp_new();
+ tcg_gen_extu_i32_tl(t1, t0);
+ tcg_temp_free_i32(t0);
+ tcg_gen_qemu_st16(t1, arg2, ctx->mem_idx);
+ tcg_temp_free(t1);
+#else
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_ext16u_tl(t0, arg1);
+ tcg_gen_bswap16_i32(t0, t0);
+ tcg_gen_qemu_st16(t0, arg2, ctx->mem_idx);
+ tcg_temp_free(t0);
+#endif
+ } else {
+ tcg_gen_qemu_st16(arg1, arg2, ctx->mem_idx);
+ }
}
GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER);
/* stwbrx */
-void always_inline gen_qemu_st32r(TCGv t0, TCGv t1, int flags)
-{
- TCGv_i32 temp = tcg_temp_new_i32();
- TCGv t2 = tcg_temp_new();
- tcg_gen_trunc_tl_i32(temp, t0);
- tcg_gen_bswap_i32(temp, temp);
- tcg_gen_extu_i32_tl(t2, temp);
- tcg_temp_free_i32(temp);
- gen_qemu_st32(t2, t1, flags);
- tcg_temp_free(t2);
-}
-GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER);
-
-/*** Integer load and store multiple ***/
-/* lmw */
-GEN_HANDLER(lmw, 0x2E, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
+static always_inline void gen_qemu_st32r(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
- TCGv t0 = tcg_temp_new();
- TCGv_i32 t1 = tcg_const_i32(rD(ctx->opcode));
+ if (likely(!ctx->le_mode)) {
+#if defined(TARGET_PPC64)
+ TCGv_i32 t0;
+ TCGv t1;
+ t0 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t0, arg1);
+ tcg_gen_bswap_i32(t0, t0);
+ t1 = tcg_temp_new();
+ tcg_gen_extu_i32_tl(t1, t0);
+ tcg_temp_free_i32(t0);
+ tcg_gen_qemu_st32(t1, arg2, ctx->mem_idx);
+ tcg_temp_free(t1);
+#else
+ TCGv t0 = tcg_temp_new_i32();
+ tcg_gen_bswap_i32(t0, arg1);
+ tcg_gen_qemu_st32(t0, arg2, ctx->mem_idx);
+ tcg_temp_free(t0);
+#endif
+ } else {
+ tcg_gen_qemu_st32(arg1, arg2, ctx->mem_idx);
+ }
+}
+GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER);
+
+/*** Integer load and store multiple ***/
+/* lmw */
+GEN_HANDLER(lmw, 0x2E, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
+{
+ TCGv t0;
+ TCGv_i32 t1;
+ gen_set_access_type(ctx, ACCESS_INT);
    /* NIP cannot be restored if the memory exception comes from a helper */
gen_update_nip(ctx, ctx->nip - 4);
- gen_addr_imm_index(t0, ctx, 0);
+ t0 = tcg_temp_new();
+ t1 = tcg_const_i32(rD(ctx->opcode));
+ gen_addr_imm_index(ctx, t0, 0);
gen_helper_lmw(t0, t1);
tcg_temp_free(t0);
tcg_temp_free_i32(t1);
/* stmw */
GEN_HANDLER(stmw, 0x2F, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
{
- TCGv t0 = tcg_temp_new();
- TCGv_i32 t1 = tcg_const_i32(rS(ctx->opcode));
+ TCGv t0;
+ TCGv_i32 t1;
+ gen_set_access_type(ctx, ACCESS_INT);
    /* NIP cannot be restored if the memory exception comes from a helper */
gen_update_nip(ctx, ctx->nip - 4);
- gen_addr_imm_index(t0, ctx, 0);
+ t0 = tcg_temp_new();
+ t1 = tcg_const_i32(rS(ctx->opcode));
+ gen_addr_imm_index(ctx, t0, 0);
gen_helper_stmw(t0, t1);
tcg_temp_free(t0);
tcg_temp_free_i32(t1);
if (unlikely(((start + nr) > 32 &&
start <= ra && (start + nr - 32) > ra) ||
((start + nr) <= 32 && start <= ra && (start + nr) > ra))) {
- GEN_EXCP(ctx, POWERPC_EXCP_PROGRAM,
- POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_LSWX);
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_LSWX);
return;
}
+ gen_set_access_type(ctx, ACCESS_INT);
    /* NIP cannot be restored if the memory exception comes from a helper */
gen_update_nip(ctx, ctx->nip - 4);
t0 = tcg_temp_new();
- gen_addr_register(t0, ctx);
+ gen_addr_register(ctx, t0);
t1 = tcg_const_i32(nb);
t2 = tcg_const_i32(start);
gen_helper_lsw(t0, t1, t2);
/* lswx */
GEN_HANDLER(lswx, 0x1F, 0x15, 0x10, 0x00000001, PPC_STRING)
{
- TCGv t0 = tcg_temp_new();
- TCGv_i32 t1 = tcg_const_i32(rD(ctx->opcode));
- TCGv_i32 t2 = tcg_const_i32(rA(ctx->opcode));
- TCGv_i32 t3 = tcg_const_i32(rB(ctx->opcode));
+ TCGv t0;
+ TCGv_i32 t1, t2, t3;
+ gen_set_access_type(ctx, ACCESS_INT);
    /* NIP cannot be restored if the memory exception comes from a helper */
gen_update_nip(ctx, ctx->nip - 4);
- gen_addr_reg_index(t0, ctx);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ t1 = tcg_const_i32(rD(ctx->opcode));
+ t2 = tcg_const_i32(rA(ctx->opcode));
+ t3 = tcg_const_i32(rB(ctx->opcode));
gen_helper_lswx(t0, t1, t2, t3);
tcg_temp_free(t0);
tcg_temp_free_i32(t1);
/* stswi */
GEN_HANDLER(stswi, 0x1F, 0x15, 0x16, 0x00000001, PPC_STRING)
{
+ TCGv t0;
+ TCGv_i32 t1, t2;
int nb = NB(ctx->opcode);
- TCGv t0 = tcg_temp_new();
- TCGv_i32 t1;
- TCGv_i32 t2 = tcg_const_i32(rS(ctx->opcode));
+ gen_set_access_type(ctx, ACCESS_INT);
    /* NIP cannot be restored if the memory exception comes from a helper */
gen_update_nip(ctx, ctx->nip - 4);
- gen_addr_register(t0, ctx);
+ t0 = tcg_temp_new();
+ gen_addr_register(ctx, t0);
if (nb == 0)
nb = 32;
t1 = tcg_const_i32(nb);
+ t2 = tcg_const_i32(rS(ctx->opcode));
gen_helper_stsw(t0, t1, t2);
tcg_temp_free(t0);
tcg_temp_free_i32(t1);
/* stswx */
GEN_HANDLER(stswx, 0x1F, 0x15, 0x14, 0x00000001, PPC_STRING)
{
- TCGv t0 = tcg_temp_new();
- TCGv_i32 t1 = tcg_temp_new_i32();
- TCGv_i32 t2 = tcg_const_i32(rS(ctx->opcode));
+ TCGv t0;
+ TCGv_i32 t1, t2;
+ gen_set_access_type(ctx, ACCESS_INT);
    /* NIP cannot be restored if the memory exception comes from a helper */
gen_update_nip(ctx, ctx->nip - 4);
- gen_addr_reg_index(t0, ctx);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ t1 = tcg_temp_new_i32();
tcg_gen_trunc_tl_i32(t1, cpu_xer);
tcg_gen_andi_i32(t1, t1, 0x7F);
+ t2 = tcg_const_i32(rS(ctx->opcode));
gen_helper_stsw(t0, t1, t2);
tcg_temp_free(t0);
tcg_temp_free_i32(t1);
/* isync */
GEN_HANDLER(isync, 0x13, 0x16, 0x04, 0x03FFF801, PPC_MEM)
{
- GEN_STOP(ctx);
+ gen_stop_exception(ctx);
}
-#define op_lwarx() (*gen_op_lwarx[ctx->mem_idx])()
-#define op_stwcx() (*gen_op_stwcx[ctx->mem_idx])()
-static GenOpFunc *gen_op_lwarx[NB_MEM_FUNCS] = {
- GEN_MEM_FUNCS(lwarx),
-};
-static GenOpFunc *gen_op_stwcx[NB_MEM_FUNCS] = {
- GEN_MEM_FUNCS(stwcx),
-};
-
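+/* Load-and-reserve / store-conditional: lwarx records the (alignment-checked)
+ * address in cpu_reserve; stwcx. sets CR0 from XER[SO], and performs the store
+ * and sets CR0[EQ] only when the address still matches the reservation, which is
+ * cleared in either case. */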
/* lwarx */
GEN_HANDLER(lwarx, 0x1F, 0x14, 0x00, 0x00000001, PPC_RES)
{
- /* NIP cannot be restored if the memory exception comes from an helper */
- gen_update_nip(ctx, ctx->nip - 4);
- gen_set_access_type(ACCESS_RES);
- gen_addr_reg_index(cpu_T[0], ctx);
- op_lwarx();
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[1]);
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_RES);
+ t0 = tcg_temp_local_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_check_align(ctx, t0, 0x03);
+ gen_qemu_ld32u(ctx, cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_gen_mov_tl(cpu_reserve, t0);
+ tcg_temp_free(t0);
}
/* stwcx. */
GEN_HANDLER2(stwcx_, "stwcx.", 0x1F, 0x16, 0x04, 0x00000000, PPC_RES)
{
- /* NIP cannot be restored if the memory exception comes from an helper */
- gen_update_nip(ctx, ctx->nip - 4);
- gen_set_access_type(ACCESS_RES);
- gen_addr_reg_index(cpu_T[0], ctx);
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rS(ctx->opcode)]);
- op_stwcx();
+ int l1;
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_RES);
+ t0 = tcg_temp_local_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_check_align(ctx, t0, 0x03);
+ tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_xer);
+ tcg_gen_shri_i32(cpu_crf[0], cpu_crf[0], XER_SO);
+ tcg_gen_andi_i32(cpu_crf[0], cpu_crf[0], 1);
+ l1 = gen_new_label();
+ tcg_gen_brcond_tl(TCG_COND_NE, t0, cpu_reserve, l1);
+ tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 1 << CRF_EQ);
+ gen_qemu_st32(ctx, cpu_gpr[rS(ctx->opcode)], t0);
+ gen_set_label(l1);
+ tcg_gen_movi_tl(cpu_reserve, -1);
+ tcg_temp_free(t0);
}
#if defined(TARGET_PPC64)
-#define op_ldarx() (*gen_op_ldarx[ctx->mem_idx])()
-#define op_stdcx() (*gen_op_stdcx[ctx->mem_idx])()
-static GenOpFunc *gen_op_ldarx[NB_MEM_FUNCS] = {
- GEN_MEM_FUNCS(ldarx),
-};
-static GenOpFunc *gen_op_stdcx[NB_MEM_FUNCS] = {
- GEN_MEM_FUNCS(stdcx),
-};
-
/* ldarx */
GEN_HANDLER(ldarx, 0x1F, 0x14, 0x02, 0x00000001, PPC_64B)
{
- /* NIP cannot be restored if the memory exception comes from an helper */
- gen_update_nip(ctx, ctx->nip - 4);
- gen_set_access_type(ACCESS_RES);
- gen_addr_reg_index(cpu_T[0], ctx);
- op_ldarx();
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[1]);
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_RES);
+ t0 = tcg_temp_local_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_check_align(ctx, t0, 0x07);
+ gen_qemu_ld64(ctx, cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_gen_mov_tl(cpu_reserve, t0);
+ tcg_temp_free(t0);
}
/* stdcx. */
GEN_HANDLER2(stdcx_, "stdcx.", 0x1F, 0x16, 0x06, 0x00000000, PPC_64B)
{
- /* NIP cannot be restored if the memory exception comes from an helper */
- gen_update_nip(ctx, ctx->nip - 4);
- gen_set_access_type(ACCESS_RES);
- gen_addr_reg_index(cpu_T[0], ctx);
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rS(ctx->opcode)]);
- op_stdcx();
+ int l1;
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_RES);
+ t0 = tcg_temp_local_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_check_align(ctx, t0, 0x07);
+ tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_xer);
+ tcg_gen_shri_i32(cpu_crf[0], cpu_crf[0], XER_SO);
+ tcg_gen_andi_i32(cpu_crf[0], cpu_crf[0], 1);
+ l1 = gen_new_label();
+ tcg_gen_brcond_tl(TCG_COND_NE, t0, cpu_reserve, l1);
+ tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 1 << CRF_EQ);
+ gen_qemu_st64(ctx, cpu_gpr[rS(ctx->opcode)], t0);
+ gen_set_label(l1);
+ tcg_gen_movi_tl(cpu_reserve, -1);
+ tcg_temp_free(t0);
}
#endif /* defined(TARGET_PPC64) */
tcg_gen_st_i32(t0, cpu_env, offsetof(CPUState, halted));
tcg_temp_free_i32(t0);
/* Stop translation, as the CPU is supposed to sleep from now */
- GEN_EXCP(ctx, EXCP_HLT, 1);
+ gen_exception_err(ctx, EXCP_HLT, 1);
}
/*** Floating-point load ***/
{ \
TCGv EA; \
if (unlikely(!ctx->fpu_enabled)) { \
- GEN_EXCP_NO_FP(ctx); \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
return; \
} \
- gen_set_access_type(ACCESS_FLOAT); \
+ gen_set_access_type(ctx, ACCESS_FLOAT); \
EA = tcg_temp_new(); \
- gen_addr_imm_index(EA, ctx, 0); \
- gen_qemu_##ldop(cpu_fpr[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_addr_imm_index(ctx, EA, 0); \
+ gen_qemu_##ldop(ctx, cpu_fpr[rD(ctx->opcode)], EA); \
tcg_temp_free(EA); \
}
{ \
TCGv EA; \
if (unlikely(!ctx->fpu_enabled)) { \
- GEN_EXCP_NO_FP(ctx); \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
return; \
} \
if (unlikely(rA(ctx->opcode) == 0)) { \
- GEN_EXCP_INVAL(ctx); \
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
return; \
} \
- gen_set_access_type(ACCESS_FLOAT); \
+ gen_set_access_type(ctx, ACCESS_FLOAT); \
EA = tcg_temp_new(); \
- gen_addr_imm_index(EA, ctx, 0); \
- gen_qemu_##ldop(cpu_fpr[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_addr_imm_index(ctx, EA, 0); \
+ gen_qemu_##ldop(ctx, cpu_fpr[rD(ctx->opcode)], EA); \
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
tcg_temp_free(EA); \
}
{ \
TCGv EA; \
if (unlikely(!ctx->fpu_enabled)) { \
- GEN_EXCP_NO_FP(ctx); \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
return; \
} \
if (unlikely(rA(ctx->opcode) == 0)) { \
- GEN_EXCP_INVAL(ctx); \
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
return; \
} \
- gen_set_access_type(ACCESS_FLOAT); \
+ gen_set_access_type(ctx, ACCESS_FLOAT); \
EA = tcg_temp_new(); \
- gen_addr_reg_index(EA, ctx); \
- gen_qemu_##ldop(cpu_fpr[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##ldop(ctx, cpu_fpr[rD(ctx->opcode)], EA); \
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
tcg_temp_free(EA); \
}
{ \
TCGv EA; \
if (unlikely(!ctx->fpu_enabled)) { \
- GEN_EXCP_NO_FP(ctx); \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
return; \
} \
- gen_set_access_type(ACCESS_FLOAT); \
+ gen_set_access_type(ctx, ACCESS_FLOAT); \
EA = tcg_temp_new(); \
- gen_addr_reg_index(EA, ctx); \
- gen_qemu_##ldop(cpu_fpr[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##ldop(ctx, cpu_fpr[rD(ctx->opcode)], EA); \
tcg_temp_free(EA); \
}
GEN_LDUXF(name, ldop, op | 0x01, type); \
GEN_LDXF(name, ldop, 0x17, op | 0x00, type)
-static always_inline void gen_qemu_ld32fs(TCGv_i64 arg1, TCGv arg2, int flags)
+static always_inline void gen_qemu_ld32fs(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
{
TCGv t0 = tcg_temp_new();
TCGv_i32 t1 = tcg_temp_new_i32();
- gen_qemu_ld32u(t0, arg2, flags);
+ gen_qemu_ld32u(ctx, t0, arg2);
tcg_gen_trunc_tl_i32(t1, t0);
tcg_temp_free(t0);
gen_helper_float32_to_float64(arg1, t1);
{ \
TCGv EA; \
if (unlikely(!ctx->fpu_enabled)) { \
- GEN_EXCP_NO_FP(ctx); \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
return; \
} \
- gen_set_access_type(ACCESS_FLOAT); \
+ gen_set_access_type(ctx, ACCESS_FLOAT); \
EA = tcg_temp_new(); \
- gen_addr_imm_index(EA, ctx, 0); \
- gen_qemu_##stop(cpu_fpr[rS(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_addr_imm_index(ctx, EA, 0); \
+ gen_qemu_##stop(ctx, cpu_fpr[rS(ctx->opcode)], EA); \
tcg_temp_free(EA); \
}
{ \
TCGv EA; \
if (unlikely(!ctx->fpu_enabled)) { \
- GEN_EXCP_NO_FP(ctx); \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
return; \
} \
if (unlikely(rA(ctx->opcode) == 0)) { \
- GEN_EXCP_INVAL(ctx); \
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
return; \
} \
- gen_set_access_type(ACCESS_FLOAT); \
+ gen_set_access_type(ctx, ACCESS_FLOAT); \
EA = tcg_temp_new(); \
- gen_addr_imm_index(EA, ctx, 0); \
- gen_qemu_##stop(cpu_fpr[rS(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_addr_imm_index(ctx, EA, 0); \
+ gen_qemu_##stop(ctx, cpu_fpr[rS(ctx->opcode)], EA); \
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
tcg_temp_free(EA); \
}
{ \
TCGv EA; \
if (unlikely(!ctx->fpu_enabled)) { \
- GEN_EXCP_NO_FP(ctx); \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
return; \
} \
if (unlikely(rA(ctx->opcode) == 0)) { \
- GEN_EXCP_INVAL(ctx); \
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
return; \
} \
- gen_set_access_type(ACCESS_FLOAT); \
+ gen_set_access_type(ctx, ACCESS_FLOAT); \
EA = tcg_temp_new(); \
- gen_addr_reg_index(EA, ctx); \
- gen_qemu_##stop(cpu_fpr[rS(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##stop(ctx, cpu_fpr[rS(ctx->opcode)], EA); \
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
tcg_temp_free(EA); \
}
{ \
TCGv EA; \
if (unlikely(!ctx->fpu_enabled)) { \
- GEN_EXCP_NO_FP(ctx); \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
return; \
} \
- gen_set_access_type(ACCESS_FLOAT); \
+ gen_set_access_type(ctx, ACCESS_FLOAT); \
EA = tcg_temp_new(); \
- gen_addr_reg_index(EA, ctx); \
- gen_qemu_##stop(cpu_fpr[rS(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##stop(ctx, cpu_fpr[rS(ctx->opcode)], EA); \
tcg_temp_free(EA); \
}
GEN_STUXF(name, stop, op | 0x01, type); \
GEN_STXF(name, stop, 0x17, op | 0x00, type)
-static always_inline void gen_qemu_st32fs(TCGv_i64 arg1, TCGv arg2, int flags)
+static always_inline void gen_qemu_st32fs(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
{
TCGv_i32 t0 = tcg_temp_new_i32();
TCGv t1 = tcg_temp_new();
gen_helper_float64_to_float32(t0, arg1);
tcg_gen_extu_i32_tl(t1, t0);
tcg_temp_free_i32(t0);
- gen_qemu_st32(t1, arg2, flags);
+ gen_qemu_st32(ctx, t1, arg2);
tcg_temp_free(t1);
}
GEN_STFS(stfs, st32fs, 0x14, PPC_FLOAT);
/* Optional: */
-static always_inline void gen_qemu_st32fiw(TCGv_i64 arg1, TCGv arg2, int flags)
+static always_inline void gen_qemu_st32fiw(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
{
TCGv t0 = tcg_temp_new();
    tcg_gen_trunc_i64_tl(t0, arg1);
- gen_qemu_st32(t0, arg2, flags);
+ gen_qemu_st32(ctx, t0, arg2);
tcg_temp_free(t0);
}
/* stfiwx */
ctx->exception == POWERPC_EXCP_BRANCH) {
target_ulong tmp = ctx->nip;
ctx->nip = dest;
- GEN_EXCP(ctx, POWERPC_EXCP_TRACE, 0);
+ gen_exception(ctx, POWERPC_EXCP_TRACE);
ctx->nip = tmp;
}
if (ctx->singlestep_enabled & GDBSTUB_SINGLE_STEP) {
- gen_update_nip(ctx, dest);
- gen_helper_raise_debug();
+ gen_debug_exception(ctx);
}
}
tcg_gen_exit_tb(0);
/* Decrement and test CTR */
TCGv temp = tcg_temp_new();
if (unlikely(type == BCOND_CTR)) {
- GEN_EXCP_INVAL(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
return;
}
tcg_gen_subi_tl(cpu_ctr, cpu_ctr, 1);
}
/*** System linkage ***/
/* rfi (supervisor only) */
GEN_HANDLER(rfi, 0x13, 0x12, 0x01, 0x03FF8001, PPC_FLOW)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVOPC(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
/* Restore CPU state */
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVOPC(ctx);
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
- gen_op_rfi();
- GEN_SYNC(ctx);
+ gen_helper_rfi();
+ gen_sync_exception(ctx);
#endif
}
GEN_HANDLER(rfid, 0x13, 0x12, 0x00, 0x03FF8001, PPC_64B)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVOPC(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
/* Restore CPU state */
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVOPC(ctx);
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
- gen_op_rfid();
- GEN_SYNC(ctx);
+ gen_helper_rfid();
+ gen_sync_exception(ctx);
#endif
}
GEN_HANDLER(hrfid, 0x13, 0x12, 0x08, 0x03FF8001, PPC_64H)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVOPC(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
/* Restore CPU state */
- if (unlikely(ctx->supervisor <= 1)) {
- GEN_EXCP_PRIVOPC(ctx);
+ if (unlikely(ctx->mem_idx <= 1)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
- gen_op_hrfid();
- GEN_SYNC(ctx);
+ gen_helper_hrfid();
+ gen_sync_exception(ctx);
#endif
}
#endif
uint32_t lev;
lev = (ctx->opcode >> 5) & 0x7F;
- GEN_EXCP(ctx, POWERPC_SYSCALL, lev);
+ gen_exception_err(ctx, POWERPC_SYSCALL, lev);
}
/*** Trap ***/
GEN_HANDLER(mfmsr, 0x1F, 0x13, 0x02, 0x001FF801, PPC_MISC)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVREG(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVREG(ctx);
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
return;
}
- gen_op_load_msr();
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_msr);
#endif
}
/* mfspr */
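+/* SPR read callbacks take the destination GPR number and write their result
+ * directly into that register. */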
static always_inline void gen_op_mfspr (DisasContext *ctx)
{
- void (*read_cb)(void *opaque, int sprn);
+ void (*read_cb)(void *opaque, int gprn, int sprn);
uint32_t sprn = SPR(ctx->opcode);
#if !defined(CONFIG_USER_ONLY)
- if (ctx->supervisor == 2)
+ if (ctx->mem_idx == 2)
read_cb = ctx->spr_cb[sprn].hea_read;
- else if (ctx->supervisor)
+ else if (ctx->mem_idx)
read_cb = ctx->spr_cb[sprn].oea_read;
else
#endif
read_cb = ctx->spr_cb[sprn].uea_read;
if (likely(read_cb != NULL)) {
if (likely(read_cb != SPR_NOACCESS)) {
- (*read_cb)(ctx, sprn);
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
+ (*read_cb)(ctx, rD(ctx->opcode), sprn);
} else {
/* Privilege exception */
/* This is a hack to avoid warnings when running Linux:
printf("Trying to read privileged spr %d %03x at " ADDRX "\n",
sprn, sprn, ctx->nip);
}
- GEN_EXCP_PRIVREG(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
}
} else {
/* Not defined */
}
printf("Trying to read invalid spr %d %03x at " ADDRX "\n",
sprn, sprn, ctx->nip);
- GEN_EXCP(ctx, POWERPC_EXCP_PROGRAM,
- POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_SPR);
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_SPR);
}
}
GEN_HANDLER(mtmsrd, 0x1F, 0x12, 0x05, 0x001EF801, PPC_64B)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVREG(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVREG(ctx);
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
return;
}
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
if (ctx->opcode & 0x00010000) {
/* Special form that does not need any synchronisation */
- gen_op_update_riee();
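+            /* Only the EE and RI bits are taken from rS; all other MSR bits are preserved */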
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], (1 << MSR_RI) | (1 << MSR_EE));
+ tcg_gen_andi_tl(cpu_msr, cpu_msr, ~((1 << MSR_RI) | (1 << MSR_EE)));
+ tcg_gen_or_tl(cpu_msr, cpu_msr, t0);
+ tcg_temp_free(t0);
} else {
/* XXX: we need to update nip before the store
* if we enter power saving mode, we will exit the loop
* directly from ppc_store_msr
*/
gen_update_nip(ctx, ctx->nip);
- gen_op_store_msr();
+ gen_helper_store_msr(cpu_gpr[rS(ctx->opcode)]);
/* Must stop the translation as machine state (may have) changed */
        /* Note that mtmsrd is not always defined as context-synchronizing */
- ctx->exception = POWERPC_EXCP_STOP;
+ gen_stop_exception(ctx);
}
#endif
}
GEN_HANDLER(mtmsr, 0x1F, 0x12, 0x04, 0x001FF801, PPC_MISC)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVREG(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVREG(ctx);
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
return;
}
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
if (ctx->opcode & 0x00010000) {
/* Special form that does not need any synchronisation */
- gen_op_update_riee();
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], (1 << MSR_RI) | (1 << MSR_EE));
+ tcg_gen_andi_tl(cpu_msr, cpu_msr, ~((1 << MSR_RI) | (1 << MSR_EE)));
+ tcg_gen_or_tl(cpu_msr, cpu_msr, t0);
+ tcg_temp_free(t0);
} else {
/* XXX: we need to update nip before the store
* if we enter power saving mode, we will exit the loop
*/
gen_update_nip(ctx, ctx->nip);
#if defined(TARGET_PPC64)
- if (!ctx->sf_mode)
- gen_op_store_msr_32();
- else
+ if (!ctx->sf_mode) {
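+                /* 32-bit mode: keep the upper MSR word, only the low 32 bits come from rS */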
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, cpu_msr, 0xFFFFFFFF00000000ULL);
+ tcg_gen_ext32u_tl(t1, cpu_gpr[rS(ctx->opcode)]);
+ tcg_gen_or_tl(t0, t0, t1);
+ tcg_temp_free(t1);
+ gen_helper_store_msr(t0);
+ tcg_temp_free(t0);
+ } else
#endif
- gen_op_store_msr();
+ gen_helper_store_msr(cpu_gpr[rS(ctx->opcode)]);
/* Must stop the translation as machine state (may have) changed */
- /* Note that mtmsrd is not always defined as context-synchronizing */
- ctx->exception = POWERPC_EXCP_STOP;
+ /* Note that mtmsr is not always defined as context-synchronizing */
+ gen_stop_exception(ctx);
}
#endif
}
/* mtspr */
GEN_HANDLER(mtspr, 0x1F, 0x13, 0x0E, 0x00000001, PPC_MISC)
{
- void (*write_cb)(void *opaque, int sprn);
+ void (*write_cb)(void *opaque, int sprn, int gprn);
uint32_t sprn = SPR(ctx->opcode);
#if !defined(CONFIG_USER_ONLY)
- if (ctx->supervisor == 2)
+ if (ctx->mem_idx == 2)
write_cb = ctx->spr_cb[sprn].hea_write;
- else if (ctx->supervisor)
+ else if (ctx->mem_idx)
write_cb = ctx->spr_cb[sprn].oea_write;
else
#endif
write_cb = ctx->spr_cb[sprn].uea_write;
if (likely(write_cb != NULL)) {
if (likely(write_cb != SPR_NOACCESS)) {
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
- (*write_cb)(ctx, sprn);
+ (*write_cb)(ctx, sprn, rS(ctx->opcode));
} else {
/* Privilege exception */
if (loglevel != 0) {
}
printf("Trying to write privileged spr %d %03x at " ADDRX "\n",
sprn, sprn, ctx->nip);
- GEN_EXCP_PRIVREG(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
}
} else {
/* Not defined */
}
printf("Trying to write invalid spr %d %03x at " ADDRX "\n",
sprn, sprn, ctx->nip);
- GEN_EXCP(ctx, POWERPC_EXCP_PROGRAM,
- POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_SPR);
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_SPR);
}
}
GEN_HANDLER(dcbf, 0x1F, 0x16, 0x02, 0x03C00001, PPC_CACHE)
{
/* XXX: specification says this is treated as a load by the MMU */
- TCGv t0 = tcg_temp_new();
- gen_set_access_type(ACCESS_CACHE);
- gen_addr_reg_index(t0, ctx);
- gen_qemu_ld8u(t0, t0, ctx->mem_idx);
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_CACHE);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_qemu_ld8u(ctx, t0, t0);
tcg_temp_free(t0);
}
GEN_HANDLER(dcbi, 0x1F, 0x16, 0x0E, 0x03E00001, PPC_CACHE)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVOPC(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
TCGv EA, val;
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVOPC(ctx);
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
EA = tcg_temp_new();
- gen_set_access_type(ACCESS_CACHE);
- gen_addr_reg_index(EA, ctx);
+ gen_set_access_type(ctx, ACCESS_CACHE);
+ gen_addr_reg_index(ctx, EA);
val = tcg_temp_new();
/* XXX: specification says this should be treated as a store by the MMU */
- gen_qemu_ld8u(val, EA, ctx->mem_idx);
- gen_qemu_st8(val, EA, ctx->mem_idx);
+ gen_qemu_ld8u(ctx, val, EA);
+ gen_qemu_st8(ctx, val, EA);
tcg_temp_free(val);
tcg_temp_free(EA);
#endif
GEN_HANDLER(dcbst, 0x1F, 0x16, 0x01, 0x03E00001, PPC_CACHE)
{
/* XXX: specification says this is treated as a load by the MMU */
- TCGv t0 = tcg_temp_new();
- gen_set_access_type(ACCESS_CACHE);
- gen_addr_reg_index(t0, ctx);
- gen_qemu_ld8u(t0, t0, ctx->mem_idx);
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_CACHE);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_qemu_ld8u(ctx, t0, t0);
tcg_temp_free(t0);
}
/* dcbz */
GEN_HANDLER(dcbz, 0x1F, 0x16, 0x1F, 0x03E00001, PPC_CACHE_DCBZ)
{
- TCGv t0 = tcg_temp_new();
- gen_addr_reg_index(t0, ctx);
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_CACHE);
/* NIP cannot be restored if the memory exception comes from a helper */
gen_update_nip(ctx, ctx->nip - 4);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
gen_helper_dcbz(t0);
tcg_temp_free(t0);
}
GEN_HANDLER2(dcbz_970, "dcbz", 0x1F, 0x16, 0x1F, 0x03C00001, PPC_CACHE_DCBZT)
{
- TCGv t0 = tcg_temp_new();
- gen_addr_reg_index(t0, ctx);
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_CACHE);
/* NIP cannot be restored if the memory exception comes from a helper */
gen_update_nip(ctx, ctx->nip - 4);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
if (ctx->opcode & 0x00200000)
gen_helper_dcbz(t0);
else
/* icbi */
GEN_HANDLER(icbi, 0x1F, 0x16, 0x1E, 0x03E00001, PPC_CACHE_ICBI)
{
- TCGv t0 = tcg_temp_new();
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_CACHE);
/* NIP cannot be restored if the memory exception comes from a helper */
gen_update_nip(ctx, ctx->nip - 4);
- gen_addr_reg_index(t0, ctx);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
gen_helper_icbi(t0);
tcg_temp_free(t0);
}
GEN_HANDLER(mfsr, 0x1F, 0x13, 0x12, 0x0010F801, PPC_SEGMENT)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVREG(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVREG(ctx);
+ TCGv t0;
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
return;
}
- tcg_gen_movi_tl(cpu_T[1], SR(ctx->opcode));
- gen_op_load_sr();
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
+ t0 = tcg_const_tl(SR(ctx->opcode));
+ gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
#endif
}
GEN_HANDLER(mfsrin, 0x1F, 0x13, 0x14, 0x001F0001, PPC_SEGMENT)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVREG(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVREG(ctx);
+ TCGv t0;
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
return;
}
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]);
- gen_op_srli_T1(28);
- gen_op_load_sr();
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
+ t0 = tcg_temp_new();
+ tcg_gen_shri_tl(t0, cpu_gpr[rB(ctx->opcode)], 28);
+ tcg_gen_andi_tl(t0, t0, 0xF);
+ gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
#endif
}
GEN_HANDLER(mtsr, 0x1F, 0x12, 0x06, 0x0010F801, PPC_SEGMENT)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVREG(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVREG(ctx);
+ TCGv t0;
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
return;
}
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
- tcg_gen_movi_tl(cpu_T[1], SR(ctx->opcode));
- gen_op_store_sr();
+ t0 = tcg_const_tl(SR(ctx->opcode));
+ gen_helper_store_sr(t0, cpu_gpr[rS(ctx->opcode)]);
+ tcg_temp_free(t0);
#endif
}
GEN_HANDLER(mtsrin, 0x1F, 0x12, 0x07, 0x001F0001, PPC_SEGMENT)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVREG(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVREG(ctx);
+ TCGv t0;
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
return;
}
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]);
- gen_op_srli_T1(28);
- gen_op_store_sr();
+ t0 = tcg_temp_new();
+ tcg_gen_shri_tl(t0, cpu_gpr[rB(ctx->opcode)], 28);
+ tcg_gen_andi_tl(t0, t0, 0xF);
+ gen_helper_store_sr(t0, cpu_gpr[rD(ctx->opcode)]);
+ tcg_temp_free(t0);
#endif
}
GEN_HANDLER2(mfsr_64b, "mfsr", 0x1F, 0x13, 0x12, 0x0010F801, PPC_SEGMENT_64B)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVREG(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVREG(ctx);
+ TCGv t0;
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
return;
}
- tcg_gen_movi_tl(cpu_T[1], SR(ctx->opcode));
- gen_op_load_slb();
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
+ t0 = tcg_const_tl(SR(ctx->opcode));
+ gen_helper_load_slb(cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
#endif
}
PPC_SEGMENT_64B)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVREG(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVREG(ctx);
+ TCGv t0;
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
return;
}
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]);
- gen_op_srli_T1(28);
- gen_op_load_slb();
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
+ t0 = tcg_temp_new();
+ tcg_gen_shri_tl(t0, cpu_gpr[rB(ctx->opcode)], 28);
+ tcg_gen_andi_tl(t0, t0, 0xF);
+ gen_helper_load_slb(cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
#endif
}
GEN_HANDLER2(mtsr_64b, "mtsr", 0x1F, 0x12, 0x06, 0x0010F801, PPC_SEGMENT_64B)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVREG(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVREG(ctx);
+ TCGv t0;
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
return;
}
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
- tcg_gen_movi_tl(cpu_T[1], SR(ctx->opcode));
- gen_op_store_slb();
+ t0 = tcg_const_tl(SR(ctx->opcode));
+ gen_helper_store_slb(t0, cpu_gpr[rS(ctx->opcode)]);
+ tcg_temp_free(t0);
#endif
}
PPC_SEGMENT_64B)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVREG(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVREG(ctx);
+ TCGv t0;
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
return;
}
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]);
- gen_op_srli_T1(28);
- gen_op_store_slb();
+ t0 = tcg_temp_new();
+ tcg_gen_shri_tl(t0, cpu_gpr[rB(ctx->opcode)], 28);
+ tcg_gen_andi_tl(t0, t0, 0xF);
+ gen_helper_store_slb(t0, cpu_gpr[rS(ctx->opcode)]);
+ tcg_temp_free(t0);
#endif
}
#endif /* defined(TARGET_PPC64) */
/*** Lookaside buffer management ***/
/* Optional & supervisor only: */
/* tlbia */
GEN_HANDLER(tlbia, 0x1F, 0x12, 0x0B, 0x03FFFC01, PPC_MEM_TLBIA)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVOPC(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVOPC(ctx);
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
- gen_op_tlbia();
+ gen_helper_tlbia();
#endif
}
GEN_HANDLER(tlbie, 0x1F, 0x12, 0x09, 0x03FF0001, PPC_MEM_TLBIE)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVOPC(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVOPC(ctx);
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rB(ctx->opcode)]);
#if defined(TARGET_PPC64)
- if (ctx->sf_mode)
- gen_op_tlbie_64();
- else
+ if (!ctx->sf_mode) {
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_ext32u_tl(t0, cpu_gpr[rB(ctx->opcode)]);
+ gen_helper_tlbie(t0);
+ tcg_temp_free(t0);
+ } else
#endif
- gen_op_tlbie();
+ gen_helper_tlbie(cpu_gpr[rB(ctx->opcode)]);
#endif
}
GEN_HANDLER(tlbsync, 0x1F, 0x16, 0x11, 0x03FFF801, PPC_MEM_TLBSYNC)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVOPC(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVOPC(ctx);
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
/* This has no effect: it should ensure that all previous
* tlbie have completed
*/
- GEN_STOP(ctx);
+ gen_stop_exception(ctx);
#endif
}
GEN_HANDLER(slbia, 0x1F, 0x12, 0x0F, 0x03FFFC01, PPC_SLBI)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVOPC(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVOPC(ctx);
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
- gen_op_slbia();
+ gen_helper_slbia();
#endif
}
GEN_HANDLER(slbie, 0x1F, 0x12, 0x0D, 0x03FF0001, PPC_SLBI)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVOPC(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVOPC(ctx);
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rB(ctx->opcode)]);
- gen_op_slbie();
+ gen_helper_slbie(cpu_gpr[rB(ctx->opcode)]);
#endif
}
#endif
/*** External control ***/
/* Optional: */
-#define op_eciwx() (*gen_op_eciwx[ctx->mem_idx])()
-#define op_ecowx() (*gen_op_ecowx[ctx->mem_idx])()
-static GenOpFunc *gen_op_eciwx[NB_MEM_FUNCS] = {
- GEN_MEM_FUNCS(eciwx),
-};
-static GenOpFunc *gen_op_ecowx[NB_MEM_FUNCS] = {
- GEN_MEM_FUNCS(ecowx),
-};
-
/* eciwx */
GEN_HANDLER(eciwx, 0x1F, 0x16, 0x0D, 0x00000001, PPC_EXTERN)
{
- /* Should check EAR[E] & alignment ! */
- gen_set_access_type(ACCESS_RES);
- gen_addr_reg_index(cpu_T[0], ctx);
- op_eciwx();
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
+ TCGv t0;
+ /* Should check EAR[E] ! */
+ gen_set_access_type(ctx, ACCESS_EXT);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_check_align(ctx, t0, 0x03);
+ gen_qemu_ld32u(ctx, cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
}
/* ecowx */
GEN_HANDLER(ecowx, 0x1F, 0x16, 0x09, 0x00000001, PPC_EXTERN)
{
- /* Should check EAR[E] & alignment ! */
- gen_addr_reg_index(cpu_T[0], ctx);
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rS(ctx->opcode)]);
- op_ecowx();
+ TCGv t0;
+ /* Should check EAR[E] ! */
+ gen_set_access_type(ctx, ACCESS_EXT);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_check_align(ctx, t0, 0x03);
+ gen_qemu_st32(ctx, cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
}
/* PowerPC 601 specific instructions */
/* abs - abs. */
GEN_HANDLER(abs, 0x1F, 0x08, 0x0B, 0x0000F800, PPC_POWER_BR)
{
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);
- gen_op_POWER_abs();
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
+ int l1 = gen_new_label();
+ int l2 = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_GE, cpu_gpr[rA(ctx->opcode)], 0, l1);
+ tcg_gen_neg_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ gen_set_label(l2);
if (unlikely(Rc(ctx->opcode) != 0))
- gen_set_Rc0(ctx, cpu_T[0]);
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
/* abso - abso. */
GEN_HANDLER(abso, 0x1F, 0x08, 0x1B, 0x0000F800, PPC_POWER_BR)
{
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);
- gen_op_POWER_abso();
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
+ int l1 = gen_new_label();
+ int l2 = gen_new_label();
+ int l3 = gen_new_label();
+ /* Start with XER OV disabled, the most likely case */
+ tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
+ tcg_gen_brcondi_tl(TCG_COND_GE, cpu_gpr[rA(ctx->opcode)], 0, l2);
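+ /* Only 0x80000000 overflows on negation: leave rD = rA and set OV and SO */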
+ tcg_gen_brcondi_tl(TCG_COND_NE, cpu_gpr[rA(ctx->opcode)], 0x80000000, l1);
+ tcg_gen_ori_tl(cpu_xer, cpu_xer, (1 << XER_OV) | (1 << XER_SO));
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_neg_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_br(l3);
+ gen_set_label(l2);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ gen_set_label(l3);
if (unlikely(Rc(ctx->opcode) != 0))
- gen_set_Rc0(ctx, cpu_T[0]);
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
/* clcs */
GEN_HANDLER(clcs, 0x1F, 0x10, 0x13, 0x0000F800, PPC_POWER_BR)
{
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);
- gen_op_POWER_clcs();
+ TCGv_i32 t0 = tcg_const_i32(rA(ctx->opcode));
+ gen_helper_clcs(cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free_i32(t0);
/* Rc=1 sets CR0 to an undefined state */
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
}
/* div - div. */
GEN_HANDLER(div, 0x1F, 0x0B, 0x0A, 0x00000000, PPC_POWER_BR)
{
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]);
- gen_op_POWER_div();
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
+ gen_helper_div(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
if (unlikely(Rc(ctx->opcode) != 0))
- gen_set_Rc0(ctx, cpu_T[0]);
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
/* divo - divo. */
GEN_HANDLER(divo, 0x1F, 0x0B, 0x1A, 0x00000000, PPC_POWER_BR)
{
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]);
- gen_op_POWER_divo();
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
+ gen_helper_divo(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
if (unlikely(Rc(ctx->opcode) != 0))
- gen_set_Rc0(ctx, cpu_T[0]);
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
/* divs - divs. */
GEN_HANDLER(divs, 0x1F, 0x0B, 0x0B, 0x00000000, PPC_POWER_BR)
{
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]);
- gen_op_POWER_divs();
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
+ gen_helper_divs(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
if (unlikely(Rc(ctx->opcode) != 0))
- gen_set_Rc0(ctx, cpu_T[0]);
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
/* divso - divso. */
GEN_HANDLER(divso, 0x1F, 0x0B, 0x1B, 0x00000000, PPC_POWER_BR)
{
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]);
- gen_op_POWER_divso();
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
+ gen_helper_divso(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
if (unlikely(Rc(ctx->opcode) != 0))
- gen_set_Rc0(ctx, cpu_T[0]);
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
/* doz - doz. */
GEN_HANDLER(doz, 0x1F, 0x08, 0x08, 0x00000000, PPC_POWER_BR)
{
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]);
- gen_op_POWER_doz();
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
+ int l1 = gen_new_label();
+ int l2 = gen_new_label();
+ /* doz: rD = rB - rA when rA <= rB, else 0 */
+ tcg_gen_brcond_tl(TCG_COND_LT, cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], l1);
+ tcg_gen_sub_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], 0);
+ gen_set_label(l2);
if (unlikely(Rc(ctx->opcode) != 0))
- gen_set_Rc0(ctx, cpu_T[0]);
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
/* dozo - dozo. */
GEN_HANDLER(dozo, 0x1F, 0x08, 0x18, 0x00000000, PPC_POWER_BR)
{
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]);
- gen_op_POWER_dozo();
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
+ int l1 = gen_new_label();
+ int l2 = gen_new_label();
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ /* Start with XER OV disabled, the most likely case */
+ tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
+ /* dozo: rD = rB - rA when rA <= rB (with OV check), else 0 */
+ tcg_gen_brcond_tl(TCG_COND_LT, cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], l1);
+ tcg_gen_sub_tl(t0, cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
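+ /* Signed overflow for rB - rA: the operand signs differ and the result takes the sign of rA */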
+ tcg_gen_xor_tl(t1, cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_xor_tl(t2, cpu_gpr[rA(ctx->opcode)], t0);
+ tcg_gen_andc_tl(t1, t1, t2);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l2);
+ tcg_gen_ori_tl(cpu_xer, cpu_xer, (1 << XER_OV) | (1 << XER_SO));
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], 0);
+ gen_set_label(l2);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
if (unlikely(Rc(ctx->opcode) != 0))
- gen_set_Rc0(ctx, cpu_T[0]);
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
/* dozi */
GEN_HANDLER(dozi, 0x09, 0xFF, 0xFF, 0x00000000, PPC_POWER_BR)
{
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);
- tcg_gen_movi_tl(cpu_T[1], SIMM(ctx->opcode));
- gen_op_POWER_doz();
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
+ target_long simm = SIMM(ctx->opcode);
+ int l1 = gen_new_label();
+ int l2 = gen_new_label();
+ /* dozi: rD = SIMM - rA when rA <= SIMM, else 0 */
+ tcg_gen_brcondi_tl(TCG_COND_GT, cpu_gpr[rA(ctx->opcode)], simm, l1);
+ tcg_gen_subfi_tl(cpu_gpr[rD(ctx->opcode)], simm, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], 0);
+ gen_set_label(l2);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
/* lscbx - lscbx. */
TCGv_i32 t2 = tcg_const_i32(rA(ctx->opcode));
TCGv_i32 t3 = tcg_const_i32(rB(ctx->opcode));
- gen_addr_reg_index(t0, ctx);
+ gen_addr_reg_index(ctx, t0);
/* NIP cannot be restored if the memory exception comes from a helper */
gen_update_nip(ctx, ctx->nip - 4);
gen_helper_lscbx(t0, t0, t1, t2, t3);
/* maskg - maskg. */
GEN_HANDLER(maskg, 0x1F, 0x1D, 0x00, 0x00000000, PPC_POWER_BR)
{
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]);
- gen_op_POWER_maskg();
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
+ int l1 = gen_new_label();
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ tcg_gen_movi_tl(t3, 0xFFFFFFFF);
+ tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_andi_tl(t1, cpu_gpr[rS(ctx->opcode)], 0x1F);
+ tcg_gen_addi_tl(t2, t0, 1);
+ tcg_gen_shr_tl(t2, t3, t2);
+ tcg_gen_shr_tl(t3, t3, t1);
+ tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], t2, t3);
+ tcg_gen_brcond_tl(TCG_COND_GE, t0, t1, l1);
+ tcg_gen_neg_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ gen_set_label(l1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ tcg_temp_free(t3);
if (unlikely(Rc(ctx->opcode) != 0))
- gen_set_Rc0(ctx, cpu_T[0]);
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* maskir - maskir. */
GEN_HANDLER(maskir, 0x1F, 0x1D, 0x10, 0x00000000, PPC_POWER_BR)
{
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rS(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_T[2], cpu_gpr[rB(ctx->opcode)]);
- gen_op_POWER_maskir();
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_and_tl(t0, cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_andc_tl(t1, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
if (unlikely(Rc(ctx->opcode) != 0))
- gen_set_Rc0(ctx, cpu_T[0]);
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* mul - mul. */
GEN_HANDLER(mul, 0x1F, 0x0B, 0x03, 0x00000000, PPC_POWER_BR)
{
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]);
- gen_op_POWER_mul();
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv t2 = tcg_temp_new();
+ tcg_gen_extu_tl_i64(t0, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_extu_tl_i64(t1, cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_mul_i64(t0, t0, t1);
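+ /* MQ gets the low 32 bits of the 64-bit product, rD the high 32 bits */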
+ tcg_gen_trunc_i64_tl(t2, t0);
+ gen_store_spr(SPR_MQ, t2);
+ tcg_gen_shri_i64(t1, t0, 32);
+ tcg_gen_trunc_i64_tl(cpu_gpr[rD(ctx->opcode)], t1);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free(t2);
if (unlikely(Rc(ctx->opcode) != 0))
- gen_set_Rc0(ctx, cpu_T[0]);
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
/* mulo - mulo. */
GEN_HANDLER(mulo, 0x1F, 0x0B, 0x13, 0x00000000, PPC_POWER_BR)
{
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]);
- gen_op_POWER_mulo();
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
+ int l1 = gen_new_label();
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv t2 = tcg_temp_new();
+ /* Start with XER OV disabled, the most likely case */
+ tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
+ tcg_gen_extu_tl_i64(t0, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_extu_tl_i64(t1, cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_mul_i64(t0, t0, t1);
+ tcg_gen_trunc_i64_tl(t2, t0);
+ gen_store_spr(SPR_MQ, t2);
+ tcg_gen_shri_i64(t1, t0, 32);
+ tcg_gen_trunc_i64_tl(cpu_gpr[rD(ctx->opcode)], t1);
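+ /* Overflow unless the 64-bit product equals the sign extension of its low 32 bits */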
+ tcg_gen_ext32s_i64(t1, t0);
+ tcg_gen_brcond_i64(TCG_COND_EQ, t0, t1, l1);
+ tcg_gen_ori_tl(cpu_xer, cpu_xer, (1 << XER_OV) | (1 << XER_SO));
+ gen_set_label(l1);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free(t2);
if (unlikely(Rc(ctx->opcode) != 0))
- gen_set_Rc0(ctx, cpu_T[0]);
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
/* nabs - nabs. */
GEN_HANDLER(nabs, 0x1F, 0x08, 0x0F, 0x00000000, PPC_POWER_BR)
{
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);
- gen_op_POWER_nabs();
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
+ int l1 = gen_new_label();
+ int l2 = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_GT, cpu_gpr[rA(ctx->opcode)], 0, l1);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_neg_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ gen_set_label(l2);
if (unlikely(Rc(ctx->opcode) != 0))
- gen_set_Rc0(ctx, cpu_T[0]);
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
/* nabso - nabso. */
GEN_HANDLER(nabso, 0x1F, 0x08, 0x1F, 0x00000000, PPC_POWER_BR)
{
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);
- gen_op_POWER_nabso();
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
+ int l1 = gen_new_label();
+ int l2 = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_GT, cpu_gpr[rA(ctx->opcode)], 0, l1);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_neg_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ gen_set_label(l2);
+ /* nabs never overflows */
+ tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV));
if (unlikely(Rc(ctx->opcode) != 0))
- gen_set_Rc0(ctx, cpu_T[0]);
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
/* rlmi - rlmi. */
GEN_HANDLER(rlmi, 0x16, 0xFF, 0xFF, 0x00000000, PPC_POWER_BR)
{
- uint32_t mb, me;
-
- mb = MB(ctx->opcode);
- me = ME(ctx->opcode);
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rA(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_T[2], cpu_gpr[rB(ctx->opcode)]);
- gen_op_POWER_rlmi(MASK(mb, me), ~MASK(mb, me));
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
+ uint32_t mb = MB(ctx->opcode);
+ uint32_t me = ME(ctx->opcode);
+ TCGv t0 = tcg_temp_new();
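+ /* Rotate rS left by rB[27:31], then insert the result into rA under the MB..ME mask */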
+ tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_rotl_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
+ tcg_gen_andi_tl(t0, t0, MASK(mb, me));
+ tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], ~MASK(mb, me));
+ tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], t0);
+ tcg_temp_free(t0);
if (unlikely(Rc(ctx->opcode) != 0))
- gen_set_Rc0(ctx, cpu_T[0]);
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* rrib - rrib. */
GEN_HANDLER(rrib, 0x1F, 0x19, 0x10, 0x00000000, PPC_POWER_BR)
{
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rA(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_T[2], cpu_gpr[rB(ctx->opcode)]);
- gen_op_POWER_rrib();
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
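+ /* Insert the most-significant bit of rS into rA at the bit position given by rB[27:31] */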
+ tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_movi_tl(t1, 0x80000000);
+ tcg_gen_shr_tl(t1, t1, t0);
+ tcg_gen_shr_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
+ tcg_gen_and_tl(t0, t0, t1);
+ tcg_gen_andc_tl(t1, cpu_gpr[rA(ctx->opcode)], t1);
+ tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
if (unlikely(Rc(ctx->opcode) != 0))
- gen_set_Rc0(ctx, cpu_T[0]);
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* sle - sle. */
GEN_HANDLER(sle, 0x1F, 0x19, 0x04, 0x00000000, PPC_POWER_BR)
{
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]);
- gen_op_POWER_sle();
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
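+ /* rA = rS << n; MQ = rS rotated left by n, with n = rB[27:31] */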
+ tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_shl_tl(t0, cpu_gpr[rS(ctx->opcode)], t1);
+ tcg_gen_subfi_tl(t1, 32, t1);
+ tcg_gen_shr_tl(t1, cpu_gpr[rS(ctx->opcode)], t1);
+ tcg_gen_or_tl(t1, t0, t1);
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
+ gen_store_spr(SPR_MQ, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
if (unlikely(Rc(ctx->opcode) != 0))
- gen_set_Rc0(ctx, cpu_T[0]);
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* sleq - sleq. */
GEN_HANDLER(sleq, 0x1F, 0x19, 0x06, 0x00000000, PPC_POWER_BR)
{
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]);
- gen_op_POWER_sleq();
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_movi_tl(t2, 0xFFFFFFFF);
+ tcg_gen_shl_tl(t2, t2, t0);
+ tcg_gen_rotl_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
+ gen_load_spr(t1, SPR_MQ);
+ gen_store_spr(SPR_MQ, t0);
+ tcg_gen_and_tl(t0, t0, t2);
+ tcg_gen_andc_tl(t1, t1, t2);
+ tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
if (unlikely(Rc(ctx->opcode) != 0))
- gen_set_Rc0(ctx, cpu_T[0]);
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* sliq - sliq. */
GEN_HANDLER(sliq, 0x1F, 0x18, 0x05, 0x00000000, PPC_POWER_BR)
{
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
- tcg_gen_movi_tl(cpu_T[1], SH(ctx->opcode));
- gen_op_POWER_sle();
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
+ int sh = SH(ctx->opcode);
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_shli_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
+ tcg_gen_shri_tl(t1, cpu_gpr[rS(ctx->opcode)], 32 - sh);
+ tcg_gen_or_tl(t1, t0, t1);
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
+ gen_store_spr(SPR_MQ, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
if (unlikely(Rc(ctx->opcode) != 0))
- gen_set_Rc0(ctx, cpu_T[0]);
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* slliq - slliq. */
GEN_HANDLER(slliq, 0x1F, 0x18, 0x07, 0x00000000, PPC_POWER_BR)
{
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
- tcg_gen_movi_tl(cpu_T[1], SH(ctx->opcode));
- gen_op_POWER_sleq();
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
+ int sh = SH(ctx->opcode);
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_rotli_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
+ gen_load_spr(t1, SPR_MQ);
+ gen_store_spr(SPR_MQ, t0);
+ tcg_gen_andi_tl(t0, t0, (0xFFFFFFFFU << sh));
+ tcg_gen_andi_tl(t1, t1, ~(0xFFFFFFFFU << sh));
+ tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
if (unlikely(Rc(ctx->opcode) != 0))
- gen_set_Rc0(ctx, cpu_T[0]);
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* sllq - sllq. */
GEN_HANDLER(sllq, 0x1F, 0x18, 0x06, 0x00000000, PPC_POWER_BR)
{
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]);
- gen_op_POWER_sllq();
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
+ int l1 = gen_new_label();
+ int l2 = gen_new_label();
+ TCGv t0 = tcg_temp_local_new();
+ TCGv t1 = tcg_temp_local_new();
+ TCGv t2 = tcg_temp_local_new();
+ tcg_gen_andi_tl(t2, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_movi_tl(t1, 0xFFFFFFFF);
+ tcg_gen_shl_tl(t1, t1, t2);
+ tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x20);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
+ gen_load_spr(t0, SPR_MQ);
+ tcg_gen_and_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_shl_tl(t0, cpu_gpr[rS(ctx->opcode)], t2);
+ gen_load_spr(t2, SPR_MQ);
+ tcg_gen_andc_tl(t1, t2, t1);
+ tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ gen_set_label(l2);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
if (unlikely(Rc(ctx->opcode) != 0))
- gen_set_Rc0(ctx, cpu_T[0]);
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* slq - slq. */
GEN_HANDLER(slq, 0x1F, 0x18, 0x04, 0x00000000, PPC_POWER_BR)
{
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]);
- gen_op_POWER_slq();
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
+ int l1 = gen_new_label();
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_shl_tl(t0, cpu_gpr[rS(ctx->opcode)], t1);
+ tcg_gen_subfi_tl(t1, 32, t1);
+ tcg_gen_shr_tl(t1, cpu_gpr[rS(ctx->opcode)], t1);
+ tcg_gen_or_tl(t1, t0, t1);
+ gen_store_spr(SPR_MQ, t1);
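+ /* MQ always gets the rotated value; if rB bit 0x20 is set the shift is 32 or more and rA is 0 */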
+ tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x20);
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1);
+ tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
+ gen_set_label(l1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
if (unlikely(Rc(ctx->opcode) != 0))
- gen_set_Rc0(ctx, cpu_T[0]);
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* sraiq - sraiq. */
GEN_HANDLER(sraiq, 0x1F, 0x18, 0x1D, 0x00000000, PPC_POWER_BR)
{
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
- tcg_gen_movi_tl(cpu_T[1], SH(ctx->opcode));
- gen_op_POWER_sraq();
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
+ int sh = SH(ctx->opcode);
+ int l1 = gen_new_label();
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
+ tcg_gen_shli_tl(t1, cpu_gpr[rS(ctx->opcode)], 32 - sh);
+ tcg_gen_or_tl(t0, t0, t1);
+ gen_store_spr(SPR_MQ, t0);
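+ /* CA is set when rS is negative and 1 bits were shifted out (t1 holds the shifted-out bits) */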
+ tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1);
+ tcg_gen_brcondi_tl(TCG_COND_GE, cpu_gpr[rS(ctx->opcode)], 0, l1);
+ tcg_gen_ori_tl(cpu_xer, cpu_xer, (1 << XER_CA));
+ gen_set_label(l1);
+ tcg_gen_sari_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], sh);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
if (unlikely(Rc(ctx->opcode) != 0))
- gen_set_Rc0(ctx, cpu_T[0]);
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* sraq - sraq. */
GEN_HANDLER(sraq, 0x1F, 0x18, 0x1C, 0x00000000, PPC_POWER_BR)
{
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]);
- gen_op_POWER_sraq();
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
+ int l1 = gen_new_label();
+ int l2 = gen_new_label();
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_local_new();
+ TCGv t2 = tcg_temp_local_new();
+ tcg_gen_andi_tl(t2, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_shr_tl(t0, cpu_gpr[rS(ctx->opcode)], t2);
+ tcg_gen_sar_tl(t1, cpu_gpr[rS(ctx->opcode)], t2);
+ tcg_gen_subfi_tl(t2, 32, t2);
+ tcg_gen_shl_tl(t2, cpu_gpr[rS(ctx->opcode)], t2);
+ tcg_gen_or_tl(t0, t0, t2);
+ gen_store_spr(SPR_MQ, t0);
+ tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x20);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
+ tcg_gen_mov_tl(t2, cpu_gpr[rS(ctx->opcode)]);
+ tcg_gen_sari_tl(t1, cpu_gpr[rS(ctx->opcode)], 31);
+ gen_set_label(l1);
+ tcg_temp_free(t0);
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t1);
+ tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA));
+ tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l2);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, l2);
+ tcg_gen_ori_tl(cpu_xer, cpu_xer, (1 << XER_CA));
+ gen_set_label(l2);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
if (unlikely(Rc(ctx->opcode) != 0))
- gen_set_Rc0(ctx, cpu_T[0]);
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* sre - sre. */
GEN_HANDLER(sre, 0x1F, 0x19, 0x14, 0x00000000, PPC_POWER_BR)
{
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]);
- gen_op_POWER_sre();
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_shr_tl(t0, cpu_gpr[rS(ctx->opcode)], t1);
+ tcg_gen_subfi_tl(t1, 32, t1);
+ tcg_gen_shl_tl(t1, cpu_gpr[rS(ctx->opcode)], t1);
+ tcg_gen_or_tl(t1, t0, t1);
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
+ gen_store_spr(SPR_MQ, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
if (unlikely(Rc(ctx->opcode) != 0))
- gen_set_Rc0(ctx, cpu_T[0]);
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* srea - srea. */
GEN_HANDLER(srea, 0x1F, 0x19, 0x1C, 0x00000000, PPC_POWER_BR)
{
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]);
- gen_op_POWER_srea();
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_rotr_tl(t0, cpu_gpr[rS(ctx->opcode)], t1);
+ gen_store_spr(SPR_MQ, t0);
+ tcg_gen_sar_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
if (unlikely(Rc(ctx->opcode) != 0))
- gen_set_Rc0(ctx, cpu_T[0]);
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* sreq */
GEN_HANDLER(sreq, 0x1F, 0x19, 0x16, 0x00000000, PPC_POWER_BR)
{
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]);
- gen_op_POWER_sreq();
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_movi_tl(t1, 0xFFFFFFFF);
+ tcg_gen_shr_tl(t1, t1, t0);
+ tcg_gen_rotr_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
+ gen_load_spr(t2, SPR_MQ);
+ gen_store_spr(SPR_MQ, t0);
+ tcg_gen_and_tl(t0, t0, t1);
+ tcg_gen_andc_tl(t2, t2, t1);
+ tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t2);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
if (unlikely(Rc(ctx->opcode) != 0))
- gen_set_Rc0(ctx, cpu_T[0]);
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* sriq */
GEN_HANDLER(sriq, 0x1F, 0x18, 0x15, 0x00000000, PPC_POWER_BR)
{
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
- tcg_gen_movi_tl(cpu_T[1], SH(ctx->opcode));
- gen_op_POWER_srq();
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
+ int sh = SH(ctx->opcode);
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
+ tcg_gen_shli_tl(t1, cpu_gpr[rS(ctx->opcode)], 32 - sh);
+ tcg_gen_or_tl(t1, t0, t1);
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
+ gen_store_spr(SPR_MQ, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
if (unlikely(Rc(ctx->opcode) != 0))
- gen_set_Rc0(ctx, cpu_T[0]);
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* srliq */
GEN_HANDLER(srliq, 0x1F, 0x18, 0x17, 0x00000000, PPC_POWER_BR)
{
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]);
- tcg_gen_movi_tl(cpu_T[1], SH(ctx->opcode));
- gen_op_POWER_srlq();
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
+ int sh = SH(ctx->opcode);
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_rotri_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
+ gen_load_spr(t1, SPR_MQ);
+ gen_store_spr(SPR_MQ, t0);
+ tcg_gen_andi_tl(t0, t0, (0xFFFFFFFFU >> sh));
+ tcg_gen_andi_tl(t1, t1, ~(0xFFFFFFFFU >> sh));
+ tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
if (unlikely(Rc(ctx->opcode) != 0))
- gen_set_Rc0(ctx, cpu_T[0]);
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* srlq */
GEN_HANDLER(srlq, 0x1F, 0x18, 0x16, 0x00000000, PPC_POWER_BR)
{
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]);
- gen_op_POWER_srlq();
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
+ int l1 = gen_new_label();
+ int l2 = gen_new_label();
+ TCGv t0 = tcg_temp_local_new();
+ TCGv t1 = tcg_temp_local_new();
+ TCGv t2 = tcg_temp_local_new();
+ tcg_gen_andi_tl(t2, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_movi_tl(t1, 0xFFFFFFFF);
+ /* t1 = mask (0xFFFFFFFF >> n); t2 keeps the shift amount n */
+ tcg_gen_shr_tl(t1, t1, t2);
+ tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x20);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
+ gen_load_spr(t0, SPR_MQ);
+ tcg_gen_and_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_shr_tl(t0, cpu_gpr[rS(ctx->opcode)], t2);
+ tcg_gen_and_tl(t0, t0, t1);
+ gen_load_spr(t2, SPR_MQ);
+ tcg_gen_andc_tl(t2, t2, t1);
+ tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t2);
+ gen_set_label(l2);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
if (unlikely(Rc(ctx->opcode) != 0))
- gen_set_Rc0(ctx, cpu_T[0]);
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* srq */
GEN_HANDLER(srq, 0x1F, 0x18, 0x14, 0x00000000, PPC_POWER_BR)
{
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]);
- gen_op_POWER_srq();
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
+ int l1 = gen_new_label();
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_shr_tl(t0, cpu_gpr[rS(ctx->opcode)], t1);
+ tcg_gen_subfi_tl(t1, 32, t1);
+ tcg_gen_shl_tl(t1, cpu_gpr[rS(ctx->opcode)], t1);
+ tcg_gen_or_tl(t1, t0, t1);
+ gen_store_spr(SPR_MQ, t1);
+ tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x20);
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1);
+ tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
+ gen_set_label(l1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
if (unlikely(Rc(ctx->opcode) != 0))
- gen_set_Rc0(ctx, cpu_T[0]);
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* PowerPC 602 specific instructions */
GEN_HANDLER(dsa, 0x1F, 0x14, 0x13, 0x03FFF801, PPC_602_SPEC)
{
/* XXX: TODO */
- GEN_EXCP_INVAL(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
}
/* esa */
GEN_HANDLER(esa, 0x1F, 0x14, 0x12, 0x03FFF801, PPC_602_SPEC)
{
/* XXX: TODO */
- GEN_EXCP_INVAL(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
}
/* mfrom */
GEN_HANDLER(mfrom, 0x1F, 0x09, 0x08, 0x03E0F801, PPC_602_SPEC)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVOPC(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVOPC(ctx);
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
gen_helper_602_mfrom(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
GEN_HANDLER2(tlbld_6xx, "tlbld", 0x1F, 0x12, 0x1E, 0x03FF0001, PPC_6xx_TLB)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVOPC(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVOPC(ctx);
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
- gen_helper_load_6xx_tlbd(cpu_gpr[rB(ctx->opcode)]);
+ gen_helper_6xx_tlbd(cpu_gpr[rB(ctx->opcode)]);
#endif
}
GEN_HANDLER2(tlbli_6xx, "tlbli", 0x1F, 0x12, 0x1F, 0x03FF0001, PPC_6xx_TLB)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVOPC(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVOPC(ctx);
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
- gen_helper_load_6xx_tlbi(cpu_gpr[rB(ctx->opcode)]);
+ gen_helper_6xx_tlbi(cpu_gpr[rB(ctx->opcode)]);
#endif
}
GEN_HANDLER2(tlbld_74xx, "tlbld", 0x1F, 0x12, 0x1E, 0x03FF0001, PPC_74xx_TLB)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVOPC(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVOPC(ctx);
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
- gen_helper_load_74xx_tlbd(cpu_gpr[rB(ctx->opcode)]);
+ gen_helper_74xx_tlbd(cpu_gpr[rB(ctx->opcode)]);
#endif
}
GEN_HANDLER2(tlbli_74xx, "tlbli", 0x1F, 0x12, 0x1F, 0x03FF0001, PPC_74xx_TLB)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVOPC(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVOPC(ctx);
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
- gen_helper_load_74xx_tlbi(cpu_gpr[rB(ctx->opcode)]);
+ gen_helper_74xx_tlbi(cpu_gpr[rB(ctx->opcode)]);
#endif
}
{
/* Cache line invalidate: privileged and treated as no-op */
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVOPC(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVOPC(ctx);
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
#endif
GEN_HANDLER(mfsri, 0x1F, 0x13, 0x13, 0x00000001, PPC_POWER)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVOPC(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVOPC(ctx);
- return;
- }
int ra = rA(ctx->opcode);
int rd = rD(ctx->opcode);
-
- gen_addr_reg_index(cpu_T[0], ctx);
- gen_op_POWER_mfsri();
- tcg_gen_mov_tl(cpu_gpr[rd], cpu_T[0]);
+ TCGv t0;
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return;
+ }
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ tcg_gen_shri_tl(t0, t0, 28);
+ tcg_gen_andi_tl(t0, t0, 0xF);
+ gen_helper_load_sr(cpu_gpr[rd], t0);
+ tcg_temp_free(t0);
if (ra != 0 && ra != rd)
- tcg_gen_mov_tl(cpu_gpr[ra], cpu_T[1]);
+ tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rd]);
#endif
}
GEN_HANDLER(rac, 0x1F, 0x12, 0x19, 0x00000001, PPC_POWER)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVOPC(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVOPC(ctx);
+ TCGv t0;
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
- gen_addr_reg_index(cpu_T[0], ctx);
- gen_op_POWER_rac();
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_helper_rac(cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
#endif
}
GEN_HANDLER(rfsvc, 0x13, 0x12, 0x02, 0x03FFF0001, PPC_POWER)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVOPC(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVOPC(ctx);
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
- gen_op_POWER_rfsvc();
- GEN_SYNC(ctx);
+ gen_helper_rfsvc();
+ gen_sync_exception(ctx);
#endif
}
GEN_HANDLER(lfq, 0x38, 0xFF, 0xFF, 0x00000003, PPC_POWER2)
{
int rd = rD(ctx->opcode);
- TCGv t0 = tcg_temp_new();
- gen_addr_imm_index(t0, ctx, 0);
- gen_qemu_ld64(cpu_fpr[rd], t0, ctx->mem_idx);
- tcg_gen_addi_tl(t0, t0, 8);
- gen_qemu_ld64(cpu_fpr[(rd + 1) % 32], t0, ctx->mem_idx);
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new();
+ gen_addr_imm_index(ctx, t0, 0);
+ gen_qemu_ld64(ctx, cpu_fpr[rd], t0);
+ gen_addr_add(ctx, t0, t0, 8);
+ gen_qemu_ld64(ctx, cpu_fpr[(rd + 1) % 32], t0);
tcg_temp_free(t0);
}
{
int ra = rA(ctx->opcode);
int rd = rD(ctx->opcode);
- TCGv t0 = tcg_temp_new();
- TCGv t1 = tcg_temp_new();
- gen_addr_imm_index(t0, ctx, 0);
- gen_qemu_ld64(cpu_fpr[rd], t0, ctx->mem_idx);
- tcg_gen_addi_tl(t1, t0, 8);
- gen_qemu_ld64(cpu_fpr[(rd + 1) % 32], t1, ctx->mem_idx);
+ TCGv t0, t1;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ gen_addr_imm_index(ctx, t0, 0);
+ gen_qemu_ld64(ctx, cpu_fpr[rd], t0);
+ gen_addr_add(ctx, t1, t0, 8);
+ gen_qemu_ld64(ctx, cpu_fpr[(rd + 1) % 32], t1);
if (ra != 0)
tcg_gen_mov_tl(cpu_gpr[ra], t0);
tcg_temp_free(t0);
{
int ra = rA(ctx->opcode);
int rd = rD(ctx->opcode);
- TCGv t0 = tcg_temp_new();
- TCGv t1 = tcg_temp_new();
- gen_addr_reg_index(t0, ctx);
- gen_qemu_ld64(cpu_fpr[rd], t0, ctx->mem_idx);
- tcg_gen_addi_tl(t1, t0, 8);
- gen_qemu_ld64(cpu_fpr[(rd + 1) % 32], t1, ctx->mem_idx);
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ TCGv t0, t1;
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_qemu_ld64(ctx, cpu_fpr[rd], t0);
+ t1 = tcg_temp_new();
+ gen_addr_add(ctx, t1, t0, 8);
+ gen_qemu_ld64(ctx, cpu_fpr[(rd + 1) % 32], t1);
+ tcg_temp_free(t1);
if (ra != 0)
tcg_gen_mov_tl(cpu_gpr[ra], t0);
tcg_temp_free(t0);
- tcg_temp_free(t1);
}
/* lfqx */
GEN_HANDLER(lfqx, 0x1F, 0x17, 0x18, 0x00000001, PPC_POWER2)
{
int rd = rD(ctx->opcode);
- TCGv t0 = tcg_temp_new();
- gen_addr_reg_index(t0, ctx);
- gen_qemu_ld64(cpu_fpr[rd], t0, ctx->mem_idx);
- tcg_gen_addi_tl(t0, t0, 8);
- gen_qemu_ld64(cpu_fpr[(rd + 1) % 32], t0, ctx->mem_idx);
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_qemu_ld64(ctx, cpu_fpr[rd], t0);
+ gen_addr_add(ctx, t0, t0, 8);
+ gen_qemu_ld64(ctx, cpu_fpr[(rd + 1) % 32], t0);
tcg_temp_free(t0);
}
GEN_HANDLER(stfq, 0x3C, 0xFF, 0xFF, 0x00000003, PPC_POWER2)
{
int rd = rD(ctx->opcode);
- TCGv t0 = tcg_temp_new();
- gen_addr_imm_index(t0, ctx, 0);
- gen_qemu_st64(cpu_fpr[rd], t0, ctx->mem_idx);
- tcg_gen_addi_tl(t0, t0, 8);
- gen_qemu_st64(cpu_fpr[(rd + 1) % 32], t0, ctx->mem_idx);
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new();
+ gen_addr_imm_index(ctx, t0, 0);
+ gen_qemu_st64(ctx, cpu_fpr[rd], t0);
+ gen_addr_add(ctx, t0, t0, 8);
+ gen_qemu_st64(ctx, cpu_fpr[(rd + 1) % 32], t0);
tcg_temp_free(t0);
}
{
int ra = rA(ctx->opcode);
int rd = rD(ctx->opcode);
- TCGv t0 = tcg_temp_new();
- TCGv t1 = tcg_temp_new();
- gen_addr_imm_index(t0, ctx, 0);
- gen_qemu_st64(cpu_fpr[rd], t0, ctx->mem_idx);
- tcg_gen_addi_tl(t1, t0, 8);
- gen_qemu_st64(cpu_fpr[(rd + 1) % 32], t1, ctx->mem_idx);
+ TCGv t0, t1;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new();
+ gen_addr_imm_index(ctx, t0, 0);
+ gen_qemu_st64(ctx, cpu_fpr[rd], t0);
+ t1 = tcg_temp_new();
+ gen_addr_add(ctx, t1, t0, 8);
+ gen_qemu_st64(ctx, cpu_fpr[(rd + 1) % 32], t1);
+ tcg_temp_free(t1);
if (ra != 0)
tcg_gen_mov_tl(cpu_gpr[ra], t0);
tcg_temp_free(t0);
- tcg_temp_free(t1);
}
/* stfqux */
{
int ra = rA(ctx->opcode);
int rd = rD(ctx->opcode);
- TCGv t0 = tcg_temp_new();
- TCGv t1 = tcg_temp_new();
- gen_addr_reg_index(t0, ctx);
- gen_qemu_st64(cpu_fpr[rd], t0, ctx->mem_idx);
- tcg_gen_addi_tl(t1, t0, 8);
- gen_qemu_st64(cpu_fpr[(rd + 1) % 32], t1, ctx->mem_idx);
+ TCGv t0, t1;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_qemu_st64(ctx, cpu_fpr[rd], t0);
+ t1 = tcg_temp_new();
+ gen_addr_add(ctx, t1, t0, 8);
+ gen_qemu_st64(ctx, cpu_fpr[(rd + 1) % 32], t1);
+ tcg_temp_free(t1);
if (ra != 0)
tcg_gen_mov_tl(cpu_gpr[ra], t0);
tcg_temp_free(t0);
- tcg_temp_free(t1);
}
/* stfqx */
GEN_HANDLER(stfqx, 0x1F, 0x17, 0x1C, 0x00000001, PPC_POWER2)
{
int rd = rD(ctx->opcode);
- TCGv t0 = tcg_temp_new();
- gen_addr_reg_index(t0, ctx);
- gen_qemu_st64(cpu_fpr[rd], t0, ctx->mem_idx);
- tcg_gen_addi_tl(t0, t0, 8);
- gen_qemu_st64(cpu_fpr[(rd + 1) % 32], t0, ctx->mem_idx);
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_qemu_st64(ctx, cpu_fpr[rd], t0);
+ gen_addr_add(ctx, t0, t0, 8);
+ gen_qemu_st64(ctx, cpu_fpr[(rd + 1) % 32], t0);
tcg_temp_free(t0);
}
GEN_HANDLER(mfapidi, 0x1F, 0x13, 0x08, 0x0000F801, PPC_MFAPIDI)
{
/* XXX: TODO */
- GEN_EXCP_INVAL(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
}
/* XXX: not implemented on 440 ? */
GEN_HANDLER(tlbiva, 0x1F, 0x12, 0x18, 0x03FFF801, PPC_TLBIVA)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVOPC(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVOPC(ctx);
+ TCGv t0;
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
- gen_addr_reg_index(cpu_T[0], ctx);
- /* Use the same micro-ops as for tlbie */
-#if defined(TARGET_PPC64)
- if (ctx->sf_mode)
- gen_op_tlbie_64();
- else
-#endif
- gen_op_tlbie();
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_helper_tlbie(t0);
+ tcg_temp_free(t0);
#endif
}
GEN_HANDLER(mfdcr, 0x1F, 0x03, 0x0A, 0x00000001, PPC_DCR)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVREG(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
#else
- uint32_t dcrn = SPR(ctx->opcode);
-
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVREG(ctx);
+ TCGv dcrn;
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
return;
}
- tcg_gen_movi_tl(cpu_T[0], dcrn);
- gen_op_load_dcr();
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
+ /* NIP cannot be restored if the memory exception comes from a helper */
+ gen_update_nip(ctx, ctx->nip - 4);
+ dcrn = tcg_const_tl(SPR(ctx->opcode));
+ gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], dcrn);
+ tcg_temp_free(dcrn);
#endif
}
GEN_HANDLER(mtdcr, 0x1F, 0x03, 0x0E, 0x00000001, PPC_DCR)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVREG(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
#else
- uint32_t dcrn = SPR(ctx->opcode);
-
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVREG(ctx);
+ TCGv dcrn;
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
return;
}
- tcg_gen_movi_tl(cpu_T[0], dcrn);
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rS(ctx->opcode)]);
- gen_op_store_dcr();
+ /* NIP cannot be restored if the memory exception comes from a helper */
+ gen_update_nip(ctx, ctx->nip - 4);
+ dcrn = tcg_const_tl(SPR(ctx->opcode));
+ gen_helper_store_dcr(dcrn, cpu_gpr[rS(ctx->opcode)]);
+ tcg_temp_free(dcrn);
#endif
}
GEN_HANDLER(mfdcrx, 0x1F, 0x03, 0x08, 0x00000000, PPC_DCRX)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVREG(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVREG(ctx);
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
return;
}
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);
- gen_op_load_dcr();
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
+ /* NIP cannot be restored if the memory exception comes from a helper */
+ gen_update_nip(ctx, ctx->nip - 4);
+ gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
/* Note: Rc update flag set leads to undefined state of Rc0 */
#endif
}
GEN_HANDLER(mtdcrx, 0x1F, 0x03, 0x0C, 0x00000000, PPC_DCRX)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVREG(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVREG(ctx);
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
return;
}
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rS(ctx->opcode)]);
- gen_op_store_dcr();
+ /* NIP cannot be restored if the memory exception comes from a helper */
+ gen_update_nip(ctx, ctx->nip - 4);
+ gen_helper_store_dcr(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
/* Note: Rc update flag set leads to undefined state of Rc0 */
#endif
}
/* mfdcrux (PPC 460) : user-mode access to DCR */
GEN_HANDLER(mfdcrux, 0x1F, 0x03, 0x09, 0x00000000, PPC_DCRUX)
{
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);
- gen_op_load_dcr();
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
+ /* NIP cannot be restored if the memory exception comes from a helper */
+ gen_update_nip(ctx, ctx->nip - 4);
+ gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
/* Note: Rc update flag set leads to undefined state of Rc0 */
}
/* mtdcrux (PPC 460) : user-mode access to DCR */
GEN_HANDLER(mtdcrux, 0x1F, 0x03, 0x0D, 0x00000000, PPC_DCRUX)
{
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rS(ctx->opcode)]);
- gen_op_store_dcr();
+ /* NIP cannot be restored if the memory exception comes from a helper */
+ gen_update_nip(ctx, ctx->nip - 4);
+ gen_helper_store_dcr(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
/* Note: Rc update flag set leads to undefined state of Rc0 */
}
GEN_HANDLER(dccci, 0x1F, 0x06, 0x0E, 0x03E00001, PPC_4xx_COMMON)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVOPC(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVOPC(ctx);
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
/* interpreted as no-op */
GEN_HANDLER(dcread, 0x1F, 0x06, 0x0F, 0x00000001, PPC_4xx_COMMON)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVOPC(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
TCGv EA, val;
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVOPC(ctx);
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
+ gen_set_access_type(ctx, ACCESS_CACHE);
EA = tcg_temp_new();
- gen_set_access_type(ACCESS_CACHE);
- gen_addr_reg_index(EA, ctx);
+ gen_addr_reg_index(ctx, EA);
val = tcg_temp_new();
- gen_qemu_ld32u(val, EA, ctx->mem_idx);
+ gen_qemu_ld32u(ctx, val, EA);
tcg_temp_free(val);
tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], EA);
tcg_temp_free(EA);
GEN_HANDLER(iccci, 0x1F, 0x06, 0x1E, 0x00000001, PPC_4xx_COMMON)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVOPC(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVOPC(ctx);
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
/* interpreted as no-op */
GEN_HANDLER(icread, 0x1F, 0x06, 0x1F, 0x03E00001, PPC_4xx_COMMON)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVOPC(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVOPC(ctx);
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
/* interpreted as no-op */
#endif
}
/* rfci (supervisor only) */
GEN_HANDLER2(rfci_40x, "rfci", 0x13, 0x13, 0x01, 0x03FF8001, PPC_40x_EXCP)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVOPC(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVOPC(ctx);
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
/* Restore CPU state */
- gen_op_40x_rfci();
- GEN_SYNC(ctx);
+ gen_helper_40x_rfci();
+ gen_sync_exception(ctx);
#endif
}
GEN_HANDLER(rfci, 0x13, 0x13, 0x01, 0x03FF8001, PPC_BOOKE)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVOPC(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVOPC(ctx);
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
/* Restore CPU state */
- gen_op_rfci();
- GEN_SYNC(ctx);
+ gen_helper_rfci();
+ gen_sync_exception(ctx);
#endif
}
GEN_HANDLER(rfdi, 0x13, 0x07, 0x01, 0x03FF8001, PPC_RFDI)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVOPC(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVOPC(ctx);
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
/* Restore CPU state */
- gen_op_rfdi();
- GEN_SYNC(ctx);
+ gen_helper_rfdi();
+ gen_sync_exception(ctx);
#endif
}
GEN_HANDLER(rfmci, 0x13, 0x06, 0x01, 0x03FF8001, PPC_RFMCI)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVOPC(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVOPC(ctx);
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
/* Restore CPU state */
- gen_op_rfmci();
- GEN_SYNC(ctx);
+ gen_helper_rfmci();
+ gen_sync_exception(ctx);
#endif
}
GEN_HANDLER2(tlbre_40x, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001, PPC_40x_TLB)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVOPC(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVOPC(ctx);
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
switch (rB(ctx->opcode)) {
case 0:
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);
- gen_op_4xx_tlbre_hi();
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
+ gen_helper_4xx_tlbre_hi(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
break;
case 1:
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);
- gen_op_4xx_tlbre_lo();
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
+ gen_helper_4xx_tlbre_lo(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
break;
default:
- GEN_EXCP_INVAL(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
break;
}
#endif
GEN_HANDLER2(tlbsx_40x, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000, PPC_40x_TLB)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVOPC(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVOPC(ctx);
+ TCGv t0;
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
- gen_addr_reg_index(cpu_T[0], ctx);
- gen_op_4xx_tlbsx();
- if (Rc(ctx->opcode))
- gen_op_4xx_tlbsx_check();
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_helper_4xx_tlbsx(cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+ if (Rc(ctx->opcode)) {
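+ /* Copy XER[SO] into CR0, then set CR0[EQ] unless rD holds -1,
+ * i.e. unless the helper reported that no matching TLB entry was found. */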
+ int l1 = gen_new_label();
+ tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_xer);
+ tcg_gen_shri_i32(cpu_crf[0], cpu_crf[0], XER_SO);
+ tcg_gen_andi_i32(cpu_crf[0], cpu_crf[0], 1);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[rD(ctx->opcode)], -1, l1);
+ tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 0x02);
+ gen_set_label(l1);
+ }
#endif
}
GEN_HANDLER2(tlbwe_40x, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001, PPC_40x_TLB)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVOPC(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVOPC(ctx);
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
switch (rB(ctx->opcode)) {
case 0:
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rS(ctx->opcode)]);
- gen_op_4xx_tlbwe_hi();
+ gen_helper_4xx_tlbwe_hi(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
break;
case 1:
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rS(ctx->opcode)]);
- gen_op_4xx_tlbwe_lo();
+ gen_helper_4xx_tlbwe_lo(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
break;
default:
- GEN_EXCP_INVAL(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
break;
}
#endif
GEN_HANDLER2(tlbre_440, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001, PPC_BOOKE)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVOPC(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVOPC(ctx);
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
switch (rB(ctx->opcode)) {
case 0:
case 1:
case 2:
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);
- gen_op_440_tlbre(rB(ctx->opcode));
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
+ {
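+ /* rB selects which word of the TLB entry (0, 1 or 2) is transferred. */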
+ TCGv_i32 t0 = tcg_const_i32(rB(ctx->opcode));
+ gen_helper_440_tlbre(cpu_gpr[rD(ctx->opcode)], t0, cpu_gpr[rA(ctx->opcode)]);
+ tcg_temp_free_i32(t0);
+ }
break;
default:
- GEN_EXCP_INVAL(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
break;
}
#endif
GEN_HANDLER2(tlbsx_440, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000, PPC_BOOKE)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVOPC(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVOPC(ctx);
+ TCGv t0;
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
- gen_addr_reg_index(cpu_T[0], ctx);
- gen_op_440_tlbsx();
- if (Rc(ctx->opcode))
- gen_op_4xx_tlbsx_check();
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_helper_440_tlbsx(cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+ if (Rc(ctx->opcode)) {
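+ /* Same CR0 update as the 40x tlbsx: copy XER[SO], set EQ on a TLB hit. */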
+ int l1 = gen_new_label();
+ tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_xer);
+ tcg_gen_shri_i32(cpu_crf[0], cpu_crf[0], XER_SO);
+ tcg_gen_andi_i32(cpu_crf[0], cpu_crf[0], 1);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[rD(ctx->opcode)], -1, l1);
+ tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 0x02);
+ gen_set_label(l1);
+ }
#endif
}
GEN_HANDLER2(tlbwe_440, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001, PPC_BOOKE)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVOPC(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVOPC(ctx);
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
switch (rB(ctx->opcode)) {
case 0:
case 1:
case 2:
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rS(ctx->opcode)]);
- gen_op_440_tlbwe(rB(ctx->opcode));
+ {
+ TCGv_i32 t0 = tcg_const_i32(rB(ctx->opcode));
+ gen_helper_440_tlbwe(t0, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+ tcg_temp_free_i32(t0);
+ }
break;
default:
- GEN_EXCP_INVAL(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
break;
}
#endif
GEN_HANDLER(wrtee, 0x1F, 0x03, 0x04, 0x000FFC01, PPC_WRTEE)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVOPC(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVOPC(ctx);
+ TCGv t0;
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rD(ctx->opcode)]);
- gen_op_wrte();
+ t0 = tcg_temp_new();
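+ /* Transfer only the EE bit from rD into MSR: extract rD[EE],
+ * clear MSR[EE], then OR the extracted bit back in. */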
+ tcg_gen_andi_tl(t0, cpu_gpr[rD(ctx->opcode)], (1 << MSR_EE));
+ tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(1 << MSR_EE));
+ tcg_gen_or_tl(cpu_msr, cpu_msr, t0);
+ tcg_temp_free(t0);
/* Stop translation to have a chance to raise an exception
* if we just set msr_ee to 1
*/
- GEN_STOP(ctx);
+ gen_stop_exception(ctx);
#endif
}
GEN_HANDLER(wrteei, 0x1F, 0x03, 0x05, 0x000EFC01, PPC_WRTEE)
{
#if defined(CONFIG_USER_ONLY)
- GEN_EXCP_PRIVOPC(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
- if (unlikely(!ctx->supervisor)) {
- GEN_EXCP_PRIVOPC(ctx);
+ if (unlikely(!ctx->mem_idx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
- tcg_gen_movi_tl(cpu_T[0], ctx->opcode & 0x00010000);
- gen_op_wrte();
- /* Stop translation to have a chance to raise an exception
- * if we just set msr_ee to 1
- */
- GEN_STOP(ctx);
+ if (ctx->opcode & 0x00010000) {
+ tcg_gen_ori_tl(cpu_msr, cpu_msr, (1 << MSR_EE));
+ /* Stop translation to have a chance to raise an exception */
+ gen_stop_exception(ctx);
+ } else {
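+ /* E bit is zero: clear MSR[EE]. */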
+ tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(1 << MSR_EE));
+ }
#endif
}
/* dlmzb */
GEN_HANDLER(dlmzb, 0x1F, 0x0E, 0x02, 0x00000000, PPC_440_SPEC)
{
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rS(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]);
- gen_op_440_dlmzb();
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]);
- tcg_gen_andi_tl(cpu_xer, cpu_xer, ~0x7F);
- tcg_gen_or_tl(cpu_xer, cpu_xer, cpu_T[0]);
- if (Rc(ctx->opcode)) {
- gen_op_440_dlmzb_update_Rc();
- tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_T[0]);
- tcg_gen_andi_i32(cpu_crf[0], cpu_crf[0], 0xf);
- }
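+ /* The helper replaces the removed inline sequence: it writes the byte
+ * count to rA, updates XER and, when the Rc flag is non-zero, CR0. */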
+ TCGv_i32 t0 = tcg_const_i32(Rc(ctx->opcode));
+ gen_helper_dlmzb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
+ cpu_gpr[rB(ctx->opcode)], t0);
+ tcg_temp_free_i32(t0);
}
/* mbar replaces eieio on 440 */
-GEN_HANDLER(mbar, 0x1F, 0x16, 0x13, 0x001FF801, PPC_BOOKE)
+GEN_HANDLER(mbar, 0x1F, 0x16, 0x1a, 0x001FF801, PPC_BOOKE)
{
/* interpreted as no-op */
}
{ \
TCGv EA; \
if (unlikely(!ctx->altivec_enabled)) { \
- GEN_EXCP_NO_VR(ctx); \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
return; \
} \
+ gen_set_access_type(ctx, ACCESS_INT); \
EA = tcg_temp_new(); \
- gen_addr_reg_index(EA, ctx); \
+ gen_addr_reg_index(ctx, EA); \
tcg_gen_andi_tl(EA, EA, ~0xf); \
- if (ctx->mem_idx & 1) { \
- gen_qemu_ld64(cpu_avrl[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ if (ctx->le_mode) { \
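+ /* In little-endian mode the two doublewords are loaded in reverse order. */ \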
+ gen_qemu_ld64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
tcg_gen_addi_tl(EA, EA, 8); \
- gen_qemu_ld64(cpu_avrh[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_qemu_ld64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
} else { \
- gen_qemu_ld64(cpu_avrh[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_qemu_ld64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
tcg_gen_addi_tl(EA, EA, 8); \
- gen_qemu_ld64(cpu_avrl[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_qemu_ld64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
} \
tcg_temp_free(EA); \
}
{ \
TCGv EA; \
if (unlikely(!ctx->altivec_enabled)) { \
- GEN_EXCP_NO_VR(ctx); \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
return; \
} \
+ gen_set_access_type(ctx, ACCESS_INT); \
EA = tcg_temp_new(); \
- gen_addr_reg_index(EA, ctx); \
+ gen_addr_reg_index(ctx, EA); \
tcg_gen_andi_tl(EA, EA, ~0xf); \
- if (ctx->mem_idx & 1) { \
- gen_qemu_st64(cpu_avrl[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ if (ctx->le_mode) { \
+ gen_qemu_st64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
tcg_gen_addi_tl(EA, EA, 8); \
- gen_qemu_st64(cpu_avrh[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_qemu_st64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
} else { \
- gen_qemu_st64(cpu_avrh[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_qemu_st64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
tcg_gen_addi_tl(EA, EA, 8); \
- gen_qemu_st64(cpu_avrl[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_qemu_st64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
} \
tcg_temp_free(EA); \
}
/* Handler for undefined SPE opcodes */
static always_inline void gen_speundef (DisasContext *ctx)
{
- GEN_EXCP_INVAL(ctx);
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
}
/* SPE logic */
static always_inline void gen_##name (DisasContext *ctx) \
{ \
if (unlikely(!ctx->spe_enabled)) { \
- GEN_EXCP_NO_AP(ctx); \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
return; \
} \
tcg_op(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], \
static always_inline void gen_##name (DisasContext *ctx) \
{ \
if (unlikely(!ctx->spe_enabled)) { \
- GEN_EXCP_NO_AP(ctx); \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
return; \
} \
tcg_op(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], \
static always_inline void gen_##name (DisasContext *ctx) \
{ \
if (unlikely(!ctx->spe_enabled)) { \
- GEN_EXCP_NO_AP(ctx); \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
return; \
} \
TCGv_i32 t0 = tcg_temp_local_new_i32(); \
static always_inline void gen_##name (DisasContext *ctx) \
{ \
if (unlikely(!ctx->spe_enabled)) { \
- GEN_EXCP_NO_AP(ctx); \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
return; \
} \
tcg_opi(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], \
static always_inline void gen_##name (DisasContext *ctx) \
{ \
if (unlikely(!ctx->spe_enabled)) { \
- GEN_EXCP_NO_AP(ctx); \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
return; \
} \
TCGv_i32 t0 = tcg_temp_local_new_i32(); \
static always_inline void gen_##name (DisasContext *ctx) \
{ \
if (unlikely(!ctx->spe_enabled)) { \
- GEN_EXCP_NO_AP(ctx); \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
return; \
} \
tcg_op(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); \
static always_inline void gen_##name (DisasContext *ctx) \
{ \
if (unlikely(!ctx->spe_enabled)) { \
- GEN_EXCP_NO_AP(ctx); \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
return; \
} \
TCGv_i32 t0 = tcg_temp_local_new_i32(); \
static always_inline void gen_##name (DisasContext *ctx) \
{ \
if (unlikely(!ctx->spe_enabled)) { \
- GEN_EXCP_NO_AP(ctx); \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
return; \
} \
tcg_op(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], \
static always_inline void gen_evmergehi (DisasContext *ctx)
{
if (unlikely(!ctx->spe_enabled)) {
- GEN_EXCP_NO_AP(ctx);
+ gen_exception(ctx, POWERPC_EXCP_APU);
return;
}
#if defined(TARGET_PPC64)
static always_inline void gen_##name (DisasContext *ctx) \
{ \
if (unlikely(!ctx->spe_enabled)) { \
- GEN_EXCP_NO_AP(ctx); \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
return; \
} \
TCGv_i32 t0 = tcg_temp_local_new_i32(); \
tcg_op(t0, t0, rA(ctx->opcode)); \
tcg_gen_shri_i64(t2, cpu_gpr[rB(ctx->opcode)], 32); \
tcg_gen_trunc_i64_i32(t1, t2); \
- tcg_temp_free_i64(t2); \
+ tcg_temp_free_i64(t2); \
tcg_op(t1, t1, rA(ctx->opcode)); \
tcg_gen_concat_i32_i64(cpu_gpr[rD(ctx->opcode)], t0, t1); \
tcg_temp_free_i32(t0); \
static always_inline void gen_##name (DisasContext *ctx) \
{ \
if (unlikely(!ctx->spe_enabled)) { \
- GEN_EXCP_NO_AP(ctx); \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
return; \
} \
tcg_op(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
static always_inline void gen_##name (DisasContext *ctx) \
{ \
if (unlikely(!ctx->spe_enabled)) { \
- GEN_EXCP_NO_AP(ctx); \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
return; \
} \
int l1 = gen_new_label(); \
static always_inline void gen_##name (DisasContext *ctx) \
{ \
if (unlikely(!ctx->spe_enabled)) { \
- GEN_EXCP_NO_AP(ctx); \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
return; \
} \
int l1 = gen_new_label(); \
static always_inline void gen_evmergelo (DisasContext *ctx)
{
if (unlikely(!ctx->spe_enabled)) {
- GEN_EXCP_NO_AP(ctx);
+ gen_exception(ctx, POWERPC_EXCP_APU);
return;
}
#if defined(TARGET_PPC64)
static always_inline void gen_evmergehilo (DisasContext *ctx)
{
if (unlikely(!ctx->spe_enabled)) {
- GEN_EXCP_NO_AP(ctx);
+ gen_exception(ctx, POWERPC_EXCP_APU);
return;
}
#if defined(TARGET_PPC64)
static always_inline void gen_evmergelohi (DisasContext *ctx)
{
if (unlikely(!ctx->spe_enabled)) {
- GEN_EXCP_NO_AP(ctx);
+ gen_exception(ctx, POWERPC_EXCP_APU);
return;
}
#if defined(TARGET_PPC64)
GEN_SPE(evcmpeq, speundef, 0x1A, 0x08, 0x00600000, PPC_SPE); ////
/* SPE load and stores */
-static always_inline void gen_addr_spe_imm_index (TCGv EA, DisasContext *ctx, int sh)
+static always_inline void gen_addr_spe_imm_index (DisasContext *ctx, TCGv EA, int sh)
{
target_ulong uimm = rB(ctx->opcode);
- if (rA(ctx->opcode) == 0)
+ if (rA(ctx->opcode) == 0) {
tcg_gen_movi_tl(EA, uimm << sh);
- else
+ } else {
tcg_gen_addi_tl(EA, cpu_gpr[rA(ctx->opcode)], uimm << sh);
+#if defined(TARGET_PPC64)
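+ /* In 32-bit mode on a 64-bit implementation, effective addresses wrap at 32 bits. */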
+ if (!ctx->sf_mode) {
+ tcg_gen_ext32u_tl(EA, EA);
+ }
+#endif
+ }
}
static always_inline void gen_op_evldd(DisasContext *ctx, TCGv addr)
{
#if defined(TARGET_PPC64)
- gen_qemu_ld64(cpu_gpr[rD(ctx->opcode)], addr, ctx->mem_idx);
+ gen_qemu_ld64(ctx, cpu_gpr[rD(ctx->opcode)], addr);
#else
TCGv_i64 t0 = tcg_temp_new_i64();
- gen_qemu_ld64(t0, addr, ctx->mem_idx);
+ gen_qemu_ld64(ctx, t0, addr);
tcg_gen_trunc_i64_i32(cpu_gpr[rD(ctx->opcode)], t0);
tcg_gen_shri_i64(t0, t0, 32);
tcg_gen_trunc_i64_i32(cpu_gprh[rD(ctx->opcode)], t0);
{
#if defined(TARGET_PPC64)
TCGv t0 = tcg_temp_new();
- gen_qemu_ld32u(t0, addr, ctx->mem_idx);
+ gen_qemu_ld32u(ctx, t0, addr);
tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 32);
- tcg_gen_addi_tl(addr, addr, 4);
- gen_qemu_ld32u(t0, addr, ctx->mem_idx);
+ gen_addr_add(ctx, addr, addr, 4);
+ gen_qemu_ld32u(ctx, t0, addr);
tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
tcg_temp_free(t0);
#else
- gen_qemu_ld32u(cpu_gprh[rD(ctx->opcode)], addr, ctx->mem_idx);
- tcg_gen_addi_tl(addr, addr, 4);
- gen_qemu_ld32u(cpu_gpr[rD(ctx->opcode)], addr, ctx->mem_idx);
+ gen_qemu_ld32u(ctx, cpu_gprh[rD(ctx->opcode)], addr);
+ gen_addr_add(ctx, addr, addr, 4);
+ gen_qemu_ld32u(ctx, cpu_gpr[rD(ctx->opcode)], addr);
#endif
}
{
TCGv t0 = tcg_temp_new();
#if defined(TARGET_PPC64)
- gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ gen_qemu_ld16u(ctx, t0, addr);
tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 48);
- tcg_gen_addi_tl(addr, addr, 2);
- gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
tcg_gen_shli_tl(t0, t0, 32);
tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
- tcg_gen_addi_tl(addr, addr, 2);
- gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
tcg_gen_shli_tl(t0, t0, 16);
tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
- tcg_gen_addi_tl(addr, addr, 2);
- gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
#else
- gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ gen_qemu_ld16u(ctx, t0, addr);
tcg_gen_shli_tl(cpu_gprh[rD(ctx->opcode)], t0, 16);
- tcg_gen_addi_tl(addr, addr, 2);
- gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
tcg_gen_or_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rD(ctx->opcode)], t0);
- tcg_gen_addi_tl(addr, addr, 2);
- gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
tcg_gen_shli_tl(cpu_gprh[rD(ctx->opcode)], t0, 16);
- tcg_gen_addi_tl(addr, addr, 2);
- gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
#endif
tcg_temp_free(t0);
static always_inline void gen_op_evlhhesplat(DisasContext *ctx, TCGv addr)
{
TCGv t0 = tcg_temp_new();
- gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ gen_qemu_ld16u(ctx, t0, addr);
#if defined(TARGET_PPC64)
tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 48);
tcg_gen_shli_tl(t0, t0, 16);
static always_inline void gen_op_evlhhousplat(DisasContext *ctx, TCGv addr)
{
TCGv t0 = tcg_temp_new();
- gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ gen_qemu_ld16u(ctx, t0, addr);
#if defined(TARGET_PPC64)
tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 32);
tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
static always_inline void gen_op_evlhhossplat(DisasContext *ctx, TCGv addr)
{
TCGv t0 = tcg_temp_new();
- gen_qemu_ld16s(t0, addr, ctx->mem_idx);
+ gen_qemu_ld16s(ctx, t0, addr);
#if defined(TARGET_PPC64)
tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 32);
tcg_gen_ext32u_tl(t0, t0);
{
TCGv t0 = tcg_temp_new();
#if defined(TARGET_PPC64)
- gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ gen_qemu_ld16u(ctx, t0, addr);
tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 48);
- gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
tcg_gen_shli_tl(t0, t0, 16);
tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
#else
- gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ gen_qemu_ld16u(ctx, t0, addr);
tcg_gen_shli_tl(cpu_gprh[rD(ctx->opcode)], t0, 16);
- tcg_gen_addi_tl(addr, addr, 2);
- gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 16);
#endif
tcg_temp_free(t0);
{
#if defined(TARGET_PPC64)
TCGv t0 = tcg_temp_new();
- gen_qemu_ld16u(cpu_gpr[rD(ctx->opcode)], addr, ctx->mem_idx);
- tcg_gen_addi_tl(addr, addr, 2);
- gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ gen_qemu_ld16u(ctx, cpu_gpr[rD(ctx->opcode)], addr);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
tcg_gen_shli_tl(t0, t0, 32);
tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
tcg_temp_free(t0);
#else
- gen_qemu_ld16u(cpu_gprh[rD(ctx->opcode)], addr, ctx->mem_idx);
- tcg_gen_addi_tl(addr, addr, 2);
- gen_qemu_ld16u(cpu_gpr[rD(ctx->opcode)], addr, ctx->mem_idx);
+ gen_qemu_ld16u(ctx, cpu_gprh[rD(ctx->opcode)], addr);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, cpu_gpr[rD(ctx->opcode)], addr);
#endif
}
{
#if defined(TARGET_PPC64)
TCGv t0 = tcg_temp_new();
- gen_qemu_ld16s(t0, addr, ctx->mem_idx);
+ gen_qemu_ld16s(ctx, t0, addr);
tcg_gen_ext32u_tl(cpu_gpr[rD(ctx->opcode)], t0);
- tcg_gen_addi_tl(addr, addr, 2);
- gen_qemu_ld16s(t0, addr, ctx->mem_idx);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16s(ctx, t0, addr);
tcg_gen_shli_tl(t0, t0, 32);
tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
tcg_temp_free(t0);
#else
- gen_qemu_ld16s(cpu_gprh[rD(ctx->opcode)], addr, ctx->mem_idx);
- tcg_gen_addi_tl(addr, addr, 2);
- gen_qemu_ld16s(cpu_gpr[rD(ctx->opcode)], addr, ctx->mem_idx);
+ gen_qemu_ld16s(ctx, cpu_gprh[rD(ctx->opcode)], addr);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16s(ctx, cpu_gpr[rD(ctx->opcode)], addr);
#endif
}
static always_inline void gen_op_evlwwsplat(DisasContext *ctx, TCGv addr)
{
TCGv t0 = tcg_temp_new();
- gen_qemu_ld32u(t0, addr, ctx->mem_idx);
+ gen_qemu_ld32u(ctx, t0, addr);
#if defined(TARGET_PPC64)
tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 32);
tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
{
TCGv t0 = tcg_temp_new();
#if defined(TARGET_PPC64)
- gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ gen_qemu_ld16u(ctx, t0, addr);
tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 48);
tcg_gen_shli_tl(t0, t0, 32);
tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
- tcg_gen_addi_tl(addr, addr, 2);
- gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
tcg_gen_shli_tl(t0, t0, 16);
tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
#else
- gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ gen_qemu_ld16u(ctx, t0, addr);
tcg_gen_shli_tl(cpu_gprh[rD(ctx->opcode)], t0, 16);
tcg_gen_or_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rD(ctx->opcode)], t0);
- tcg_gen_addi_tl(addr, addr, 2);
- gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 16);
tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
#endif
static always_inline void gen_op_evstdd(DisasContext *ctx, TCGv addr)
{
#if defined(TARGET_PPC64)
- gen_qemu_st64(cpu_gpr[rS(ctx->opcode)], addr, ctx->mem_idx);
+ gen_qemu_st64(ctx, cpu_gpr[rS(ctx->opcode)], addr);
#else
TCGv_i64 t0 = tcg_temp_new_i64();
tcg_gen_concat_i32_i64(t0, cpu_gpr[rS(ctx->opcode)], cpu_gprh[rS(ctx->opcode)]);
- gen_qemu_st64(t0, addr, ctx->mem_idx);
+ gen_qemu_st64(ctx, t0, addr);
tcg_temp_free_i64(t0);
#endif
}
#if defined(TARGET_PPC64)
TCGv t0 = tcg_temp_new();
tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 32);
- gen_qemu_st32(t0, addr, ctx->mem_idx);
+ gen_qemu_st32(ctx, t0, addr);
tcg_temp_free(t0);
#else
- gen_qemu_st32(cpu_gprh[rS(ctx->opcode)], addr, ctx->mem_idx);
+ gen_qemu_st32(ctx, cpu_gprh[rS(ctx->opcode)], addr);
#endif
- tcg_gen_addi_tl(addr, addr, 4);
- gen_qemu_st32(cpu_gpr[rS(ctx->opcode)], addr, ctx->mem_idx);
+ gen_addr_add(ctx, addr, addr, 4);
+ gen_qemu_st32(ctx, cpu_gpr[rS(ctx->opcode)], addr);
}
static always_inline void gen_op_evstdh(DisasContext *ctx, TCGv addr)
#else
tcg_gen_shri_tl(t0, cpu_gprh[rS(ctx->opcode)], 16);
#endif
- gen_qemu_st16(t0, addr, ctx->mem_idx);
- tcg_gen_addi_tl(addr, addr, 2);
+ gen_qemu_st16(ctx, t0, addr);
+ gen_addr_add(ctx, addr, addr, 2);
#if defined(TARGET_PPC64)
tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 32);
- gen_qemu_st16(t0, addr, ctx->mem_idx);
+ gen_qemu_st16(ctx, t0, addr);
#else
- gen_qemu_st16(cpu_gprh[rS(ctx->opcode)], addr, ctx->mem_idx);
+ gen_qemu_st16(ctx, cpu_gprh[rS(ctx->opcode)], addr);
#endif
- tcg_gen_addi_tl(addr, addr, 2);
+ gen_addr_add(ctx, addr, addr, 2);
tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 16);
- gen_qemu_st16(t0, addr, ctx->mem_idx);
+ gen_qemu_st16(ctx, t0, addr);
tcg_temp_free(t0);
- tcg_gen_addi_tl(addr, addr, 2);
- gen_qemu_st16(cpu_gpr[rS(ctx->opcode)], addr, ctx->mem_idx);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_st16(ctx, cpu_gpr[rS(ctx->opcode)], addr);
}
static always_inline void gen_op_evstwhe(DisasContext *ctx, TCGv addr)
#else
tcg_gen_shri_tl(t0, cpu_gprh[rS(ctx->opcode)], 16);
#endif
- gen_qemu_st16(t0, addr, ctx->mem_idx);
- tcg_gen_addi_tl(addr, addr, 2);
+ gen_qemu_st16(ctx, t0, addr);
+ gen_addr_add(ctx, addr, addr, 2);
tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 16);
- gen_qemu_st16(t0, addr, ctx->mem_idx);
+ gen_qemu_st16(ctx, t0, addr);
tcg_temp_free(t0);
}
#if defined(TARGET_PPC64)
TCGv t0 = tcg_temp_new();
tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 32);
- gen_qemu_st16(t0, addr, ctx->mem_idx);
+ gen_qemu_st16(ctx, t0, addr);
tcg_temp_free(t0);
#else
- gen_qemu_st16(cpu_gprh[rS(ctx->opcode)], addr, ctx->mem_idx);
+ gen_qemu_st16(ctx, cpu_gprh[rS(ctx->opcode)], addr);
#endif
- tcg_gen_addi_tl(addr, addr, 2);
- gen_qemu_st16(cpu_gpr[rS(ctx->opcode)], addr, ctx->mem_idx);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_st16(ctx, cpu_gpr[rS(ctx->opcode)], addr);
}
static always_inline void gen_op_evstwwe(DisasContext *ctx, TCGv addr)
#if defined(TARGET_PPC64)
TCGv t0 = tcg_temp_new();
tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 32);
- gen_qemu_st32(t0, addr, ctx->mem_idx);
+ gen_qemu_st32(ctx, t0, addr);
tcg_temp_free(t0);
#else
- gen_qemu_st32(cpu_gprh[rS(ctx->opcode)], addr, ctx->mem_idx);
+ gen_qemu_st32(ctx, cpu_gprh[rS(ctx->opcode)], addr);
#endif
}
static always_inline void gen_op_evstwwo(DisasContext *ctx, TCGv addr)
{
- gen_qemu_st32(cpu_gpr[rS(ctx->opcode)], addr, ctx->mem_idx);
+ gen_qemu_st32(ctx, cpu_gpr[rS(ctx->opcode)], addr);
}
#define GEN_SPEOP_LDST(name, opc2, sh) \
-GEN_HANDLER(gen_##name, 0x04, opc2, 0x0C, 0x00000000, PPC_SPE) \
+GEN_HANDLER(name, 0x04, opc2, 0x0C, 0x00000000, PPC_SPE) \
{ \
TCGv t0; \
if (unlikely(!ctx->spe_enabled)) { \
- GEN_EXCP_NO_AP(ctx); \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
return; \
} \
+ gen_set_access_type(ctx, ACCESS_INT); \
t0 = tcg_temp_new(); \
if (Rc(ctx->opcode)) { \
- gen_addr_spe_imm_index(t0, ctx, sh); \
+ gen_addr_spe_imm_index(ctx, t0, sh); \
} else { \
- gen_addr_reg_index(t0, ctx); \
+ gen_addr_reg_index(ctx, t0); \
} \
gen_op_##name(ctx, t0); \
tcg_temp_free(t0); \
TCGv_i32 t0, t1; \
TCGv_i64 t2; \
if (unlikely(!ctx->spe_enabled)) { \
- GEN_EXCP_NO_AP(ctx); \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
return; \
} \
t0 = tcg_temp_new_i32(); \
static always_inline void gen_##name (DisasContext *ctx) \
{ \
if (unlikely(!ctx->spe_enabled)) { \
- GEN_EXCP_NO_AP(ctx); \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
return; \
} \
gen_helper_##name(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], \
{ \
TCGv_i32 t0, t1; \
if (unlikely(!ctx->spe_enabled)) { \
- GEN_EXCP_NO_AP(ctx); \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
return; \
} \
t0 = tcg_temp_new_i32(); \
static always_inline void gen_##name (DisasContext *ctx) \
{ \
if (unlikely(!ctx->spe_enabled)) { \
- GEN_EXCP_NO_AP(ctx); \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
return; \
} \
gen_helper_##name(cpu_crf[crfD(ctx->opcode)], \
static always_inline void gen_##name (DisasContext *ctx) \
{ \
if (unlikely(!ctx->spe_enabled)) { \
- GEN_EXCP_NO_AP(ctx); \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
return; \
} \
gen_helper_##name(cpu_gpr[rD(ctx->opcode)], \
{ \
TCGv_i64 t0, t1; \
if (unlikely(!ctx->spe_enabled)) { \
- GEN_EXCP_NO_AP(ctx); \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
return; \
} \
t0 = tcg_temp_new_i64(); \
static always_inline void gen_##name (DisasContext *ctx) \
{ \
if (unlikely(!ctx->spe_enabled)) { \
- GEN_EXCP_NO_AP(ctx); \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
return; \
} \
gen_helper_##name(cpu_crf[crfD(ctx->opcode)], \
{ \
TCGv_i64 t0, t1; \
if (unlikely(!ctx->spe_enabled)) { \
- GEN_EXCP_NO_AP(ctx); \
+ gen_exception(ctx, POWERPC_EXCP_APU); \
return; \
} \
t0 = tcg_temp_new_i64(); \
static always_inline void gen_evfsabs (DisasContext *ctx)
{
if (unlikely(!ctx->spe_enabled)) {
- GEN_EXCP_NO_AP(ctx);
+ gen_exception(ctx, POWERPC_EXCP_APU);
return;
}
#if defined(TARGET_PPC64)
static always_inline void gen_evfsnabs (DisasContext *ctx)
{
if (unlikely(!ctx->spe_enabled)) {
- GEN_EXCP_NO_AP(ctx);
+ gen_exception(ctx, POWERPC_EXCP_APU);
return;
}
#if defined(TARGET_PPC64)
static always_inline void gen_evfsneg (DisasContext *ctx)
{
if (unlikely(!ctx->spe_enabled)) {
- GEN_EXCP_NO_AP(ctx);
+ gen_exception(ctx, POWERPC_EXCP_APU);
return;
}
#if defined(TARGET_PPC64)
static always_inline void gen_efsabs (DisasContext *ctx)
{
if (unlikely(!ctx->spe_enabled)) {
- GEN_EXCP_NO_AP(ctx);
+ gen_exception(ctx, POWERPC_EXCP_APU);
return;
}
tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], (target_long)~0x80000000LL);
static always_inline void gen_efsnabs (DisasContext *ctx)
{
if (unlikely(!ctx->spe_enabled)) {
- GEN_EXCP_NO_AP(ctx);
+ gen_exception(ctx, POWERPC_EXCP_APU);
return;
}
tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0x80000000);
static always_inline void gen_efsneg (DisasContext *ctx)
{
if (unlikely(!ctx->spe_enabled)) {
- GEN_EXCP_NO_AP(ctx);
+ gen_exception(ctx, POWERPC_EXCP_APU);
return;
}
tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0x80000000);
static always_inline void gen_efdabs (DisasContext *ctx)
{
if (unlikely(!ctx->spe_enabled)) {
- GEN_EXCP_NO_AP(ctx);
+ gen_exception(ctx, POWERPC_EXCP_APU);
return;
}
#if defined(TARGET_PPC64)
static always_inline void gen_efdnabs (DisasContext *ctx)
{
if (unlikely(!ctx->spe_enabled)) {
- GEN_EXCP_NO_AP(ctx);
+ gen_exception(ctx, POWERPC_EXCP_APU);
return;
}
#if defined(TARGET_PPC64)
static always_inline void gen_efdneg (DisasContext *ctx)
{
if (unlikely(!ctx->spe_enabled)) {
- GEN_EXCP_NO_AP(ctx);
+ gen_exception(ctx, POWERPC_EXCP_APU);
return;
}
#if defined(TARGET_PPC64)
if ((i & (RFPL - 1)) == (RFPL - 1))
cpu_fprintf(f, "\n");
}
+ cpu_fprintf(f, "FPSCR %08x\n", env->fpscr);
#if !defined(CONFIG_USER_ONLY)
cpu_fprintf(f, "SRR0 " ADDRX " SRR1 " ADDRX " SDR1 " ADDRX "\n",
env->spr[SPR_SRR0], env->spr[SPR_SRR1], env->sdr1);
opc_handler_t **table, *handler;
target_ulong pc_start;
uint16_t *gen_opc_end;
- int supervisor, little_endian;
CPUBreakpoint *bp;
int j, lj = -1;
int num_insns;
pc_start = tb->pc;
gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
-#if defined(OPTIMIZE_FPRF_UPDATE)
- gen_fprf_ptr = gen_fprf_buf;
-#endif
ctx.nip = pc_start;
ctx.tb = tb;
ctx.exception = POWERPC_EXCP_NONE;
ctx.spr_cb = env->spr_cb;
- supervisor = env->mmu_idx;
-#if !defined(CONFIG_USER_ONLY)
- ctx.supervisor = supervisor;
-#endif
- little_endian = env->hflags & (1 << MSR_LE) ? 1 : 0;
+ ctx.mem_idx = env->mmu_idx;
+ ctx.access_type = -1;
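+ /* -1 never matches a real access type, so the first gen_set_access_type()
+ * in the translation block always emits the update. */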
+ ctx.le_mode = env->hflags & (1 << MSR_LE) ? 1 : 0;
#if defined(TARGET_PPC64)
ctx.sf_mode = msr_sf;
- ctx.mem_idx = (supervisor << 2) | (msr_sf << 1) | little_endian;
-#else
- ctx.mem_idx = (supervisor << 1) | little_endian;
#endif
ctx.fpu_enabled = msr_fp;
if ((env->flags & POWERPC_FLAG_SPE) && msr_spe)
if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
TAILQ_FOREACH(bp, &env->breakpoints, entry) {
if (bp->pc == ctx.nip) {
- gen_update_nip(&ctx, ctx.nip);
- gen_helper_raise_debug();
+ gen_debug_exception(ctxp);
break;
}
}
if (loglevel & CPU_LOG_TB_IN_ASM) {
fprintf(logfile, "----------------\n");
fprintf(logfile, "nip=" ADDRX " super=%d ir=%d\n",
- ctx.nip, supervisor, (int)msr_ir);
+ ctx.nip, ctx.mem_idx, (int)msr_ir);
}
#endif
if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
gen_io_start();
- if (unlikely(little_endian)) {
+ if (unlikely(ctx.le_mode)) {
ctx.opcode = bswap32(ldl_code(ctx.nip));
} else {
ctx.opcode = ldl_code(ctx.nip);
opc2(ctx.opcode), opc3(ctx.opcode),
ctx.opcode, ctx.nip - 4);
}
- GEN_EXCP_INVAL(ctxp);
+ gen_inval_exception(ctxp, POWERPC_EXCP_INVAL_INVAL);
break;
}
}
ctx.exception != POWERPC_SYSCALL &&
ctx.exception != POWERPC_EXCP_TRAP &&
ctx.exception != POWERPC_EXCP_BRANCH)) {
- GEN_EXCP(ctxp, POWERPC_EXCP_TRACE, 0);
+ gen_exception(ctxp, POWERPC_EXCP_TRACE);
} else if (unlikely(((ctx.nip & (TARGET_PAGE_SIZE - 1)) == 0) ||
(env->singlestep_enabled) ||
num_insns >= max_insns)) {
gen_goto_tb(&ctx, 0, ctx.nip);
} else if (ctx.exception != POWERPC_EXCP_BRANCH) {
if (unlikely(env->singlestep_enabled)) {
- gen_update_nip(&ctx, ctx.nip);
- gen_helper_raise_debug();
+ gen_debug_exception(ctxp);
}
/* Generate the return instruction */
tcg_gen_exit_tb(0);
if (loglevel & CPU_LOG_TB_IN_ASM) {
int flags;
flags = env->bfd_mach;
- flags |= little_endian << 16;
+ flags |= ctx.le_mode << 16;
fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
target_disas(logfile, pc_start, ctx.nip - pc_start, flags);
fprintf(logfile, "\n");