* Copyright (c) 2004-2005 Jocelyn Mayer
* Copyright (c) 2006 Marius Groeger (FPU operations)
* Copyright (c) 2006 Thiemo Seufer (MIPS32R2 support)
 * Copyright (c) 2009 CodeSourcery (MIPS16 support)
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include <stdarg.h>
//#define MIPS_DEBUG_DISAS
//#define MIPS_DEBUG_SIGN_EXTENSIONS
-//#define MIPS_SINGLE_STEP
/* MIPS major opcodes */
#define MASK_OP_MAJOR(op) (op & (0x3F << 26))
OPC_ADDIU = (0x09 << 26),
OPC_SLTI = (0x0A << 26),
OPC_SLTIU = (0x0B << 26),
+ /* logic with immediate */
OPC_ANDI = (0x0C << 26),
OPC_ORI = (0x0D << 26),
OPC_XORI = (0x0E << 26),
OPC_LUI = (0x0F << 26),
+ /* arithmetic with immediate */
OPC_DADDI = (0x18 << 26),
OPC_DADDIU = (0x19 << 26),
/* Jump and branches */
OPC_LH = (0x21 << 26),
OPC_LWL = (0x22 << 26),
OPC_LW = (0x23 << 26),
+ OPC_LWPC = OPC_LW | 0x5,
OPC_LBU = (0x24 << 26),
OPC_LHU = (0x25 << 26),
OPC_LWR = (0x26 << 26),
OPC_LL = (0x30 << 26),
OPC_LLD = (0x34 << 26),
OPC_LD = (0x37 << 26),
+ OPC_LDPC = OPC_LD | 0x5,
OPC_SC = (0x38 << 26),
OPC_SCD = (0x3C << 26),
OPC_SD = (0x3F << 26),
/* SSNOP is SLL r0, r0, 1 */
/* EHB is SLL r0, r0, 3 */
OPC_SRL = 0x02 | OPC_SPECIAL, /* also ROTR */
+ OPC_ROTR = OPC_SRL | (1 << 21),
OPC_SRA = 0x03 | OPC_SPECIAL,
OPC_SLLV = 0x04 | OPC_SPECIAL,
OPC_SRLV = 0x06 | OPC_SPECIAL, /* also ROTRV */
+ OPC_ROTRV = OPC_SRLV | (1 << 6),
OPC_SRAV = 0x07 | OPC_SPECIAL,
OPC_DSLLV = 0x14 | OPC_SPECIAL,
OPC_DSRLV = 0x16 | OPC_SPECIAL, /* also DROTRV */
+ OPC_DROTRV = OPC_DSRLV | (1 << 6),
OPC_DSRAV = 0x17 | OPC_SPECIAL,
OPC_DSLL = 0x38 | OPC_SPECIAL,
OPC_DSRL = 0x3A | OPC_SPECIAL, /* also DROTR */
+ OPC_DROTR = OPC_DSRL | (1 << 21),
OPC_DSRA = 0x3B | OPC_SPECIAL,
OPC_DSLL32 = 0x3C | OPC_SPECIAL,
OPC_DSRL32 = 0x3E | OPC_SPECIAL, /* also DROTR32 */
+ OPC_DROTR32 = OPC_DSRL32 | (1 << 21),
OPC_DSRA32 = 0x3F | OPC_SPECIAL,
/* Multiplication / division */
OPC_MULT = 0x18 | OPC_SPECIAL,
/* Jumps */
OPC_JR = 0x08 | OPC_SPECIAL, /* Also JR.HB */
OPC_JALR = 0x09 | OPC_SPECIAL, /* Also JALR.HB */
+ OPC_JALRC = OPC_JALR | (0x5 << 6),
/* Traps */
OPC_TGE = 0x30 | OPC_SPECIAL,
OPC_TGEU = 0x31 | OPC_SPECIAL,
OPC_MOVCI = 0x01 | OPC_SPECIAL,
/* Special */
- OPC_PMON = 0x05 | OPC_SPECIAL, /* inofficial */
+ OPC_PMON = 0x05 | OPC_SPECIAL, /* unofficial */
OPC_SYSCALL = 0x0C | OPC_SPECIAL,
OPC_BREAK = 0x0D | OPC_SPECIAL,
- OPC_SPIM = 0x0E | OPC_SPECIAL, /* inofficial */
+ OPC_SPIM = 0x0E | OPC_SPECIAL, /* unofficial */
OPC_SYNC = 0x0F | OPC_SPECIAL,
OPC_SPECIAL15_RESERVED = 0x15 | OPC_SPECIAL,
static TCGv cpu_HI[MIPS_DSP_ACC], cpu_LO[MIPS_DSP_ACC], cpu_ACX[MIPS_DSP_ACC];
static TCGv cpu_dspctrl, btarget, bcond;
static TCGv_i32 hflags;
-static TCGv_i32 fpu_fpr32[32], fpu_fpr32h[32];
static TCGv_i32 fpu_fcr0, fpu_fcr31;
#include "gen-icount.h"
struct TranslationBlock *tb;
target_ulong pc, saved_pc;
uint32_t opcode;
+ int singlestep_enabled;
/* Routine used to access memory */
int mem_idx;
uint32_t hflags, saved_hflags;
"f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
"f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31", };
-static const char *fregnames_h[] =
- { "h0", "h1", "h2", "h3", "h4", "h5", "h6", "h7",
- "h8", "h9", "h10", "h11", "h12", "h13", "h14", "h15",
- "h16", "h17", "h18", "h19", "h20", "h21", "h22", "h23",
- "h24", "h25", "h26", "h27", "h28", "h29", "h30", "h31", };
-
#ifdef MIPS_DEBUG_DISAS
/* Per-instruction disassembly trace: logs PC and raw opcode plus a
   formatted message when CPU_LOG_TB_IN_ASM logging is enabled.
   Uses standard C99 __VA_ARGS__ (the GNU-specific "args..." form was
   replaced for portability); the ", ##" swallows the comma when no
   variadic arguments are given. */
#define MIPS_DEBUG(fmt, ...)                                                  \
        qemu_log_mask(CPU_LOG_TB_IN_ASM,                                      \
                      TARGET_FMT_lx ": %08x " fmt "\n",                       \
                      ctx->pc, ctx->opcode , ## __VA_ARGS__)
#define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
/* Compiled out when MIPS_DEBUG_DISAS is not defined. */
#define MIPS_DEBUG(fmt, ...) do { } while(0)
#define LOG_DISAS(...) do { } while (0)
#endif
/* Moves to/from shadow registers. */
static inline void gen_load_srsgpr (int from, int to)
{
- TCGv r_tmp1 = tcg_temp_new();
+ TCGv t0 = tcg_temp_new();
if (from == 0)
- tcg_gen_movi_tl(r_tmp1, 0);
+ tcg_gen_movi_tl(t0, 0);
else {
- TCGv_i32 r_tmp2 = tcg_temp_new_i32();
+ TCGv_i32 t2 = tcg_temp_new_i32();
TCGv_ptr addr = tcg_temp_new_ptr();
- tcg_gen_ld_i32(r_tmp2, cpu_env, offsetof(CPUState, CP0_SRSCtl));
- tcg_gen_shri_i32(r_tmp2, r_tmp2, CP0SRSCtl_PSS);
- tcg_gen_andi_i32(r_tmp2, r_tmp2, 0xf);
- tcg_gen_muli_i32(r_tmp2, r_tmp2, sizeof(target_ulong) * 32);
- tcg_gen_ext_i32_ptr(addr, r_tmp2);
+ tcg_gen_ld_i32(t2, cpu_env, offsetof(CPUState, CP0_SRSCtl));
+ tcg_gen_shri_i32(t2, t2, CP0SRSCtl_PSS);
+ tcg_gen_andi_i32(t2, t2, 0xf);
+ tcg_gen_muli_i32(t2, t2, sizeof(target_ulong) * 32);
+ tcg_gen_ext_i32_ptr(addr, t2);
tcg_gen_add_ptr(addr, cpu_env, addr);
- tcg_gen_ld_tl(r_tmp1, addr, sizeof(target_ulong) * from);
+ tcg_gen_ld_tl(t0, addr, sizeof(target_ulong) * from);
tcg_temp_free_ptr(addr);
- tcg_temp_free_i32(r_tmp2);
+ tcg_temp_free_i32(t2);
}
- gen_store_gpr(r_tmp1, to);
- tcg_temp_free(r_tmp1);
+ gen_store_gpr(t0, to);
+ tcg_temp_free(t0);
}
static inline void gen_store_srsgpr (int from, int to)
{
if (to != 0) {
- TCGv r_tmp1 = tcg_temp_new();
- TCGv_i32 r_tmp2 = tcg_temp_new_i32();
+ TCGv t0 = tcg_temp_new();
+ TCGv_i32 t2 = tcg_temp_new_i32();
TCGv_ptr addr = tcg_temp_new_ptr();
- gen_load_gpr(r_tmp1, from);
- tcg_gen_ld_i32(r_tmp2, cpu_env, offsetof(CPUState, CP0_SRSCtl));
- tcg_gen_shri_i32(r_tmp2, r_tmp2, CP0SRSCtl_PSS);
- tcg_gen_andi_i32(r_tmp2, r_tmp2, 0xf);
- tcg_gen_muli_i32(r_tmp2, r_tmp2, sizeof(target_ulong) * 32);
- tcg_gen_ext_i32_ptr(addr, r_tmp2);
+ gen_load_gpr(t0, from);
+ tcg_gen_ld_i32(t2, cpu_env, offsetof(CPUState, CP0_SRSCtl));
+ tcg_gen_shri_i32(t2, t2, CP0SRSCtl_PSS);
+ tcg_gen_andi_i32(t2, t2, 0xf);
+ tcg_gen_muli_i32(t2, t2, sizeof(target_ulong) * 32);
+ tcg_gen_ext_i32_ptr(addr, t2);
tcg_gen_add_ptr(addr, cpu_env, addr);
- tcg_gen_st_tl(r_tmp1, addr, sizeof(target_ulong) * to);
+ tcg_gen_st_tl(t0, addr, sizeof(target_ulong) * to);
tcg_temp_free_ptr(addr);
- tcg_temp_free_i32(r_tmp2);
- tcg_temp_free(r_tmp1);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free(t0);
}
}
/* Floating point register moves. */
/* Load the (endian-correct) low 32-bit word of FPU register 'reg' into t.
   FPU registers now live in env (active_fpu) rather than in dedicated
   TCG globals, so the value is fetched with an explicit env load. */
static inline void gen_load_fpr32 (TCGv_i32 t, int reg)
{
    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUState, active_fpu.fpr[reg].w[FP_ENDIAN_IDX]));
}
/* Store t into the low 32-bit word of FPU register 'reg' in env. */
static inline void gen_store_fpr32 (TCGv_i32 t, int reg)
{
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUState, active_fpu.fpr[reg].w[FP_ENDIAN_IDX]));
}

/* Load the high 32-bit word (the other endian half) of FPU register 'reg'. */
static inline void gen_load_fpr32h (TCGv_i32 t, int reg)
{
    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUState, active_fpu.fpr[reg].w[!FP_ENDIAN_IDX]));
}

/* Store t into the high 32-bit word of FPU register 'reg'. */
static inline void gen_store_fpr32h (TCGv_i32 t, int reg)
{
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUState, active_fpu.fpr[reg].w[!FP_ENDIAN_IDX]));
}
/* Load a 64-bit FPU value into t.  With 64-bit FPRs (MIPS_HFLAG_F64) the
   register is read directly; otherwise the value is assembled from the
   even/odd 32-bit register pair (even = low word, odd = high word). */
static inline void gen_load_fpr64 (DisasContext *ctx, TCGv_i64 t, int reg)
{
    if (ctx->hflags & MIPS_HFLAG_F64) {
        tcg_gen_ld_i64(t, cpu_env, offsetof(CPUState, active_fpu.fpr[reg].d));
    } else {
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();
        gen_load_fpr32(t0, reg & ~1);
        gen_load_fpr32(t1, reg | 1);
        tcg_gen_concat_i32_i64(t, t0, t1);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    }
}
/* Store the 64-bit value t into FPU register 'reg'.  With 64-bit FPRs the
   register is written directly; otherwise the value is split across the
   even/odd 32-bit register pair (even = low word, odd = high word).
   Note t itself is left unmodified (the old code clobbered it). */
static inline void gen_store_fpr64 (DisasContext *ctx, TCGv_i64 t, int reg)
{
    if (ctx->hflags & MIPS_HFLAG_F64) {
        tcg_gen_st_i64(t, cpu_env, offsetof(CPUState, active_fpu.fpr[reg].d));
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i32 t1 = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(t1, t);
        gen_store_fpr32(t1, reg & ~1);
        tcg_gen_shri_i64(t0, t, 32);
        tcg_gen_trunc_i64_i32(t1, t0);
        gen_store_fpr32(t1, reg | 1);
        tcg_temp_free_i32(t1);
        tcg_temp_free_i64(t0);
    }
}
-static inline void gen_load_fpr32h (TCGv_i32 t, int reg)
-{
- tcg_gen_mov_i32(t, fpu_fpr32h[reg]);
-}
-
-static inline void gen_store_fpr32h (TCGv_i32 t, int reg)
-{
- tcg_gen_mov_i32(fpu_fpr32h[reg], t);
-}
-
/* Return the bit position of FP condition code 'cc' inside FCR31.
   Condition code 0 lives at bit 23; codes 1..7 occupy bits 25..31
   (there is a gap at bit 24), hence the 24 + cc formula. */
static inline int get_fp_bit (int cc)
{
    if (cc)
        return 24 + cc;
    else
        return 23;
}
#define FOP_CONDS(type, fmt, bits) \
#undef FOP_CONDS
/* Tests */
-#define OP_COND(name, cond) \
-static inline void glue(gen_op_, name) (TCGv ret, TCGv t0, TCGv t1) \
-{ \
- int l1 = gen_new_label(); \
- int l2 = gen_new_label(); \
- \
- tcg_gen_brcond_tl(cond, t0, t1, l1); \
- tcg_gen_movi_tl(ret, 0); \
- tcg_gen_br(l2); \
- gen_set_label(l1); \
- tcg_gen_movi_tl(ret, 1); \
- gen_set_label(l2); \
-}
-OP_COND(eq, TCG_COND_EQ);
-OP_COND(ne, TCG_COND_NE);
-OP_COND(ge, TCG_COND_GE);
-OP_COND(geu, TCG_COND_GEU);
-OP_COND(lt, TCG_COND_LT);
-OP_COND(ltu, TCG_COND_LTU);
-#undef OP_COND
-
-#define OP_CONDI(name, cond) \
-static inline void glue(gen_op_, name) (TCGv ret, TCGv t0, target_ulong val) \
-{ \
- int l1 = gen_new_label(); \
- int l2 = gen_new_label(); \
- \
- tcg_gen_brcondi_tl(cond, t0, val, l1); \
- tcg_gen_movi_tl(ret, 0); \
- tcg_gen_br(l2); \
- gen_set_label(l1); \
- tcg_gen_movi_tl(ret, 1); \
- gen_set_label(l2); \
-}
-OP_CONDI(lti, TCG_COND_LT);
-OP_CONDI(ltiu, TCG_COND_LTU);
-#undef OP_CONDI
-
-#define OP_CONDZ(name, cond) \
-static inline void glue(gen_op_, name) (TCGv ret, TCGv t0) \
-{ \
- int l1 = gen_new_label(); \
- int l2 = gen_new_label(); \
- \
- tcg_gen_brcondi_tl(cond, t0, 0, l1); \
- tcg_gen_movi_tl(ret, 0); \
- tcg_gen_br(l2); \
- gen_set_label(l1); \
- tcg_gen_movi_tl(ret, 1); \
- gen_set_label(l2); \
-}
-OP_CONDZ(gez, TCG_COND_GE);
-OP_CONDZ(gtz, TCG_COND_GT);
-OP_CONDZ(lez, TCG_COND_LE);
-OP_CONDZ(ltz, TCG_COND_LT);
-#undef OP_CONDZ
-
static inline void gen_save_pc(target_ulong pc)
{
tcg_gen_movi_tl(cpu_PC, pc);
if (ctx->hflags != ctx->saved_hflags) {
tcg_gen_movi_i32(hflags, ctx->hflags);
ctx->saved_hflags = ctx->hflags;
- switch (ctx->hflags & MIPS_HFLAG_BMASK) {
+ switch (ctx->hflags & MIPS_HFLAG_BMASK_BASE) {
case MIPS_HFLAG_BR:
break;
case MIPS_HFLAG_BC:
static inline void restore_cpu_state (CPUState *env, DisasContext *ctx)
{
ctx->saved_hflags = ctx->hflags;
- switch (ctx->hflags & MIPS_HFLAG_BMASK) {
+ switch (ctx->hflags & MIPS_HFLAG_BMASK_BASE) {
case MIPS_HFLAG_BR:
break;
case MIPS_HFLAG_BC:
gen_helper_raise_exception_err(texcp, terr);
tcg_temp_free_i32(terr);
tcg_temp_free_i32(texcp);
- gen_helper_interrupt_restart();
- tcg_gen_exit_tb(0);
}
static inline void
{
save_cpu_state(ctx, 1);
gen_helper_0i(raise_exception, excp);
- gen_helper_interrupt_restart();
- tcg_gen_exit_tb(0);
}
/* Addresses computation */
-static inline void gen_op_addr_add (DisasContext *ctx, TCGv t0, TCGv t1)
+static inline void gen_op_addr_add (DisasContext *ctx, TCGv ret, TCGv arg0, TCGv arg1)
{
- tcg_gen_add_tl(t0, t0, t1);
+ tcg_gen_add_tl(ret, arg0, arg1);
#if defined(TARGET_MIPS64)
/* For compatibility with 32-bit code, data reference in user mode
See the MIPS64 PRA manual, section 4.10. */
if (((ctx->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_UM) &&
!(ctx->hflags & MIPS_HFLAG_UX)) {
- tcg_gen_ext32s_i64(t0, t0);
+ tcg_gen_ext32s_i64(ret, ret);
}
#endif
}
/* Raise a coprocessor-unusable exception (CE = 0 for CP0) if CP0 access
   is not currently permitted. */
static inline void check_cp0_enabled(DisasContext *ctx)
{
    if (unlikely(!(ctx->hflags & MIPS_HFLAG_CP0)))
        generate_exception_err(ctx, EXCP_CpU, 0);
}
static inline void check_cp1_enabled(DisasContext *ctx)
}
/* load/store instructions. */
/* Generate a simple load op_ldst_<insn>: ret = mem[arg1].  Address and
   destination are now separate operands so callers can preserve the
   address register. */
#define OP_LD(insn,fname)                                                 \
static inline void op_ldst_##insn(TCGv ret, TCGv arg1, DisasContext *ctx) \
{                                                                         \
    tcg_gen_qemu_##fname(ret, arg1, ctx->mem_idx);                        \
}
OP_LD(lb,ld8s);
OP_LD(lbu,ld8u);
#endif
#undef OP_LD
/* Generate a simple store op_ldst_<insn>: mem[arg2] = arg1.
   Note the operand order: value first, then address. */
#define OP_ST(insn,fname)                                                  \
static inline void op_ldst_##insn(TCGv arg1, TCGv arg2, DisasContext *ctx) \
{                                                                          \
    tcg_gen_qemu_##fname(arg1, arg2, ctx->mem_idx);                        \
}
OP_ST(sb,st8);
OP_ST(sh,st16);
#endif
#undef OP_ST
/* Linked loads (LL/LLD).  User-mode emulation records the link address and
   loaded value directly in env (lladdr/llval) for the SC emulation path;
   system-mode emulation defers to a helper so physical-address linking and
   exceptions are handled there. */
#ifdef CONFIG_USER_ONLY
#define OP_LD_ATOMIC(insn,fname)                                           \
static inline void op_ldst_##insn(TCGv ret, TCGv arg1, DisasContext *ctx)  \
{                                                                          \
    TCGv t0 = tcg_temp_new();                                              \
    tcg_gen_mov_tl(t0, arg1);                                              \
    tcg_gen_qemu_##fname(ret, arg1, ctx->mem_idx);                         \
    tcg_gen_st_tl(t0, cpu_env, offsetof(CPUState, lladdr));                \
    tcg_gen_st_tl(ret, cpu_env, offsetof(CPUState, llval));                \
    tcg_temp_free(t0);                                                     \
}
#else
#define OP_LD_ATOMIC(insn,fname)                                           \
static inline void op_ldst_##insn(TCGv ret, TCGv arg1, DisasContext *ctx)  \
{                                                                          \
    gen_helper_2i(insn, ret, arg1, ctx->mem_idx);                          \
}
#endif
OP_LD_ATOMIC(ll,ld32s);
#if defined(TARGET_MIPS64)
OP_LD_ATOMIC(lld,ld64);
#endif
#undef OP_LD_ATOMIC
-#define OP_ST_ATOMIC(insn,fname,almask) \
-static inline void op_ldst_##insn(TCGv t0, TCGv t1, DisasContext *ctx) \
-{ \
- TCGv r_tmp = tcg_temp_local_new(); \
- int l1 = gen_new_label(); \
- int l2 = gen_new_label(); \
- int l3 = gen_new_label(); \
- \
- tcg_gen_andi_tl(r_tmp, t0, almask); \
- tcg_gen_brcondi_tl(TCG_COND_EQ, r_tmp, 0, l1); \
- tcg_gen_st_tl(t0, cpu_env, offsetof(CPUState, CP0_BadVAddr)); \
- generate_exception(ctx, EXCP_AdES); \
- gen_set_label(l1); \
- tcg_gen_ld_tl(r_tmp, cpu_env, offsetof(CPUState, CP0_LLAddr)); \
- tcg_gen_brcond_tl(TCG_COND_NE, t0, r_tmp, l2); \
- tcg_gen_qemu_##fname(t1, t0, ctx->mem_idx); \
- tcg_gen_movi_tl(t0, 1); \
- tcg_gen_br(l3); \
- gen_set_label(l2); \
- tcg_gen_movi_tl(t0, 0); \
- gen_set_label(l3); \
- tcg_temp_free(r_tmp); \
+#ifdef CONFIG_USER_ONLY
+#define OP_ST_ATOMIC(insn,fname,ldname,almask) \
+static inline void op_ldst_##insn(TCGv arg1, TCGv arg2, int rt, DisasContext *ctx) \
+{ \
+ TCGv t0 = tcg_temp_new(); \
+ int l1 = gen_new_label(); \
+ int l2 = gen_new_label(); \
+ \
+ tcg_gen_andi_tl(t0, arg2, almask); \
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1); \
+ tcg_gen_st_tl(arg2, cpu_env, offsetof(CPUState, CP0_BadVAddr)); \
+ generate_exception(ctx, EXCP_AdES); \
+ gen_set_label(l1); \
+ tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, lladdr)); \
+ tcg_gen_brcond_tl(TCG_COND_NE, arg2, t0, l2); \
+ tcg_gen_movi_tl(t0, rt | ((almask << 3) & 0x20)); \
+ tcg_gen_st_tl(t0, cpu_env, offsetof(CPUState, llreg)); \
+ tcg_gen_st_tl(arg1, cpu_env, offsetof(CPUState, llnewval)); \
+ gen_helper_0i(raise_exception, EXCP_SC); \
+ gen_set_label(l2); \
+ tcg_gen_movi_tl(t0, 0); \
+ gen_store_gpr(t0, rt); \
+ tcg_temp_free(t0); \
+}
+#else
+#define OP_ST_ATOMIC(insn,fname,ldname,almask) \
+static inline void op_ldst_##insn(TCGv arg1, TCGv arg2, int rt, DisasContext *ctx) \
+{ \
+ TCGv t0 = tcg_temp_new(); \
+ gen_helper_3i(insn, t0, arg1, arg2, ctx->mem_idx); \
+ gen_store_gpr(t0, rt); \
+ tcg_temp_free(t0); \
}
-OP_ST_ATOMIC(sc,st32,0x3);
+#endif
+OP_ST_ATOMIC(sc,st32,ld32s,0x3);
#if defined(TARGET_MIPS64)
-OP_ST_ATOMIC(scd,st64,0x7);
+OP_ST_ATOMIC(scd,st64,ld64,0x7);
#endif
#undef OP_ST_ATOMIC
+static void gen_base_offset_addr (DisasContext *ctx, TCGv addr,
+ int base, int16_t offset)
+{
+ if (base == 0) {
+ tcg_gen_movi_tl(addr, offset);
+ } else if (offset == 0) {
+ gen_load_gpr(addr, base);
+ } else {
+ tcg_gen_movi_tl(addr, offset);
+ gen_op_addr_add(ctx, addr, cpu_gpr[base], addr);
+ }
+}
+
+static target_ulong pc_relative_pc (DisasContext *ctx)
+{
+ target_ulong pc = ctx->pc;
+
+ if (ctx->hflags & MIPS_HFLAG_BMASK) {
+ int branch_bytes = ctx->hflags & MIPS_HFLAG_BDS16 ? 2 : 4;
+
+ pc -= branch_bytes;
+ }
+
+ pc &= ~(target_ulong)3;
+ return pc;
+}
+
/* Load and store */
/* Generate code for one load or store instruction (except conditional
   stores, handled by gen_st_cond).  'rt' is the data register, the
   effective address is GPR[base] + offset.  save_cpu_state is called with
   1 where a helper may raise an exception mid-instruction. */
static void gen_ldst (DisasContext *ctx, uint32_t opc, int rt,
                      int base, int16_t offset)
{
    const char *opn = "ldst";
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();

    gen_base_offset_addr(ctx, t0, base, offset);
    /* Don't do NOP if destination is zero: we must perform the actual
       memory access. */
    switch (opc) {
#if defined(TARGET_MIPS64)
    case OPC_LWU:
        save_cpu_state(ctx, 0);
        op_ldst_lwu(t0, t0, ctx);
        gen_store_gpr(t0, rt);
        opn = "lwu";
        break;
    case OPC_LD:
        save_cpu_state(ctx, 0);
        op_ldst_ld(t0, t0, ctx);
        gen_store_gpr(t0, rt);
        opn = "ld";
        break;
    case OPC_LLD:
        save_cpu_state(ctx, 0);
        op_ldst_lld(t0, t0, ctx);
        gen_store_gpr(t0, rt);
        opn = "lld";
        break;
    case OPC_SD:
        save_cpu_state(ctx, 0);
        gen_load_gpr(t1, rt);
        op_ldst_sd(t1, t0, ctx);
        opn = "sd";
        break;
    case OPC_LDL:
        save_cpu_state(ctx, 1);
        gen_load_gpr(t1, rt);
        gen_helper_3i(ldl, t1, t1, t0, ctx->mem_idx);
        gen_store_gpr(t1, rt);
        opn = "ldl";
        break;
    case OPC_SDL:
        save_cpu_state(ctx, 1);
        gen_load_gpr(t1, rt);
        gen_helper_2i(sdl, t1, t0, ctx->mem_idx);
        opn = "sdl";
        break;
    case OPC_LDR:
        save_cpu_state(ctx, 1);
        gen_load_gpr(t1, rt);
        gen_helper_3i(ldr, t1, t1, t0, ctx->mem_idx);
        gen_store_gpr(t1, rt);
        opn = "ldr";
        break;
    case OPC_SDR:
        save_cpu_state(ctx, 1);
        gen_load_gpr(t1, rt);
        gen_helper_2i(sdr, t1, t0, ctx->mem_idx);
        opn = "sdr";
        break;
    case OPC_LDPC:
        /* MIPS16 PC-relative 64-bit load. */
        save_cpu_state(ctx, 1);
        tcg_gen_movi_tl(t1, pc_relative_pc(ctx));
        gen_op_addr_add(ctx, t0, t0, t1);
        op_ldst_ld(t0, t0, ctx);
        gen_store_gpr(t0, rt);
        opn = "ldpc";
        break;
#endif
    case OPC_LWPC:
        /* MIPS16 PC-relative 32-bit load. */
        save_cpu_state(ctx, 1);
        tcg_gen_movi_tl(t1, pc_relative_pc(ctx));
        gen_op_addr_add(ctx, t0, t0, t1);
        op_ldst_lw(t0, t0, ctx);
        gen_store_gpr(t0, rt);
        opn = "lwpc";
        break;
    case OPC_LW:
        save_cpu_state(ctx, 0);
        op_ldst_lw(t0, t0, ctx);
        gen_store_gpr(t0, rt);
        opn = "lw";
        break;
    case OPC_SW:
        save_cpu_state(ctx, 0);
        gen_load_gpr(t1, rt);
        op_ldst_sw(t1, t0, ctx);
        opn = "sw";
        break;
    case OPC_LH:
        save_cpu_state(ctx, 0);
        op_ldst_lh(t0, t0, ctx);
        gen_store_gpr(t0, rt);
        opn = "lh";
        break;
    case OPC_SH:
        save_cpu_state(ctx, 0);
        gen_load_gpr(t1, rt);
        op_ldst_sh(t1, t0, ctx);
        opn = "sh";
        break;
    case OPC_LHU:
        save_cpu_state(ctx, 0);
        op_ldst_lhu(t0, t0, ctx);
        gen_store_gpr(t0, rt);
        opn = "lhu";
        break;
    case OPC_LB:
        save_cpu_state(ctx, 0);
        op_ldst_lb(t0, t0, ctx);
        gen_store_gpr(t0, rt);
        opn = "lb";
        break;
    case OPC_SB:
        save_cpu_state(ctx, 0);
        gen_load_gpr(t1, rt);
        op_ldst_sb(t1, t0, ctx);
        opn = "sb";
        break;
    case OPC_LBU:
        save_cpu_state(ctx, 0);
        op_ldst_lbu(t0, t0, ctx);
        gen_store_gpr(t0, rt);
        opn = "lbu";
        break;
    case OPC_LWL:
        save_cpu_state(ctx, 1);
        gen_load_gpr(t1, rt);
        gen_helper_3i(lwl, t1, t1, t0, ctx->mem_idx);
        gen_store_gpr(t1, rt);
        opn = "lwl";
        break;
    case OPC_SWL:
        save_cpu_state(ctx, 1);
        gen_load_gpr(t1, rt);
        gen_helper_2i(swl, t1, t0, ctx->mem_idx);
        opn = "swl";                    /* was mislabelled "swr" */
        break;
    case OPC_LWR:
        save_cpu_state(ctx, 1);
        gen_load_gpr(t1, rt);
        gen_helper_3i(lwr, t1, t1, t0, ctx->mem_idx);
        gen_store_gpr(t1, rt);
        opn = "lwr";
        break;
    case OPC_SWR:
        save_cpu_state(ctx, 1);
        gen_load_gpr(t1, rt);
        gen_helper_2i(swr, t1, t0, ctx->mem_idx);
        opn = "swr";
        break;
    case OPC_LL:
        save_cpu_state(ctx, 1);
        op_ldst_ll(t0, t0, ctx);
        gen_store_gpr(t0, rt);
        opn = "ll";
        break;
    }
    MIPS_DEBUG("%s %s, %d(%s)", opn, regnames[rt], offset, regnames[base]);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
}
+
+/* Store conditional */
+static void gen_st_cond (DisasContext *ctx, uint32_t opc, int rt,
+ int base, int16_t offset)
+{
+ const char *opn = "st_cond";
+ TCGv t0, t1;
+
+ t0 = tcg_temp_local_new();
+
+ gen_base_offset_addr(ctx, t0, base, offset);
+ /* Don't do NOP if destination is zero: we must perform the actual
+ memory access. */
+
+ t1 = tcg_temp_local_new();
+ gen_load_gpr(t1, rt);
+ switch (opc) {
+#if defined(TARGET_MIPS64)
+ case OPC_SCD:
+ save_cpu_state(ctx, 0);
+ op_ldst_scd(t1, t0, rt, ctx);
+ opn = "scd";
+ break;
+#endif
case OPC_SC:
save_cpu_state(ctx, 1);
- gen_load_gpr(t1, rt);
- op_ldst_sc(t0, t1, ctx);
- gen_store_gpr(t0, rt);
+ op_ldst_sc(t1, t0, rt, ctx);
opn = "sc";
break;
- default:
- MIPS_INVAL(opn);
- generate_exception(ctx, EXCP_RI);
- goto out;
}
MIPS_DEBUG("%s %s, %d(%s)", opn, regnames[rt], offset, regnames[base]);
- out:
- tcg_temp_free(t0);
tcg_temp_free(t1);
+ tcg_temp_free(t0);
}
/* Load and store */
int base, int16_t offset)
{
const char *opn = "flt_ldst";
- TCGv t0 = tcg_temp_local_new();
+ TCGv t0 = tcg_temp_new();
- if (base == 0) {
- tcg_gen_movi_tl(t0, offset);
- } else if (offset == 0) {
- gen_load_gpr(t0, base);
- } else {
- tcg_gen_movi_tl(t0, offset);
- gen_op_addr_add(ctx, t0, cpu_gpr[base]);
- }
+ gen_base_offset_addr(ctx, t0, base, offset);
/* Don't do NOP if destination is zero: we must perform the actual
memory access. */
switch (opc) {
case OPC_LWC1:
{
TCGv_i32 fp0 = tcg_temp_new_i32();
- TCGv t1 = tcg_temp_new();
- tcg_gen_qemu_ld32s(t1, t0, ctx->mem_idx);
- tcg_gen_trunc_tl_i32(fp0, t1);
+ tcg_gen_qemu_ld32s(t0, t0, ctx->mem_idx);
+ tcg_gen_trunc_tl_i32(fp0, t0);
gen_store_fpr32(fp0, ft);
- tcg_temp_free(t1);
tcg_temp_free_i32(fp0);
}
opn = "lwc1";
/* Arithmetic with immediate operand (ADDI/ADDIU/DADDI/DADDIU).  Logic,
   set-on-less-than and shift immediates are handled by gen_logic_imm,
   gen_slt_imm and gen_shift_imm respectively.  Writes GPR[rt]; a write to
   $zero is a NOP except for ADDI/DADDI, which must still raise the
   overflow exception. */
static void gen_arith_imm (CPUState *env, DisasContext *ctx, uint32_t opc,
                           int rt, int rs, int16_t imm)
{
    target_ulong uimm = (target_long)imm; /* Sign extend to 32/64 bits */
    const char *opn = "imm arith";

    if (rt == 0 && opc != OPC_ADDI && opc != OPC_DADDI) {
        /* If no destination, treat it as a NOP.
           For addi, we must generate the overflow exception when needed. */
        MIPS_DEBUG("NOP");
        return;
    }
    switch (opc) {
    case OPC_ADDI:
        {
            TCGv t0 = tcg_temp_local_new();
            TCGv t1 = tcg_temp_new();
            TCGv t2 = tcg_temp_new();
            int l1 = gen_new_label();

            gen_load_gpr(t1, rs);
            tcg_gen_addi_tl(t0, t1, uimm);
            tcg_gen_ext32s_tl(t0, t0);

            /* Overflow iff operands have the same sign and the result's
               sign differs: ~(rs ^ imm) & (result ^ imm) < 0. */
            tcg_gen_xori_tl(t1, t1, ~uimm);
            tcg_gen_xori_tl(t2, t0, uimm);
            tcg_gen_and_tl(t1, t1, t2);
            tcg_temp_free(t2);
            tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
            tcg_temp_free(t1);
            /* operands of same sign, result different sign */
            generate_exception(ctx, EXCP_OVERFLOW);
            gen_set_label(l1);
            tcg_gen_ext32s_tl(t0, t0);
            gen_store_gpr(t0, rt);
            tcg_temp_free(t0);
        }
        opn = "addi";
        break;
    case OPC_ADDIU:
        if (rs != 0) {
            tcg_gen_addi_tl(cpu_gpr[rt], cpu_gpr[rs], uimm);
            tcg_gen_ext32s_tl(cpu_gpr[rt], cpu_gpr[rt]);
        } else {
            tcg_gen_movi_tl(cpu_gpr[rt], uimm);
        }
        opn = "addiu";
        break;
#if defined(TARGET_MIPS64)
    case OPC_DADDI:
        {
            TCGv t0 = tcg_temp_local_new();
            TCGv t1 = tcg_temp_new();
            TCGv t2 = tcg_temp_new();
            int l1 = gen_new_label();

            gen_load_gpr(t1, rs);
            tcg_gen_addi_tl(t0, t1, uimm);

            /* Same signed-overflow test as ADDI, on the full 64 bits. */
            tcg_gen_xori_tl(t1, t1, ~uimm);
            tcg_gen_xori_tl(t2, t0, uimm);
            tcg_gen_and_tl(t1, t1, t2);
            tcg_temp_free(t2);
            tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
            tcg_temp_free(t1);
            /* operands of same sign, result different sign */
            generate_exception(ctx, EXCP_OVERFLOW);
            gen_set_label(l1);
            gen_store_gpr(t0, rt);
            tcg_temp_free(t0);
        }
        opn = "daddi";
        break;
    case OPC_DADDIU:
        if (rs != 0) {
            tcg_gen_addi_tl(cpu_gpr[rt], cpu_gpr[rs], uimm);
        } else {
            tcg_gen_movi_tl(cpu_gpr[rt], uimm);
        }
        opn = "daddiu";
        break;
#endif
    }
    MIPS_DEBUG("%s %s, %s, " TARGET_FMT_lx, opn, regnames[rt], regnames[rs], uimm);
}
+
+/* Logic with immediate operand */
+static void gen_logic_imm (CPUState *env, uint32_t opc, int rt, int rs, int16_t imm)
+{
+ target_ulong uimm;
+ const char *opn = "imm logic";
+
+ if (rt == 0) {
+ /* If no destination, treat it as a NOP. */
+ MIPS_DEBUG("NOP");
+ return;
+ }
+ uimm = (uint16_t)imm;
+ switch (opc) {
case OPC_ANDI:
- tcg_gen_andi_tl(t0, t0, uimm);
+ if (likely(rs != 0))
+ tcg_gen_andi_tl(cpu_gpr[rt], cpu_gpr[rs], uimm);
+ else
+ tcg_gen_movi_tl(cpu_gpr[rt], 0);
opn = "andi";
break;
case OPC_ORI:
- tcg_gen_ori_tl(t0, t0, uimm);
+ if (rs != 0)
+ tcg_gen_ori_tl(cpu_gpr[rt], cpu_gpr[rs], uimm);
+ else
+ tcg_gen_movi_tl(cpu_gpr[rt], uimm);
opn = "ori";
break;
case OPC_XORI:
- tcg_gen_xori_tl(t0, t0, uimm);
+ if (likely(rs != 0))
+ tcg_gen_xori_tl(cpu_gpr[rt], cpu_gpr[rs], uimm);
+ else
+ tcg_gen_movi_tl(cpu_gpr[rt], uimm);
opn = "xori";
break;
case OPC_LUI:
+ tcg_gen_movi_tl(cpu_gpr[rt], imm << 16);
opn = "lui";
break;
+ }
+ MIPS_DEBUG("%s %s, %s, " TARGET_FMT_lx, opn, regnames[rt], regnames[rs], uimm);
+}
+
+/* Set on less than with immediate operand */
+static void gen_slt_imm (CPUState *env, uint32_t opc, int rt, int rs, int16_t imm)
+{
+ target_ulong uimm = (target_long)imm; /* Sign extend to 32/64 bits */
+ const char *opn = "imm arith";
+ TCGv t0;
+
+ if (rt == 0) {
+ /* If no destination, treat it as a NOP. */
+ MIPS_DEBUG("NOP");
+ return;
+ }
+ t0 = tcg_temp_new();
+ gen_load_gpr(t0, rs);
+ switch (opc) {
+ case OPC_SLTI:
+ tcg_gen_setcondi_tl(TCG_COND_LT, cpu_gpr[rt], t0, uimm);
+ opn = "slti";
+ break;
+ case OPC_SLTIU:
+ tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_gpr[rt], t0, uimm);
+ opn = "sltiu";
+ break;
+ }
+ MIPS_DEBUG("%s %s, %s, " TARGET_FMT_lx, opn, regnames[rt], regnames[rs], uimm);
+ tcg_temp_free(t0);
+}
+
+/* Shifts with immediate operand */
+static void gen_shift_imm(CPUState *env, DisasContext *ctx, uint32_t opc,
+ int rt, int rs, int16_t imm)
+{
+ target_ulong uimm = ((uint16_t)imm) & 0x1f;
+ const char *opn = "imm shift";
+ TCGv t0;
+
+ if (rt == 0) {
+ /* If no destination, treat it as a NOP. */
+ MIPS_DEBUG("NOP");
+ return;
+ }
+
+ t0 = tcg_temp_new();
+ gen_load_gpr(t0, rs);
+ switch (opc) {
case OPC_SLL:
tcg_gen_shli_tl(t0, t0, uimm);
- tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_ext32s_tl(cpu_gpr[rt], t0);
opn = "sll";
break;
case OPC_SRA:
- tcg_gen_ext32s_tl(t0, t0);
- tcg_gen_sari_tl(t0, t0, uimm);
+ tcg_gen_sari_tl(cpu_gpr[rt], t0, uimm);
opn = "sra";
break;
case OPC_SRL:
- switch ((ctx->opcode >> 21) & 0x1f) {
- case 0:
- if (uimm != 0) {
- tcg_gen_ext32u_tl(t0, t0);
- tcg_gen_shri_tl(t0, t0, uimm);
- } else {
- tcg_gen_ext32s_tl(t0, t0);
- }
- opn = "srl";
- break;
- case 1:
- /* rotr is decoded as srl on non-R2 CPUs */
- if (env->insn_flags & ISA_MIPS32R2) {
- if (uimm != 0) {
- TCGv_i32 r_tmp1 = tcg_temp_new_i32();
-
- tcg_gen_trunc_tl_i32(r_tmp1, t0);
- tcg_gen_rotri_i32(r_tmp1, r_tmp1, uimm);
- tcg_gen_ext_i32_tl(t0, r_tmp1);
- tcg_temp_free_i32(r_tmp1);
- }
- opn = "rotr";
- } else {
- if (uimm != 0) {
- tcg_gen_ext32u_tl(t0, t0);
- tcg_gen_shri_tl(t0, t0, uimm);
- } else {
- tcg_gen_ext32s_tl(t0, t0);
- }
- opn = "srl";
- }
- break;
- default:
- MIPS_INVAL("invalid srl flag");
- generate_exception(ctx, EXCP_RI);
- break;
+ if (uimm != 0) {
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_shri_tl(cpu_gpr[rt], t0, uimm);
+ } else {
+ tcg_gen_ext32s_tl(cpu_gpr[rt], t0);
+ }
+ opn = "srl";
+ break;
+ case OPC_ROTR:
+ if (uimm != 0) {
+ TCGv_i32 t1 = tcg_temp_new_i32();
+
+ tcg_gen_trunc_tl_i32(t1, t0);
+ tcg_gen_rotri_i32(t1, t1, uimm);
+ tcg_gen_ext_i32_tl(cpu_gpr[rt], t1);
+ tcg_temp_free_i32(t1);
+ } else {
+ tcg_gen_ext32s_tl(cpu_gpr[rt], t0);
}
+ opn = "rotr";
break;
#if defined(TARGET_MIPS64)
case OPC_DSLL:
- tcg_gen_shli_tl(t0, t0, uimm);
+ tcg_gen_shli_tl(cpu_gpr[rt], t0, uimm);
opn = "dsll";
break;
case OPC_DSRA:
- tcg_gen_sari_tl(t0, t0, uimm);
+ tcg_gen_sari_tl(cpu_gpr[rt], t0, uimm);
opn = "dsra";
break;
case OPC_DSRL:
- switch ((ctx->opcode >> 21) & 0x1f) {
- case 0:
- tcg_gen_shri_tl(t0, t0, uimm);
- opn = "dsrl";
- break;
- case 1:
- /* drotr is decoded as dsrl on non-R2 CPUs */
- if (env->insn_flags & ISA_MIPS32R2) {
- if (uimm != 0) {
- tcg_gen_rotri_tl(t0, t0, uimm);
- }
- opn = "drotr";
- } else {
- tcg_gen_shri_tl(t0, t0, uimm);
- opn = "dsrl";
- }
- break;
- default:
- MIPS_INVAL("invalid dsrl flag");
- generate_exception(ctx, EXCP_RI);
- break;
+ tcg_gen_shri_tl(cpu_gpr[rt], t0, uimm);
+ opn = "dsrl";
+ break;
+ case OPC_DROTR:
+ if (uimm != 0) {
+ tcg_gen_rotri_tl(cpu_gpr[rt], t0, uimm);
+ } else {
+ tcg_gen_mov_tl(cpu_gpr[rt], t0);
}
+ opn = "drotr";
break;
case OPC_DSLL32:
- tcg_gen_shli_tl(t0, t0, uimm + 32);
+ tcg_gen_shli_tl(cpu_gpr[rt], t0, uimm + 32);
opn = "dsll32";
break;
case OPC_DSRA32:
- tcg_gen_sari_tl(t0, t0, uimm + 32);
+ tcg_gen_sari_tl(cpu_gpr[rt], t0, uimm + 32);
opn = "dsra32";
break;
case OPC_DSRL32:
- switch ((ctx->opcode >> 21) & 0x1f) {
- case 0:
- tcg_gen_shri_tl(t0, t0, uimm + 32);
- opn = "dsrl32";
- break;
- case 1:
- /* drotr32 is decoded as dsrl32 on non-R2 CPUs */
- if (env->insn_flags & ISA_MIPS32R2) {
- tcg_gen_rotri_tl(t0, t0, uimm + 32);
- opn = "drotr32";
- } else {
- tcg_gen_shri_tl(t0, t0, uimm + 32);
- opn = "dsrl32";
- }
- break;
- default:
- MIPS_INVAL("invalid dsrl32 flag");
- generate_exception(ctx, EXCP_RI);
- break;
- }
+ tcg_gen_shri_tl(cpu_gpr[rt], t0, uimm + 32);
+ opn = "dsrl32";
+ break;
+ case OPC_DROTR32:
+ tcg_gen_rotri_tl(cpu_gpr[rt], t0, uimm + 32);
+ opn = "drotr32";
break;
#endif
- default:
- MIPS_INVAL(opn);
- generate_exception(ctx, EXCP_RI);
- goto out;
}
- gen_store_gpr(t0, rt);
MIPS_DEBUG("%s %s, %s, " TARGET_FMT_lx, opn, regnames[rt], regnames[rs], uimm);
- out:
tcg_temp_free(t0);
}
int rd, int rs, int rt)
{
const char *opn = "arith";
- TCGv t0 = tcg_temp_local_new();
- TCGv t1 = tcg_temp_local_new();
if (rd == 0 && opc != OPC_ADD && opc != OPC_SUB
&& opc != OPC_DADD && opc != OPC_DSUB) {
/* If no destination, treat it as a NOP.
For add & sub, we must generate the overflow exception when needed. */
MIPS_DEBUG("NOP");
- goto out;
- }
- gen_load_gpr(t0, rs);
- /* Specialcase the conventional move operation. */
- if (rt == 0 && (opc == OPC_ADDU || opc == OPC_DADDU
- || opc == OPC_SUBU || opc == OPC_DSUBU)) {
- gen_store_gpr(t0, rd);
- goto out;
+ return;
}
- gen_load_gpr(t1, rt);
+
switch (opc) {
case OPC_ADD:
{
- TCGv r_tmp1 = tcg_temp_new();
- TCGv r_tmp2 = tcg_temp_new();
+ TCGv t0 = tcg_temp_local_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
int l1 = gen_new_label();
- save_cpu_state(ctx, 1);
- tcg_gen_ext32s_tl(r_tmp1, t0);
- tcg_gen_ext32s_tl(r_tmp2, t1);
- tcg_gen_add_tl(t0, r_tmp1, r_tmp2);
-
- tcg_gen_xor_tl(r_tmp1, r_tmp1, t1);
- tcg_gen_xori_tl(r_tmp1, r_tmp1, -1);
- tcg_gen_xor_tl(r_tmp2, t0, t1);
- tcg_gen_and_tl(r_tmp1, r_tmp1, r_tmp2);
- tcg_temp_free(r_tmp2);
- tcg_gen_brcondi_tl(TCG_COND_GE, r_tmp1, 0, l1);
+ gen_load_gpr(t1, rs);
+ gen_load_gpr(t2, rt);
+ tcg_gen_add_tl(t0, t1, t2);
+ tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_xor_tl(t1, t1, t2);
+ tcg_gen_xor_tl(t2, t0, t2);
+ tcg_gen_andc_tl(t1, t2, t1);
+ tcg_temp_free(t2);
+ tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
+ tcg_temp_free(t1);
/* operands of same sign, result different sign */
generate_exception(ctx, EXCP_OVERFLOW);
gen_set_label(l1);
- tcg_temp_free(r_tmp1);
-
- tcg_gen_ext32s_tl(t0, t0);
+ gen_store_gpr(t0, rd);
+ tcg_temp_free(t0);
}
opn = "add";
break;
case OPC_ADDU:
- tcg_gen_add_tl(t0, t0, t1);
- tcg_gen_ext32s_tl(t0, t0);
+ if (rs != 0 && rt != 0) {
+ tcg_gen_add_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ } else if (rs == 0 && rt != 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rt]);
+ } else if (rs != 0 && rt == 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ }
opn = "addu";
break;
case OPC_SUB:
{
- TCGv r_tmp1 = tcg_temp_new();
- TCGv r_tmp2 = tcg_temp_new();
+ TCGv t0 = tcg_temp_local_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
int l1 = gen_new_label();
- save_cpu_state(ctx, 1);
- tcg_gen_ext32s_tl(r_tmp1, t0);
- tcg_gen_ext32s_tl(r_tmp2, t1);
- tcg_gen_sub_tl(t0, r_tmp1, r_tmp2);
-
- tcg_gen_xor_tl(r_tmp2, r_tmp1, t1);
- tcg_gen_xor_tl(r_tmp1, r_tmp1, t0);
- tcg_gen_and_tl(r_tmp1, r_tmp1, r_tmp2);
- tcg_temp_free(r_tmp2);
- tcg_gen_brcondi_tl(TCG_COND_GE, r_tmp1, 0, l1);
+ gen_load_gpr(t1, rs);
+ gen_load_gpr(t2, rt);
+ tcg_gen_sub_tl(t0, t1, t2);
+ tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_xor_tl(t2, t1, t2);
+ tcg_gen_xor_tl(t1, t0, t1);
+ tcg_gen_and_tl(t1, t1, t2);
+ tcg_temp_free(t2);
+ tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
+ tcg_temp_free(t1);
/* operands of different sign, first operand and result different sign */
generate_exception(ctx, EXCP_OVERFLOW);
gen_set_label(l1);
- tcg_temp_free(r_tmp1);
-
- tcg_gen_ext32s_tl(t0, t0);
+ gen_store_gpr(t0, rd);
+ tcg_temp_free(t0);
}
opn = "sub";
break;
case OPC_SUBU:
- tcg_gen_sub_tl(t0, t0, t1);
- tcg_gen_ext32s_tl(t0, t0);
+ if (rs != 0 && rt != 0) {
+ tcg_gen_sub_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ } else if (rs == 0 && rt != 0) {
+ tcg_gen_neg_tl(cpu_gpr[rd], cpu_gpr[rt]);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ } else if (rs != 0 && rt == 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ }
opn = "subu";
break;
#if defined(TARGET_MIPS64)
case OPC_DADD:
{
- TCGv r_tmp1 = tcg_temp_new();
- TCGv r_tmp2 = tcg_temp_new();
+ TCGv t0 = tcg_temp_local_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
int l1 = gen_new_label();
- save_cpu_state(ctx, 1);
- tcg_gen_mov_tl(r_tmp1, t0);
- tcg_gen_add_tl(t0, t0, t1);
-
- tcg_gen_xor_tl(r_tmp1, r_tmp1, t1);
- tcg_gen_xori_tl(r_tmp1, r_tmp1, -1);
- tcg_gen_xor_tl(r_tmp2, t0, t1);
- tcg_gen_and_tl(r_tmp1, r_tmp1, r_tmp2);
- tcg_temp_free(r_tmp2);
- tcg_gen_brcondi_tl(TCG_COND_GE, r_tmp1, 0, l1);
+ gen_load_gpr(t1, rs);
+ gen_load_gpr(t2, rt);
+ tcg_gen_add_tl(t0, t1, t2);
+ tcg_gen_xor_tl(t1, t1, t2);
+ tcg_gen_xor_tl(t2, t0, t2);
+ tcg_gen_andc_tl(t1, t2, t1);
+ tcg_temp_free(t2);
+ tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
+ tcg_temp_free(t1);
/* operands of same sign, result different sign */
generate_exception(ctx, EXCP_OVERFLOW);
gen_set_label(l1);
- tcg_temp_free(r_tmp1);
+ gen_store_gpr(t0, rd);
+ tcg_temp_free(t0);
}
opn = "dadd";
break;
case OPC_DADDU:
- tcg_gen_add_tl(t0, t0, t1);
+ if (rs != 0 && rt != 0) {
+ tcg_gen_add_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
+ } else if (rs == 0 && rt != 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rt]);
+ } else if (rs != 0 && rt == 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ }
opn = "daddu";
break;
case OPC_DSUB:
{
- TCGv r_tmp1 = tcg_temp_new();
- TCGv r_tmp2 = tcg_temp_new();
+ TCGv t0 = tcg_temp_local_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
int l1 = gen_new_label();
- save_cpu_state(ctx, 1);
- tcg_gen_mov_tl(r_tmp1, t0);
- tcg_gen_sub_tl(t0, t0, t1);
-
- tcg_gen_xor_tl(r_tmp2, r_tmp1, t1);
- tcg_gen_xor_tl(r_tmp1, r_tmp1, t0);
- tcg_gen_and_tl(r_tmp1, r_tmp1, r_tmp2);
- tcg_temp_free(r_tmp2);
- tcg_gen_brcondi_tl(TCG_COND_GE, r_tmp1, 0, l1);
+ gen_load_gpr(t1, rs);
+ gen_load_gpr(t2, rt);
+ tcg_gen_sub_tl(t0, t1, t2);
+ tcg_gen_xor_tl(t2, t1, t2);
+ tcg_gen_xor_tl(t1, t0, t1);
+ tcg_gen_and_tl(t1, t1, t2);
+ tcg_temp_free(t2);
+ tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
+ tcg_temp_free(t1);
/* operands of different sign, first operand and result different sign */
generate_exception(ctx, EXCP_OVERFLOW);
gen_set_label(l1);
- tcg_temp_free(r_tmp1);
+ gen_store_gpr(t0, rd);
+ tcg_temp_free(t0);
}
opn = "dsub";
break;
case OPC_DSUBU:
- tcg_gen_sub_tl(t0, t0, t1);
+ if (rs != 0 && rt != 0) {
+ tcg_gen_sub_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
+ } else if (rs == 0 && rt != 0) {
+ tcg_gen_neg_tl(cpu_gpr[rd], cpu_gpr[rt]);
+ } else if (rs != 0 && rt == 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ }
opn = "dsubu";
break;
#endif
- case OPC_SLT:
- gen_op_lt(t0, t0, t1);
- opn = "slt";
+ case OPC_MUL:
+ if (likely(rs != 0 && rt != 0)) {
+ tcg_gen_mul_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ }
+ opn = "mul";
break;
- case OPC_SLTU:
- gen_op_ltu(t0, t0, t1);
- opn = "sltu";
+ }
+ MIPS_DEBUG("%s %s, %s, %s", opn, regnames[rd], regnames[rs], regnames[rt]);
+}
+
+/* Conditional move: MOVN/MOVZ copy rs (or 0 for $0) into rd depending
+   on whether rt is non-zero/zero.  Operates on cpu_gpr[] directly since
+   the condition is tested with a branch around the move. */
+static void gen_cond_move (CPUState *env, uint32_t opc, int rd, int rs, int rt)
+{
+ const char *opn = "cond move";
+ int l1;
+
+ if (rd == 0) {
+ /* If no destination, treat it as a NOP. */
+ MIPS_DEBUG("NOP");
+ return;
+ }
+
+ l1 = gen_new_label();
+ switch (opc) {
+ case OPC_MOVN:
+ /* Move when rt != 0.  Register $0 always reads 0, so with rt == 0
+    the move can never fire: branch unconditionally past it. */
+ if (likely(rt != 0))
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[rt], 0, l1);
+ else
+ tcg_gen_br(l1);
+ opn = "movn";
+ break;
+ case OPC_MOVZ:
+ /* Move when rt == 0.  With rt == 0 ($0) the condition is always
+    true, so no branch is emitted and the move happens. */
+ if (likely(rt != 0))
+ tcg_gen_brcondi_tl(TCG_COND_NE, cpu_gpr[rt], 0, l1);
+ opn = "movz";
 break;
+ }
+ /* The move itself; rs == 0 means the architectural zero register. */
+ if (rs != 0)
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]);
+ else
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ gen_set_label(l1);
+
+ MIPS_DEBUG("%s %s, %s, %s", opn, regnames[rd], regnames[rs], regnames[rt]);
+}
+
+/* Logic (AND/NOR/OR/XOR).  Works on cpu_gpr[] directly; operands that
+   are the $0 register are constant-folded at translation time. */
+static void gen_logic (CPUState *env, uint32_t opc, int rd, int rs, int rt)
+{
+ const char *opn = "logic";
+
+ if (rd == 0) {
+ /* If no destination, treat it as a NOP. */
+ MIPS_DEBUG("NOP");
+ return;
+ }
+
+ switch (opc) {
 case OPC_AND:
- tcg_gen_and_tl(t0, t0, t1);
+ /* x & 0 == 0, so any $0 operand forces a zero result. */
+ if (likely(rs != 0 && rt != 0)) {
+ tcg_gen_and_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ }
 opn = "and";
 break;
 case OPC_NOR:
- tcg_gen_nor_tl(t0, t0, t1);
+ /* nor with one $0 operand degenerates to NOT; with both, to ~0. */
+ if (rs != 0 && rt != 0) {
+ tcg_gen_nor_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
+ } else if (rs == 0 && rt != 0) {
+ tcg_gen_not_tl(cpu_gpr[rd], cpu_gpr[rt]);
+ } else if (rs != 0 && rt == 0) {
+ tcg_gen_not_tl(cpu_gpr[rd], cpu_gpr[rs]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rd], ~((target_ulong)0));
+ }
 opn = "nor";
 break;
 case OPC_OR:
- tcg_gen_or_tl(t0, t0, t1);
+ /* x | 0 == x: a $0 operand turns OR into a plain move. */
+ if (likely(rs != 0 && rt != 0)) {
+ tcg_gen_or_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
+ } else if (rs == 0 && rt != 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rt]);
+ } else if (rs != 0 && rt == 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ }
 opn = "or";
 break;
 case OPC_XOR:
- tcg_gen_xor_tl(t0, t0, t1);
+ /* x ^ 0 == x: a $0 operand turns XOR into a plain move. */
+ if (likely(rs != 0 && rt != 0)) {
+ tcg_gen_xor_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
+ } else if (rs == 0 && rt != 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rt]);
+ } else if (rs != 0 && rt == 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ }
 opn = "xor";
 break;
- case OPC_MUL:
- tcg_gen_mul_tl(t0, t0, t1);
- tcg_gen_ext32s_tl(t0, t0);
- opn = "mul";
+ }
+ MIPS_DEBUG("%s %s, %s, %s", opn, regnames[rd], regnames[rs], regnames[rt]);
+}
+
+/* Set on lower than: rd = (rs < rt), signed for SLT, unsigned for SLTU. */
+static void gen_slt (CPUState *env, uint32_t opc, int rd, int rs, int rt)
+{
+ const char *opn = "slt";
+ TCGv t0, t1;
+
+ if (rd == 0) {
+ /* If no destination, treat it as a NOP. */
+ MIPS_DEBUG("NOP");
+ return;
+ }
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+ switch (opc) {
+ case OPC_SLT:
+ /* setcond writes 0 or 1 into rd. */
+ tcg_gen_setcond_tl(TCG_COND_LT, cpu_gpr[rd], t0, t1);
+ opn = "slt";
 break;
- case OPC_MOVN:
- {
- int l1 = gen_new_label();
+ case OPC_SLTU:
+ tcg_gen_setcond_tl(TCG_COND_LTU, cpu_gpr[rd], t0, t1);
+ opn = "sltu";
+ break;
+ }
+ MIPS_DEBUG("%s %s, %s, %s", opn, regnames[rd], regnames[rs], regnames[rt]);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
- tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1);
- gen_store_gpr(t0, rd);
- gen_set_label(l1);
- }
- opn = "movn";
- goto print;
- case OPC_MOVZ:
- {
- int l1 = gen_new_label();
+/* Variable shifts: rd = rt shifted/rotated by the low bits of rs
+   (5 bits for 32-bit forms, 6 bits for the MIPS64 d* forms). */
+static void gen_shift (CPUState *env, DisasContext *ctx, uint32_t opc,
+ int rd, int rs, int rt)
+{
+ const char *opn = "shifts";
+ TCGv t0, t1;
- tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1);
- gen_store_gpr(t0, rd);
- gen_set_label(l1);
- }
- opn = "movz";
- goto print;
+ if (rd == 0) {
+ /* If no destination, treat it as a NOP. */
+ MIPS_DEBUG("NOP");
+ return;
+ }
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+ switch (opc) {
 case OPC_SLLV:
 tcg_gen_andi_tl(t0, t0, 0x1f);
 tcg_gen_shl_tl(t0, t1, t0);
- tcg_gen_ext32s_tl(t0, t0);
+ /* 32-bit results are kept sign-extended in 64-bit GPRs. */
+ tcg_gen_ext32s_tl(cpu_gpr[rd], t0);
 opn = "sllv";
 break;
 case OPC_SRAV:
- tcg_gen_ext32s_tl(t1, t1);
 tcg_gen_andi_tl(t0, t0, 0x1f);
- tcg_gen_sar_tl(t0, t1, t0);
+ tcg_gen_sar_tl(cpu_gpr[rd], t1, t0);
 opn = "srav";
 break;
 case OPC_SRLV:
- switch ((ctx->opcode >> 6) & 0x1f) {
- case 0:
- tcg_gen_ext32u_tl(t1, t1);
- tcg_gen_andi_tl(t0, t0, 0x1f);
- tcg_gen_shr_tl(t0, t1, t0);
- tcg_gen_ext32s_tl(t0, t0);
- opn = "srlv";
- break;
- case 1:
- /* rotrv is decoded as srlv on non-R2 CPUs */
- if (env->insn_flags & ISA_MIPS32R2) {
- int l1 = gen_new_label();
- int l2 = gen_new_label();
+ /* Zero-extend rt first so the logical shift pulls in zeroes. */
+ tcg_gen_ext32u_tl(t1, t1);
+ tcg_gen_andi_tl(t0, t0, 0x1f);
+ tcg_gen_shr_tl(t0, t1, t0);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], t0);
+ opn = "srlv";
+ break;
+ case OPC_ROTRV:
+ {
+ /* 32-bit rotate done on i32 temporaries, then sign-extended back. */
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_i32 t3 = tcg_temp_new_i32();
- tcg_gen_andi_tl(t0, t0, 0x1f);
- tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
- {
- TCGv_i32 r_tmp1 = tcg_temp_new_i32();
- TCGv_i32 r_tmp2 = tcg_temp_new_i32();
-
- tcg_gen_trunc_tl_i32(r_tmp1, t0);
- tcg_gen_trunc_tl_i32(r_tmp2, t1);
- tcg_gen_rotr_i32(r_tmp1, r_tmp1, r_tmp2);
- tcg_temp_free_i32(r_tmp1);
- tcg_temp_free_i32(r_tmp2);
- tcg_gen_br(l2);
- }
- gen_set_label(l1);
- tcg_gen_mov_tl(t0, t1);
- gen_set_label(l2);
- opn = "rotrv";
- } else {
- tcg_gen_ext32u_tl(t1, t1);
- tcg_gen_andi_tl(t0, t0, 0x1f);
- tcg_gen_shr_tl(t0, t1, t0);
- tcg_gen_ext32s_tl(t0, t0);
- opn = "srlv";
- }
- break;
- default:
- MIPS_INVAL("invalid srlv flag");
- generate_exception(ctx, EXCP_RI);
- break;
+ tcg_gen_trunc_tl_i32(t2, t0);
+ tcg_gen_trunc_tl_i32(t3, t1);
+ tcg_gen_andi_i32(t2, t2, 0x1f);
+ tcg_gen_rotr_i32(t2, t3, t2);
+ tcg_gen_ext_i32_tl(cpu_gpr[rd], t2);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+ opn = "rotrv";
 }
 break;
#if defined(TARGET_MIPS64)
 case OPC_DSLLV:
 tcg_gen_andi_tl(t0, t0, 0x3f);
- tcg_gen_shl_tl(t0, t1, t0);
+ tcg_gen_shl_tl(cpu_gpr[rd], t1, t0);
 opn = "dsllv";
 break;
 case OPC_DSRAV:
 tcg_gen_andi_tl(t0, t0, 0x3f);
- tcg_gen_sar_tl(t0, t1, t0);
+ tcg_gen_sar_tl(cpu_gpr[rd], t1, t0);
 opn = "dsrav";
 break;
 case OPC_DSRLV:
- switch ((ctx->opcode >> 6) & 0x1f) {
- case 0:
- tcg_gen_andi_tl(t0, t0, 0x3f);
- tcg_gen_shr_tl(t0, t1, t0);
- opn = "dsrlv";
- break;
- case 1:
- /* drotrv is decoded as dsrlv on non-R2 CPUs */
- if (env->insn_flags & ISA_MIPS32R2) {
- int l1 = gen_new_label();
- int l2 = gen_new_label();
-
- tcg_gen_andi_tl(t0, t0, 0x3f);
- tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
- {
- tcg_gen_rotr_tl(t0, t1, t0);
- tcg_gen_br(l2);
- }
- gen_set_label(l1);
- tcg_gen_mov_tl(t0, t1);
- gen_set_label(l2);
- opn = "drotrv";
- } else {
- tcg_gen_andi_tl(t0, t0, 0x3f);
- tcg_gen_shr_tl(t0, t1, t0);
- opn = "dsrlv";
- }
- break;
- default:
- MIPS_INVAL("invalid dsrlv flag");
- generate_exception(ctx, EXCP_RI);
- break;
- }
+ tcg_gen_andi_tl(t0, t0, 0x3f);
+ tcg_gen_shr_tl(cpu_gpr[rd], t1, t0);
+ opn = "dsrlv";
+ break;
+ case OPC_DROTRV:
+ tcg_gen_andi_tl(t0, t0, 0x3f);
+ tcg_gen_rotr_tl(cpu_gpr[rd], t1, t0);
+ opn = "drotrv";
 break;
#endif
- default:
- MIPS_INVAL(opn);
- generate_exception(ctx, EXCP_RI);
- goto out;
 }
- gen_store_gpr(t0, rd);
- print:
 MIPS_DEBUG("%s %s, %s, %s", opn, regnames[rd], regnames[rs], regnames[rt]);
- out:
 tcg_temp_free(t0);
 tcg_temp_free(t1);
 }
{
int l1 = gen_new_label();
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_ext32u_tl(t1, t1);
tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1);
tcg_gen_divu_tl(cpu_LO[0], t0, t1);
tcg_gen_remu_tl(cpu_HI[0], t0, t1);
tcg_gen_trunc_i64_tl(t1, t2);
tcg_temp_free_i64(t2);
tcg_gen_ext32s_tl(cpu_LO[0], t0);
- tcg_gen_ext32s_tl(cpu_LO[1], t1);
+ tcg_gen_ext32s_tl(cpu_HI[0], t1);
}
opn = "madd";
break;
tcg_gen_ext_tl_i64(t3, t1);
tcg_gen_mul_i64(t2, t2, t3);
tcg_gen_concat_tl_i64(t3, cpu_LO[0], cpu_HI[0]);
- tcg_gen_sub_i64(t2, t2, t3);
+ tcg_gen_sub_i64(t2, t3, t2);
tcg_temp_free_i64(t3);
tcg_gen_trunc_i64_tl(t0, t2);
tcg_gen_shri_i64(t2, t2, 32);
tcg_gen_extu_tl_i64(t3, t1);
tcg_gen_mul_i64(t2, t2, t3);
tcg_gen_concat_tl_i64(t3, cpu_LO[0], cpu_HI[0]);
- tcg_gen_sub_i64(t2, t2, t3);
+ tcg_gen_sub_i64(t2, t3, t2);
tcg_temp_free_i64(t3);
tcg_gen_trunc_i64_tl(t0, t2);
tcg_gen_shri_i64(t2, t2, 32);
{
TranslationBlock *tb;
tb = ctx->tb;
- if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
+ if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
+ likely(!ctx->singlestep_enabled)) {
tcg_gen_goto_tb(n);
gen_save_pc(dest);
tcg_gen_exit_tb((long)tb + n);
} else {
gen_save_pc(dest);
+ if (ctx->singlestep_enabled) {
+ save_cpu_state(ctx, 0);
+ gen_helper_0i(raise_exception, EXCP_DEBUG);
+ }
tcg_gen_exit_tb(0);
}
}
/* Branches (before delay slot) */
static void gen_compute_branch (DisasContext *ctx, uint32_t opc,
+ int insn_bytes,
int rs, int rt, int32_t offset)
{
target_ulong btgt = -1;
gen_load_gpr(t1, rt);
bcond_compute = 1;
}
- btgt = ctx->pc + 4 + offset;
+ btgt = ctx->pc + insn_bytes + offset;
break;
case OPC_BGEZ:
case OPC_BGEZAL:
gen_load_gpr(t0, rs);
bcond_compute = 1;
}
- btgt = ctx->pc + 4 + offset;
+ btgt = ctx->pc + insn_bytes + offset;
break;
case OPC_J:
case OPC_JAL:
+ case OPC_JALX:
/* Jump to immediate */
- btgt = ((ctx->pc + 4) & (int32_t)0xF0000000) | (uint32_t)offset;
+ btgt = ((ctx->pc + insn_bytes) & (int32_t)0xF0000000) | (uint32_t)offset;
break;
case OPC_JR:
case OPC_JALR:
+ case OPC_JALRC:
/* Jump to register */
if (offset != 0 && offset != 16) {
/* Hint = 0 is JR/JALR, hint 16 is JR.HB/JALR.HB, the
ctx->hflags |= MIPS_HFLAG_B;
MIPS_DEBUG("j " TARGET_FMT_lx, btgt);
break;
+ case OPC_JALX:
+ ctx->hflags |= MIPS_HFLAG_BX;
+ /* Fallthrough */
case OPC_JAL:
blink = 31;
ctx->hflags |= MIPS_HFLAG_B;
+ ctx->hflags |= (ctx->hflags & MIPS_HFLAG_M16
+ ? MIPS_HFLAG_BDS16
+ : MIPS_HFLAG_BDS32);
MIPS_DEBUG("jal " TARGET_FMT_lx, btgt);
break;
case OPC_JR:
ctx->hflags |= MIPS_HFLAG_BR;
+ if (ctx->hflags & MIPS_HFLAG_M16)
+ ctx->hflags |= MIPS_HFLAG_BDS16;
MIPS_DEBUG("jr %s", regnames[rs]);
break;
case OPC_JALR:
+ case OPC_JALRC:
blink = rt;
ctx->hflags |= MIPS_HFLAG_BR;
+ if (ctx->hflags & MIPS_HFLAG_M16)
+ ctx->hflags |= MIPS_HFLAG_BDS16;
MIPS_DEBUG("jalr %s, %s", regnames[rt], regnames[rs]);
break;
default:
} else {
switch (opc) {
case OPC_BEQ:
- gen_op_eq(bcond, t0, t1);
+ tcg_gen_setcond_tl(TCG_COND_EQ, bcond, t0, t1);
MIPS_DEBUG("beq %s, %s, " TARGET_FMT_lx,
regnames[rs], regnames[rt], btgt);
goto not_likely;
case OPC_BEQL:
- gen_op_eq(bcond, t0, t1);
+ tcg_gen_setcond_tl(TCG_COND_EQ, bcond, t0, t1);
MIPS_DEBUG("beql %s, %s, " TARGET_FMT_lx,
regnames[rs], regnames[rt], btgt);
goto likely;
case OPC_BNE:
- gen_op_ne(bcond, t0, t1);
+ tcg_gen_setcond_tl(TCG_COND_NE, bcond, t0, t1);
MIPS_DEBUG("bne %s, %s, " TARGET_FMT_lx,
regnames[rs], regnames[rt], btgt);
goto not_likely;
case OPC_BNEL:
- gen_op_ne(bcond, t0, t1);
+ tcg_gen_setcond_tl(TCG_COND_NE, bcond, t0, t1);
MIPS_DEBUG("bnel %s, %s, " TARGET_FMT_lx,
regnames[rs], regnames[rt], btgt);
goto likely;
case OPC_BGEZ:
- gen_op_gez(bcond, t0);
+ tcg_gen_setcondi_tl(TCG_COND_GE, bcond, t0, 0);
MIPS_DEBUG("bgez %s, " TARGET_FMT_lx, regnames[rs], btgt);
goto not_likely;
case OPC_BGEZL:
- gen_op_gez(bcond, t0);
+ tcg_gen_setcondi_tl(TCG_COND_GE, bcond, t0, 0);
MIPS_DEBUG("bgezl %s, " TARGET_FMT_lx, regnames[rs], btgt);
goto likely;
case OPC_BGEZAL:
- gen_op_gez(bcond, t0);
+ tcg_gen_setcondi_tl(TCG_COND_GE, bcond, t0, 0);
MIPS_DEBUG("bgezal %s, " TARGET_FMT_lx, regnames[rs], btgt);
blink = 31;
goto not_likely;
case OPC_BGEZALL:
- gen_op_gez(bcond, t0);
+ tcg_gen_setcondi_tl(TCG_COND_GE, bcond, t0, 0);
blink = 31;
MIPS_DEBUG("bgezall %s, " TARGET_FMT_lx, regnames[rs], btgt);
goto likely;
case OPC_BGTZ:
- gen_op_gtz(bcond, t0);
+ tcg_gen_setcondi_tl(TCG_COND_GT, bcond, t0, 0);
MIPS_DEBUG("bgtz %s, " TARGET_FMT_lx, regnames[rs], btgt);
goto not_likely;
case OPC_BGTZL:
- gen_op_gtz(bcond, t0);
+ tcg_gen_setcondi_tl(TCG_COND_GT, bcond, t0, 0);
MIPS_DEBUG("bgtzl %s, " TARGET_FMT_lx, regnames[rs], btgt);
goto likely;
case OPC_BLEZ:
- gen_op_lez(bcond, t0);
+ tcg_gen_setcondi_tl(TCG_COND_LE, bcond, t0, 0);
MIPS_DEBUG("blez %s, " TARGET_FMT_lx, regnames[rs], btgt);
goto not_likely;
case OPC_BLEZL:
- gen_op_lez(bcond, t0);
+ tcg_gen_setcondi_tl(TCG_COND_LE, bcond, t0, 0);
MIPS_DEBUG("blezl %s, " TARGET_FMT_lx, regnames[rs], btgt);
goto likely;
case OPC_BLTZ:
- gen_op_ltz(bcond, t0);
+ tcg_gen_setcondi_tl(TCG_COND_LT, bcond, t0, 0);
MIPS_DEBUG("bltz %s, " TARGET_FMT_lx, regnames[rs], btgt);
goto not_likely;
case OPC_BLTZL:
- gen_op_ltz(bcond, t0);
+ tcg_gen_setcondi_tl(TCG_COND_LT, bcond, t0, 0);
MIPS_DEBUG("bltzl %s, " TARGET_FMT_lx, regnames[rs], btgt);
goto likely;
case OPC_BLTZAL:
- gen_op_ltz(bcond, t0);
+ tcg_gen_setcondi_tl(TCG_COND_LT, bcond, t0, 0);
blink = 31;
MIPS_DEBUG("bltzal %s, " TARGET_FMT_lx, regnames[rs], btgt);
not_likely:
ctx->hflags |= MIPS_HFLAG_BC;
break;
case OPC_BLTZALL:
- gen_op_ltz(bcond, t0);
+ tcg_gen_setcondi_tl(TCG_COND_LT, bcond, t0, 0);
blink = 31;
MIPS_DEBUG("bltzall %s, " TARGET_FMT_lx, regnames[rs], btgt);
likely:
ctx->btarget = btgt;
if (blink > 0) {
- tcg_gen_movi_tl(cpu_gpr[blink], ctx->pc + 8);
+ int post_delay = insn_bytes;
+ int lowbit = !!(ctx->hflags & MIPS_HFLAG_M16);
+
+ if (opc != OPC_JALRC)
+ post_delay += ((ctx->hflags & MIPS_HFLAG_BDS16) ? 2 : 4);
+
+ tcg_gen_movi_tl(cpu_gpr[blink], ctx->pc + post_delay + lowbit);
}
out:
+ if (insn_bytes == 2)
+ ctx->hflags |= MIPS_HFLAG_B16;
tcg_temp_free(t0);
tcg_temp_free(t1);
}
#ifndef CONFIG_USER_ONLY
/* CP0 (MMU and control) */
-static inline void gen_mfc0_load32 (TCGv t, target_ulong off)
+/* Read a 32-bit CP0 field at env offset 'off', sign-extending into arg. */
+static inline void gen_mfc0_load32 (TCGv arg, target_ulong off)
{
- TCGv_i32 r_tmp = tcg_temp_new_i32();
+ TCGv_i32 t0 = tcg_temp_new_i32();
- tcg_gen_ld_i32(r_tmp, cpu_env, off);
- tcg_gen_ext_i32_tl(t, r_tmp);
- tcg_temp_free_i32(r_tmp);
+ tcg_gen_ld_i32(t0, cpu_env, off);
+ tcg_gen_ext_i32_tl(arg, t0);
+ tcg_temp_free_i32(t0);
}
-static inline void gen_mfc0_load64 (TCGv t, target_ulong off)
+/* Read a target-width CP0 field at env offset 'off' into arg,
+   truncating/sign-extending it to 32 bits for the MFC0 view. */
+static inline void gen_mfc0_load64 (TCGv arg, target_ulong off)
{
- tcg_gen_ld_tl(t, cpu_env, off);
- tcg_gen_ext32s_tl(t, t);
+ tcg_gen_ld_tl(arg, cpu_env, off);
+ tcg_gen_ext32s_tl(arg, arg);
}
-static inline void gen_mtc0_store32 (TCGv t, target_ulong off)
+/* Store the low 32 bits of arg into a 32-bit CP0 field at env offset 'off'. */
+static inline void gen_mtc0_store32 (TCGv arg, target_ulong off)
{
- TCGv_i32 r_tmp = tcg_temp_new_i32();
+ TCGv_i32 t0 = tcg_temp_new_i32();
- tcg_gen_trunc_tl_i32(r_tmp, t);
- tcg_gen_st_i32(r_tmp, cpu_env, off);
- tcg_temp_free_i32(r_tmp);
+ tcg_gen_trunc_tl_i32(t0, arg);
+ tcg_gen_st_i32(t0, cpu_env, off);
+ tcg_temp_free_i32(t0);
}
-static inline void gen_mtc0_store64 (TCGv t, target_ulong off)
+/* Store arg sign-extended from 32 bits into a target-width CP0 field.
+   NOTE: modifies arg in place (callers pass a temporary). */
+static inline void gen_mtc0_store64 (TCGv arg, target_ulong off)
{
- tcg_gen_ext32s_tl(t, t);
- tcg_gen_st_tl(t, cpu_env, off);
+ tcg_gen_ext32s_tl(arg, arg);
+ tcg_gen_st_tl(arg, cpu_env, off);
}
-static void gen_mfc0 (CPUState *env, DisasContext *ctx, TCGv t0, int reg, int sel)
+static void gen_mfc0 (CPUState *env, DisasContext *ctx, TCGv arg, int reg, int sel)
{
const char *rn = "invalid";
case 0:
switch (sel) {
case 0:
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_Index));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_Index));
rn = "Index";
break;
case 1:
check_insn(env, ctx, ASE_MT);
- gen_helper_mfc0_mvpcontrol(t0);
+ gen_helper_mfc0_mvpcontrol(arg);
rn = "MVPControl";
break;
case 2:
check_insn(env, ctx, ASE_MT);
- gen_helper_mfc0_mvpconf0(t0);
+ gen_helper_mfc0_mvpconf0(arg);
rn = "MVPConf0";
break;
case 3:
check_insn(env, ctx, ASE_MT);
- gen_helper_mfc0_mvpconf1(t0);
+ gen_helper_mfc0_mvpconf1(arg);
rn = "MVPConf1";
break;
default:
case 1:
switch (sel) {
case 0:
- gen_helper_mfc0_random(t0);
+ gen_helper_mfc0_random(arg);
rn = "Random";
break;
case 1:
check_insn(env, ctx, ASE_MT);
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_VPEControl));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_VPEControl));
rn = "VPEControl";
break;
case 2:
check_insn(env, ctx, ASE_MT);
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_VPEConf0));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_VPEConf0));
rn = "VPEConf0";
break;
case 3:
check_insn(env, ctx, ASE_MT);
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_VPEConf1));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_VPEConf1));
rn = "VPEConf1";
break;
case 4:
check_insn(env, ctx, ASE_MT);
- gen_mfc0_load64(t0, offsetof(CPUState, CP0_YQMask));
+ gen_mfc0_load64(arg, offsetof(CPUState, CP0_YQMask));
rn = "YQMask";
break;
case 5:
check_insn(env, ctx, ASE_MT);
- gen_mfc0_load64(t0, offsetof(CPUState, CP0_VPESchedule));
+ gen_mfc0_load64(arg, offsetof(CPUState, CP0_VPESchedule));
rn = "VPESchedule";
break;
case 6:
check_insn(env, ctx, ASE_MT);
- gen_mfc0_load64(t0, offsetof(CPUState, CP0_VPEScheFBack));
+ gen_mfc0_load64(arg, offsetof(CPUState, CP0_VPEScheFBack));
rn = "VPEScheFBack";
break;
case 7:
check_insn(env, ctx, ASE_MT);
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_VPEOpt));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_VPEOpt));
rn = "VPEOpt";
break;
default:
case 2:
switch (sel) {
case 0:
- tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_EntryLo0));
- tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_EntryLo0));
+ tcg_gen_ext32s_tl(arg, arg);
rn = "EntryLo0";
break;
case 1:
check_insn(env, ctx, ASE_MT);
- gen_helper_mfc0_tcstatus(t0);
+ gen_helper_mfc0_tcstatus(arg);
rn = "TCStatus";
break;
case 2:
check_insn(env, ctx, ASE_MT);
- gen_helper_mfc0_tcbind(t0);
+ gen_helper_mfc0_tcbind(arg);
rn = "TCBind";
break;
case 3:
check_insn(env, ctx, ASE_MT);
- gen_helper_mfc0_tcrestart(t0);
+ gen_helper_mfc0_tcrestart(arg);
rn = "TCRestart";
break;
case 4:
check_insn(env, ctx, ASE_MT);
- gen_helper_mfc0_tchalt(t0);
+ gen_helper_mfc0_tchalt(arg);
rn = "TCHalt";
break;
case 5:
check_insn(env, ctx, ASE_MT);
- gen_helper_mfc0_tccontext(t0);
+ gen_helper_mfc0_tccontext(arg);
rn = "TCContext";
break;
case 6:
check_insn(env, ctx, ASE_MT);
- gen_helper_mfc0_tcschedule(t0);
+ gen_helper_mfc0_tcschedule(arg);
rn = "TCSchedule";
break;
case 7:
check_insn(env, ctx, ASE_MT);
- gen_helper_mfc0_tcschefback(t0);
+ gen_helper_mfc0_tcschefback(arg);
rn = "TCScheFBack";
break;
default:
case 3:
switch (sel) {
case 0:
- tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_EntryLo1));
- tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_EntryLo1));
+ tcg_gen_ext32s_tl(arg, arg);
rn = "EntryLo1";
break;
default:
case 4:
switch (sel) {
case 0:
- tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_Context));
- tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_Context));
+ tcg_gen_ext32s_tl(arg, arg);
rn = "Context";
break;
case 1:
-// gen_helper_mfc0_contextconfig(t0); /* SmartMIPS ASE */
+// gen_helper_mfc0_contextconfig(arg); /* SmartMIPS ASE */
rn = "ContextConfig";
// break;
default:
case 5:
switch (sel) {
case 0:
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_PageMask));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_PageMask));
rn = "PageMask";
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_PageGrain));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_PageGrain));
rn = "PageGrain";
break;
default:
case 6:
switch (sel) {
case 0:
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_Wired));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_Wired));
rn = "Wired";
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_SRSConf0));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_SRSConf0));
rn = "SRSConf0";
break;
case 2:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_SRSConf1));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_SRSConf1));
rn = "SRSConf1";
break;
case 3:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_SRSConf2));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_SRSConf2));
rn = "SRSConf2";
break;
case 4:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_SRSConf3));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_SRSConf3));
rn = "SRSConf3";
break;
case 5:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_SRSConf4));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_SRSConf4));
rn = "SRSConf4";
break;
default:
switch (sel) {
case 0:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_HWREna));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_HWREna));
rn = "HWREna";
break;
default:
case 8:
switch (sel) {
case 0:
- tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_BadVAddr));
- tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_BadVAddr));
+ tcg_gen_ext32s_tl(arg, arg);
rn = "BadVAddr";
break;
default:
/* Mark as an IO operation because we read the time. */
if (use_icount)
gen_io_start();
- gen_helper_mfc0_count(t0);
+ gen_helper_mfc0_count(arg);
if (use_icount) {
gen_io_end();
ctx->bstate = BS_STOP;
case 10:
switch (sel) {
case 0:
- tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_EntryHi));
- tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_EntryHi));
+ tcg_gen_ext32s_tl(arg, arg);
rn = "EntryHi";
break;
default:
case 11:
switch (sel) {
case 0:
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_Compare));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_Compare));
rn = "Compare";
break;
/* 6,7 are implementation dependent */
case 12:
switch (sel) {
case 0:
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_Status));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_Status));
rn = "Status";
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_IntCtl));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_IntCtl));
rn = "IntCtl";
break;
case 2:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_SRSCtl));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_SRSCtl));
rn = "SRSCtl";
break;
case 3:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_SRSMap));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_SRSMap));
rn = "SRSMap";
break;
default:
case 13:
switch (sel) {
case 0:
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_Cause));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_Cause));
rn = "Cause";
break;
default:
case 14:
switch (sel) {
case 0:
- tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_EPC));
- tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_EPC));
+ tcg_gen_ext32s_tl(arg, arg);
rn = "EPC";
break;
default:
case 15:
switch (sel) {
case 0:
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_PRid));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_PRid));
rn = "PRid";
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_EBase));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_EBase));
rn = "EBase";
break;
default:
case 16:
switch (sel) {
case 0:
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_Config0));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_Config0));
rn = "Config";
break;
case 1:
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_Config1));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_Config1));
rn = "Config1";
break;
case 2:
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_Config2));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_Config2));
rn = "Config2";
break;
case 3:
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_Config3));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_Config3));
rn = "Config3";
break;
/* 4,5 are reserved */
/* 6,7 are implementation dependent */
case 6:
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_Config6));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_Config6));
rn = "Config6";
break;
case 7:
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_Config7));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_Config7));
rn = "Config7";
break;
default:
case 17:
switch (sel) {
case 0:
- gen_helper_mfc0_lladdr(t0);
+ gen_helper_mfc0_lladdr(arg);
rn = "LLAddr";
break;
default:
case 18:
switch (sel) {
case 0 ... 7:
- gen_helper_1i(mfc0_watchlo, t0, sel);
+ gen_helper_1i(mfc0_watchlo, arg, sel);
rn = "WatchLo";
break;
default:
case 19:
switch (sel) {
case 0 ...7:
- gen_helper_1i(mfc0_watchhi, t0, sel);
+ gen_helper_1i(mfc0_watchhi, arg, sel);
rn = "WatchHi";
break;
default:
case 0:
#if defined(TARGET_MIPS64)
check_insn(env, ctx, ISA_MIPS3);
- tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_XContext));
- tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_XContext));
+ tcg_gen_ext32s_tl(arg, arg);
rn = "XContext";
break;
#endif
/* Officially reserved, but sel 0 is used for R1x000 framemask */
switch (sel) {
case 0:
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_Framemask));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_Framemask));
rn = "Framemask";
break;
default:
}
break;
case 22:
- tcg_gen_movi_tl(t0, 0); /* unimplemented */
+ tcg_gen_movi_tl(arg, 0); /* unimplemented */
rn = "'Diagnostic"; /* implementation dependent */
break;
case 23:
switch (sel) {
case 0:
- gen_helper_mfc0_debug(t0); /* EJTAG support */
+ gen_helper_mfc0_debug(arg); /* EJTAG support */
rn = "Debug";
break;
case 1:
-// gen_helper_mfc0_tracecontrol(t0); /* PDtrace support */
+// gen_helper_mfc0_tracecontrol(arg); /* PDtrace support */
rn = "TraceControl";
// break;
case 2:
-// gen_helper_mfc0_tracecontrol2(t0); /* PDtrace support */
+// gen_helper_mfc0_tracecontrol2(arg); /* PDtrace support */
rn = "TraceControl2";
// break;
case 3:
-// gen_helper_mfc0_usertracedata(t0); /* PDtrace support */
+// gen_helper_mfc0_usertracedata(arg); /* PDtrace support */
rn = "UserTraceData";
// break;
case 4:
-// gen_helper_mfc0_tracebpc(t0); /* PDtrace support */
+// gen_helper_mfc0_tracebpc(arg); /* PDtrace support */
rn = "TraceBPC";
// break;
default:
switch (sel) {
case 0:
/* EJTAG support */
- tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_DEPC));
- tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_DEPC));
+ tcg_gen_ext32s_tl(arg, arg);
rn = "DEPC";
break;
default:
case 25:
switch (sel) {
case 0:
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_Performance0));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_Performance0));
rn = "Performance0";
break;
case 1:
-// gen_helper_mfc0_performance1(t0);
+// gen_helper_mfc0_performance1(arg);
rn = "Performance1";
// break;
case 2:
-// gen_helper_mfc0_performance2(t0);
+// gen_helper_mfc0_performance2(arg);
rn = "Performance2";
// break;
case 3:
-// gen_helper_mfc0_performance3(t0);
+// gen_helper_mfc0_performance3(arg);
rn = "Performance3";
// break;
case 4:
-// gen_helper_mfc0_performance4(t0);
+// gen_helper_mfc0_performance4(arg);
rn = "Performance4";
// break;
case 5:
-// gen_helper_mfc0_performance5(t0);
+// gen_helper_mfc0_performance5(arg);
rn = "Performance5";
// break;
case 6:
-// gen_helper_mfc0_performance6(t0);
+// gen_helper_mfc0_performance6(arg);
rn = "Performance6";
// break;
case 7:
-// gen_helper_mfc0_performance7(t0);
+// gen_helper_mfc0_performance7(arg);
rn = "Performance7";
// break;
default:
}
break;
case 26:
- tcg_gen_movi_tl(t0, 0); /* unimplemented */
+ tcg_gen_movi_tl(arg, 0); /* unimplemented */
rn = "ECC";
break;
case 27:
switch (sel) {
case 0 ... 3:
- tcg_gen_movi_tl(t0, 0); /* unimplemented */
+ tcg_gen_movi_tl(arg, 0); /* unimplemented */
rn = "CacheErr";
break;
default:
case 2:
case 4:
case 6:
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_TagLo));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_TagLo));
rn = "TagLo";
break;
case 1:
case 3:
case 5:
case 7:
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_DataLo));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_DataLo));
rn = "DataLo";
break;
default:
case 2:
case 4:
case 6:
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_TagHi));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_TagHi));
rn = "TagHi";
break;
case 1:
case 3:
case 5:
case 7:
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_DataHi));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_DataHi));
rn = "DataHi";
break;
default:
case 30:
switch (sel) {
case 0:
- tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_ErrorEPC));
- tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_ErrorEPC));
+ tcg_gen_ext32s_tl(arg, arg);
rn = "ErrorEPC";
break;
default:
switch (sel) {
case 0:
/* EJTAG support */
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_DESAVE));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_DESAVE));
rn = "DESAVE";
break;
default:
generate_exception(ctx, EXCP_RI);
}
-static void gen_mtc0 (CPUState *env, DisasContext *ctx, TCGv t0, int reg, int sel)
+static void gen_mtc0 (CPUState *env, DisasContext *ctx, TCGv arg, int reg, int sel)
{
const char *rn = "invalid";
case 0:
switch (sel) {
case 0:
- gen_helper_mtc0_index(t0);
+ gen_helper_mtc0_index(arg);
rn = "Index";
break;
case 1:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_mvpcontrol(t0);
+ gen_helper_mtc0_mvpcontrol(arg);
rn = "MVPControl";
break;
case 2:
break;
case 1:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_vpecontrol(t0);
+ gen_helper_mtc0_vpecontrol(arg);
rn = "VPEControl";
break;
case 2:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_vpeconf0(t0);
+ gen_helper_mtc0_vpeconf0(arg);
rn = "VPEConf0";
break;
case 3:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_vpeconf1(t0);
+ gen_helper_mtc0_vpeconf1(arg);
rn = "VPEConf1";
break;
case 4:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_yqmask(t0);
+ gen_helper_mtc0_yqmask(arg);
rn = "YQMask";
break;
case 5:
check_insn(env, ctx, ASE_MT);
- gen_mtc0_store64(t0, offsetof(CPUState, CP0_VPESchedule));
+ gen_mtc0_store64(arg, offsetof(CPUState, CP0_VPESchedule));
rn = "VPESchedule";
break;
case 6:
check_insn(env, ctx, ASE_MT);
- gen_mtc0_store64(t0, offsetof(CPUState, CP0_VPEScheFBack));
+ gen_mtc0_store64(arg, offsetof(CPUState, CP0_VPEScheFBack));
rn = "VPEScheFBack";
break;
case 7:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_vpeopt(t0);
+ gen_helper_mtc0_vpeopt(arg);
rn = "VPEOpt";
break;
default:
case 2:
switch (sel) {
case 0:
- gen_helper_mtc0_entrylo0(t0);
+ gen_helper_mtc0_entrylo0(arg);
rn = "EntryLo0";
break;
case 1:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_tcstatus(t0);
+ gen_helper_mtc0_tcstatus(arg);
rn = "TCStatus";
break;
case 2:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_tcbind(t0);
+ gen_helper_mtc0_tcbind(arg);
rn = "TCBind";
break;
case 3:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_tcrestart(t0);
+ gen_helper_mtc0_tcrestart(arg);
rn = "TCRestart";
break;
case 4:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_tchalt(t0);
+ gen_helper_mtc0_tchalt(arg);
rn = "TCHalt";
break;
case 5:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_tccontext(t0);
+ gen_helper_mtc0_tccontext(arg);
rn = "TCContext";
break;
case 6:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_tcschedule(t0);
+ gen_helper_mtc0_tcschedule(arg);
rn = "TCSchedule";
break;
case 7:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_tcschefback(t0);
+ gen_helper_mtc0_tcschefback(arg);
rn = "TCScheFBack";
break;
default:
case 3:
switch (sel) {
case 0:
- gen_helper_mtc0_entrylo1(t0);
+ gen_helper_mtc0_entrylo1(arg);
rn = "EntryLo1";
break;
default:
case 4:
switch (sel) {
case 0:
- gen_helper_mtc0_context(t0);
+ gen_helper_mtc0_context(arg);
rn = "Context";
break;
case 1:
-// gen_helper_mtc0_contextconfig(t0); /* SmartMIPS ASE */
+// gen_helper_mtc0_contextconfig(arg); /* SmartMIPS ASE */
rn = "ContextConfig";
// break;
default:
case 5:
switch (sel) {
case 0:
- gen_helper_mtc0_pagemask(t0);
+ gen_helper_mtc0_pagemask(arg);
rn = "PageMask";
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_helper_mtc0_pagegrain(t0);
+ gen_helper_mtc0_pagegrain(arg);
rn = "PageGrain";
break;
default:
case 6:
switch (sel) {
case 0:
- gen_helper_mtc0_wired(t0);
+ gen_helper_mtc0_wired(arg);
rn = "Wired";
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_helper_mtc0_srsconf0(t0);
+ gen_helper_mtc0_srsconf0(arg);
rn = "SRSConf0";
break;
case 2:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_helper_mtc0_srsconf1(t0);
+ gen_helper_mtc0_srsconf1(arg);
rn = "SRSConf1";
break;
case 3:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_helper_mtc0_srsconf2(t0);
+ gen_helper_mtc0_srsconf2(arg);
rn = "SRSConf2";
break;
case 4:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_helper_mtc0_srsconf3(t0);
+ gen_helper_mtc0_srsconf3(arg);
rn = "SRSConf3";
break;
case 5:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_helper_mtc0_srsconf4(t0);
+ gen_helper_mtc0_srsconf4(arg);
rn = "SRSConf4";
break;
default:
switch (sel) {
case 0:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_helper_mtc0_hwrena(t0);
+ gen_helper_mtc0_hwrena(arg);
rn = "HWREna";
break;
default:
case 9:
switch (sel) {
case 0:
- gen_helper_mtc0_count(t0);
+ gen_helper_mtc0_count(arg);
rn = "Count";
break;
/* 6,7 are implementation dependent */
default:
goto die;
}
- /* Stop translation as we may have switched the execution mode */
- ctx->bstate = BS_STOP;
break;
case 10:
switch (sel) {
case 0:
- gen_helper_mtc0_entryhi(t0);
+ gen_helper_mtc0_entryhi(arg);
rn = "EntryHi";
break;
default:
case 11:
switch (sel) {
case 0:
- gen_helper_mtc0_compare(t0);
+ gen_helper_mtc0_compare(arg);
rn = "Compare";
break;
/* 6,7 are implementation dependent */
default:
goto die;
}
- /* Stop translation as we may have switched the execution mode */
- ctx->bstate = BS_STOP;
break;
case 12:
switch (sel) {
case 0:
- gen_helper_mtc0_status(t0);
+ save_cpu_state(ctx, 1);
+ gen_helper_mtc0_status(arg);
/* BS_STOP isn't good enough here, hflags may have changed. */
gen_save_pc(ctx->pc + 4);
ctx->bstate = BS_EXCP;
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_helper_mtc0_intctl(t0);
+ gen_helper_mtc0_intctl(arg);
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
rn = "IntCtl";
break;
case 2:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_helper_mtc0_srsctl(t0);
+ gen_helper_mtc0_srsctl(arg);
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
rn = "SRSCtl";
break;
case 3:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mtc0_store32(t0, offsetof(CPUState, CP0_SRSMap));
+ gen_mtc0_store32(arg, offsetof(CPUState, CP0_SRSMap));
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
rn = "SRSMap";
case 13:
switch (sel) {
case 0:
- gen_helper_mtc0_cause(t0);
+ save_cpu_state(ctx, 1);
+ gen_helper_mtc0_cause(arg);
rn = "Cause";
break;
default:
goto die;
}
- /* Stop translation as we may have switched the execution mode */
- ctx->bstate = BS_STOP;
break;
case 14:
switch (sel) {
case 0:
- gen_mtc0_store64(t0, offsetof(CPUState, CP0_EPC));
+ gen_mtc0_store64(arg, offsetof(CPUState, CP0_EPC));
rn = "EPC";
break;
default:
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_helper_mtc0_ebase(t0);
+ gen_helper_mtc0_ebase(arg);
rn = "EBase";
break;
default:
case 16:
switch (sel) {
case 0:
- gen_helper_mtc0_config0(t0);
+ gen_helper_mtc0_config0(arg);
rn = "Config";
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
rn = "Config1";
break;
case 2:
- gen_helper_mtc0_config2(t0);
+ gen_helper_mtc0_config2(arg);
rn = "Config2";
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
case 17:
switch (sel) {
case 0:
- /* ignored */
+ gen_helper_mtc0_lladdr(arg);
rn = "LLAddr";
break;
default:
case 18:
switch (sel) {
case 0 ... 7:
- gen_helper_1i(mtc0_watchlo, t0, sel);
+ gen_helper_1i(mtc0_watchlo, arg, sel);
rn = "WatchLo";
break;
default:
case 19:
switch (sel) {
case 0 ... 7:
- gen_helper_1i(mtc0_watchhi, t0, sel);
+ gen_helper_1i(mtc0_watchhi, arg, sel);
rn = "WatchHi";
break;
default:
case 0:
#if defined(TARGET_MIPS64)
check_insn(env, ctx, ISA_MIPS3);
- gen_helper_mtc0_xcontext(t0);
+ gen_helper_mtc0_xcontext(arg);
rn = "XContext";
break;
#endif
/* Officially reserved, but sel 0 is used for R1x000 framemask */
switch (sel) {
case 0:
- gen_helper_mtc0_framemask(t0);
+ gen_helper_mtc0_framemask(arg);
rn = "Framemask";
break;
default:
case 23:
switch (sel) {
case 0:
- gen_helper_mtc0_debug(t0); /* EJTAG support */
+ gen_helper_mtc0_debug(arg); /* EJTAG support */
/* BS_STOP isn't good enough here, hflags may have changed. */
gen_save_pc(ctx->pc + 4);
ctx->bstate = BS_EXCP;
rn = "Debug";
break;
case 1:
-// gen_helper_mtc0_tracecontrol(t0); /* PDtrace support */
+// gen_helper_mtc0_tracecontrol(arg); /* PDtrace support */
rn = "TraceControl";
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
// break;
case 2:
-// gen_helper_mtc0_tracecontrol2(t0); /* PDtrace support */
+// gen_helper_mtc0_tracecontrol2(arg); /* PDtrace support */
rn = "TraceControl2";
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
case 3:
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
-// gen_helper_mtc0_usertracedata(t0); /* PDtrace support */
+// gen_helper_mtc0_usertracedata(arg); /* PDtrace support */
rn = "UserTraceData";
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
// break;
case 4:
-// gen_helper_mtc0_tracebpc(t0); /* PDtrace support */
+// gen_helper_mtc0_tracebpc(arg); /* PDtrace support */
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
rn = "TraceBPC";
switch (sel) {
case 0:
/* EJTAG support */
- gen_mtc0_store64(t0, offsetof(CPUState, CP0_DEPC));
+ gen_mtc0_store64(arg, offsetof(CPUState, CP0_DEPC));
rn = "DEPC";
break;
default:
case 25:
switch (sel) {
case 0:
- gen_helper_mtc0_performance0(t0);
+ gen_helper_mtc0_performance0(arg);
rn = "Performance0";
break;
case 1:
-// gen_helper_mtc0_performance1(t0);
+// gen_helper_mtc0_performance1(arg);
rn = "Performance1";
// break;
case 2:
-// gen_helper_mtc0_performance2(t0);
+// gen_helper_mtc0_performance2(arg);
rn = "Performance2";
// break;
case 3:
-// gen_helper_mtc0_performance3(t0);
+// gen_helper_mtc0_performance3(arg);
rn = "Performance3";
// break;
case 4:
-// gen_helper_mtc0_performance4(t0);
+// gen_helper_mtc0_performance4(arg);
rn = "Performance4";
// break;
case 5:
-// gen_helper_mtc0_performance5(t0);
+// gen_helper_mtc0_performance5(arg);
rn = "Performance5";
// break;
case 6:
-// gen_helper_mtc0_performance6(t0);
+// gen_helper_mtc0_performance6(arg);
rn = "Performance6";
// break;
case 7:
-// gen_helper_mtc0_performance7(t0);
+// gen_helper_mtc0_performance7(arg);
rn = "Performance7";
// break;
default:
case 2:
case 4:
case 6:
- gen_helper_mtc0_taglo(t0);
+ gen_helper_mtc0_taglo(arg);
rn = "TagLo";
break;
case 1:
case 3:
case 5:
case 7:
- gen_helper_mtc0_datalo(t0);
+ gen_helper_mtc0_datalo(arg);
rn = "DataLo";
break;
default:
case 2:
case 4:
case 6:
- gen_helper_mtc0_taghi(t0);
+ gen_helper_mtc0_taghi(arg);
rn = "TagHi";
break;
case 1:
case 3:
case 5:
case 7:
- gen_helper_mtc0_datahi(t0);
+ gen_helper_mtc0_datahi(arg);
rn = "DataHi";
break;
default:
case 30:
switch (sel) {
case 0:
- gen_mtc0_store64(t0, offsetof(CPUState, CP0_ErrorEPC));
+ gen_mtc0_store64(arg, offsetof(CPUState, CP0_ErrorEPC));
rn = "ErrorEPC";
break;
default:
switch (sel) {
case 0:
/* EJTAG support */
- gen_mtc0_store32(t0, offsetof(CPUState, CP0_DESAVE));
+ gen_mtc0_store32(arg, offsetof(CPUState, CP0_DESAVE));
rn = "DESAVE";
break;
default:
}
#if defined(TARGET_MIPS64)
-static void gen_dmfc0 (CPUState *env, DisasContext *ctx, TCGv t0, int reg, int sel)
+static void gen_dmfc0 (CPUState *env, DisasContext *ctx, TCGv arg, int reg, int sel)
{
const char *rn = "invalid";
case 0:
switch (sel) {
case 0:
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_Index));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_Index));
rn = "Index";
break;
case 1:
check_insn(env, ctx, ASE_MT);
- gen_helper_mfc0_mvpcontrol(t0);
+ gen_helper_mfc0_mvpcontrol(arg);
rn = "MVPControl";
break;
case 2:
check_insn(env, ctx, ASE_MT);
- gen_helper_mfc0_mvpconf0(t0);
+ gen_helper_mfc0_mvpconf0(arg);
rn = "MVPConf0";
break;
case 3:
check_insn(env, ctx, ASE_MT);
- gen_helper_mfc0_mvpconf1(t0);
+ gen_helper_mfc0_mvpconf1(arg);
rn = "MVPConf1";
break;
default:
case 1:
switch (sel) {
case 0:
- gen_helper_mfc0_random(t0);
+ gen_helper_mfc0_random(arg);
rn = "Random";
break;
case 1:
check_insn(env, ctx, ASE_MT);
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_VPEControl));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_VPEControl));
rn = "VPEControl";
break;
case 2:
check_insn(env, ctx, ASE_MT);
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_VPEConf0));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_VPEConf0));
rn = "VPEConf0";
break;
case 3:
check_insn(env, ctx, ASE_MT);
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_VPEConf1));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_VPEConf1));
rn = "VPEConf1";
break;
case 4:
check_insn(env, ctx, ASE_MT);
- tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_YQMask));
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_YQMask));
rn = "YQMask";
break;
case 5:
check_insn(env, ctx, ASE_MT);
- tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_VPESchedule));
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_VPESchedule));
rn = "VPESchedule";
break;
case 6:
check_insn(env, ctx, ASE_MT);
- tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_VPEScheFBack));
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_VPEScheFBack));
rn = "VPEScheFBack";
break;
case 7:
check_insn(env, ctx, ASE_MT);
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_VPEOpt));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_VPEOpt));
rn = "VPEOpt";
break;
default:
case 2:
switch (sel) {
case 0:
- tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_EntryLo0));
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_EntryLo0));
rn = "EntryLo0";
break;
case 1:
check_insn(env, ctx, ASE_MT);
- gen_helper_mfc0_tcstatus(t0);
+ gen_helper_mfc0_tcstatus(arg);
rn = "TCStatus";
break;
case 2:
check_insn(env, ctx, ASE_MT);
- gen_helper_mfc0_tcbind(t0);
+ gen_helper_mfc0_tcbind(arg);
rn = "TCBind";
break;
case 3:
check_insn(env, ctx, ASE_MT);
- gen_helper_dmfc0_tcrestart(t0);
+ gen_helper_dmfc0_tcrestart(arg);
rn = "TCRestart";
break;
case 4:
check_insn(env, ctx, ASE_MT);
- gen_helper_dmfc0_tchalt(t0);
+ gen_helper_dmfc0_tchalt(arg);
rn = "TCHalt";
break;
case 5:
check_insn(env, ctx, ASE_MT);
- gen_helper_dmfc0_tccontext(t0);
+ gen_helper_dmfc0_tccontext(arg);
rn = "TCContext";
break;
case 6:
check_insn(env, ctx, ASE_MT);
- gen_helper_dmfc0_tcschedule(t0);
+ gen_helper_dmfc0_tcschedule(arg);
rn = "TCSchedule";
break;
case 7:
check_insn(env, ctx, ASE_MT);
- gen_helper_dmfc0_tcschefback(t0);
+ gen_helper_dmfc0_tcschefback(arg);
rn = "TCScheFBack";
break;
default:
case 3:
switch (sel) {
case 0:
- tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_EntryLo1));
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_EntryLo1));
rn = "EntryLo1";
break;
default:
case 4:
switch (sel) {
case 0:
- tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_Context));
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_Context));
rn = "Context";
break;
case 1:
-// gen_helper_dmfc0_contextconfig(t0); /* SmartMIPS ASE */
+// gen_helper_dmfc0_contextconfig(arg); /* SmartMIPS ASE */
rn = "ContextConfig";
// break;
default:
case 5:
switch (sel) {
case 0:
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_PageMask));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_PageMask));
rn = "PageMask";
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_PageGrain));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_PageGrain));
rn = "PageGrain";
break;
default:
case 6:
switch (sel) {
case 0:
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_Wired));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_Wired));
rn = "Wired";
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_SRSConf0));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_SRSConf0));
rn = "SRSConf0";
break;
case 2:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_SRSConf1));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_SRSConf1));
rn = "SRSConf1";
break;
case 3:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_SRSConf2));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_SRSConf2));
rn = "SRSConf2";
break;
case 4:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_SRSConf3));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_SRSConf3));
rn = "SRSConf3";
break;
case 5:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_SRSConf4));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_SRSConf4));
rn = "SRSConf4";
break;
default:
switch (sel) {
case 0:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_HWREna));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_HWREna));
rn = "HWREna";
break;
default:
case 8:
switch (sel) {
case 0:
- tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_BadVAddr));
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_BadVAddr));
rn = "BadVAddr";
break;
default:
/* Mark as an IO operation because we read the time. */
if (use_icount)
gen_io_start();
- gen_helper_mfc0_count(t0);
+ gen_helper_mfc0_count(arg);
if (use_icount) {
gen_io_end();
ctx->bstate = BS_STOP;
case 10:
switch (sel) {
case 0:
- tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_EntryHi));
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_EntryHi));
rn = "EntryHi";
break;
default:
case 11:
switch (sel) {
case 0:
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_Compare));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_Compare));
rn = "Compare";
break;
/* 6,7 are implementation dependent */
case 12:
switch (sel) {
case 0:
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_Status));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_Status));
rn = "Status";
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_IntCtl));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_IntCtl));
rn = "IntCtl";
break;
case 2:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_SRSCtl));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_SRSCtl));
rn = "SRSCtl";
break;
case 3:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_SRSMap));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_SRSMap));
rn = "SRSMap";
break;
default:
case 13:
switch (sel) {
case 0:
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_Cause));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_Cause));
rn = "Cause";
break;
default:
case 14:
switch (sel) {
case 0:
- tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_EPC));
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_EPC));
rn = "EPC";
break;
default:
case 15:
switch (sel) {
case 0:
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_PRid));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_PRid));
rn = "PRid";
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_EBase));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_EBase));
rn = "EBase";
break;
default:
case 16:
switch (sel) {
case 0:
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_Config0));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_Config0));
rn = "Config";
break;
case 1:
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_Config1));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_Config1));
rn = "Config1";
break;
case 2:
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_Config2));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_Config2));
rn = "Config2";
break;
case 3:
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_Config3));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_Config3));
rn = "Config3";
break;
/* 6,7 are implementation dependent */
case 6:
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_Config6));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_Config6));
rn = "Config6";
break;
case 7:
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_Config7));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_Config7));
rn = "Config7";
break;
default:
case 17:
switch (sel) {
case 0:
- gen_helper_dmfc0_lladdr(t0);
+ gen_helper_dmfc0_lladdr(arg);
rn = "LLAddr";
break;
default:
case 18:
switch (sel) {
case 0 ... 7:
- gen_helper_1i(dmfc0_watchlo, t0, sel);
+ gen_helper_1i(dmfc0_watchlo, arg, sel);
rn = "WatchLo";
break;
default:
case 19:
switch (sel) {
case 0 ... 7:
- gen_helper_1i(mfc0_watchhi, t0, sel);
+ gen_helper_1i(mfc0_watchhi, arg, sel);
rn = "WatchHi";
break;
default:
switch (sel) {
case 0:
check_insn(env, ctx, ISA_MIPS3);
- tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_XContext));
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_XContext));
rn = "XContext";
break;
default:
/* Officially reserved, but sel 0 is used for R1x000 framemask */
switch (sel) {
case 0:
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_Framemask));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_Framemask));
rn = "Framemask";
break;
default:
}
break;
case 22:
- tcg_gen_movi_tl(t0, 0); /* unimplemented */
+ tcg_gen_movi_tl(arg, 0); /* unimplemented */
rn = "'Diagnostic"; /* implementation dependent */
break;
case 23:
switch (sel) {
case 0:
- gen_helper_mfc0_debug(t0); /* EJTAG support */
+ gen_helper_mfc0_debug(arg); /* EJTAG support */
rn = "Debug";
break;
case 1:
-// gen_helper_dmfc0_tracecontrol(t0); /* PDtrace support */
+// gen_helper_dmfc0_tracecontrol(arg); /* PDtrace support */
rn = "TraceControl";
// break;
case 2:
-// gen_helper_dmfc0_tracecontrol2(t0); /* PDtrace support */
+// gen_helper_dmfc0_tracecontrol2(arg); /* PDtrace support */
rn = "TraceControl2";
// break;
case 3:
-// gen_helper_dmfc0_usertracedata(t0); /* PDtrace support */
+// gen_helper_dmfc0_usertracedata(arg); /* PDtrace support */
rn = "UserTraceData";
// break;
case 4:
-// gen_helper_dmfc0_tracebpc(t0); /* PDtrace support */
+// gen_helper_dmfc0_tracebpc(arg); /* PDtrace support */
rn = "TraceBPC";
// break;
default:
switch (sel) {
case 0:
/* EJTAG support */
- tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_DEPC));
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_DEPC));
rn = "DEPC";
break;
default:
case 25:
switch (sel) {
case 0:
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_Performance0));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_Performance0));
rn = "Performance0";
break;
case 1:
-// gen_helper_dmfc0_performance1(t0);
+// gen_helper_dmfc0_performance1(arg);
rn = "Performance1";
// break;
case 2:
-// gen_helper_dmfc0_performance2(t0);
+// gen_helper_dmfc0_performance2(arg);
rn = "Performance2";
// break;
case 3:
-// gen_helper_dmfc0_performance3(t0);
+// gen_helper_dmfc0_performance3(arg);
rn = "Performance3";
// break;
case 4:
-// gen_helper_dmfc0_performance4(t0);
+// gen_helper_dmfc0_performance4(arg);
rn = "Performance4";
// break;
case 5:
-// gen_helper_dmfc0_performance5(t0);
+// gen_helper_dmfc0_performance5(arg);
rn = "Performance5";
// break;
case 6:
-// gen_helper_dmfc0_performance6(t0);
+// gen_helper_dmfc0_performance6(arg);
rn = "Performance6";
// break;
case 7:
-// gen_helper_dmfc0_performance7(t0);
+// gen_helper_dmfc0_performance7(arg);
rn = "Performance7";
// break;
default:
}
break;
case 26:
- tcg_gen_movi_tl(t0, 0); /* unimplemented */
+ tcg_gen_movi_tl(arg, 0); /* unimplemented */
rn = "ECC";
break;
case 27:
switch (sel) {
/* ignored */
case 0 ... 3:
- tcg_gen_movi_tl(t0, 0); /* unimplemented */
+ tcg_gen_movi_tl(arg, 0); /* unimplemented */
rn = "CacheErr";
break;
default:
case 2:
case 4:
case 6:
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_TagLo));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_TagLo));
rn = "TagLo";
break;
case 1:
case 3:
case 5:
case 7:
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_DataLo));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_DataLo));
rn = "DataLo";
break;
default:
case 2:
case 4:
case 6:
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_TagHi));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_TagHi));
rn = "TagHi";
break;
case 1:
case 3:
case 5:
case 7:
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_DataHi));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_DataHi));
rn = "DataHi";
break;
default:
case 30:
switch (sel) {
case 0:
- tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_ErrorEPC));
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_ErrorEPC));
rn = "ErrorEPC";
break;
default:
switch (sel) {
case 0:
/* EJTAG support */
- gen_mfc0_load32(t0, offsetof(CPUState, CP0_DESAVE));
+ gen_mfc0_load32(arg, offsetof(CPUState, CP0_DESAVE));
rn = "DESAVE";
break;
default:
generate_exception(ctx, EXCP_RI);
}
-static void gen_dmtc0 (CPUState *env, DisasContext *ctx, TCGv t0, int reg, int sel)
+static void gen_dmtc0 (CPUState *env, DisasContext *ctx, TCGv arg, int reg, int sel)
{
const char *rn = "invalid";
case 0:
switch (sel) {
case 0:
- gen_helper_mtc0_index(t0);
+ gen_helper_mtc0_index(arg);
rn = "Index";
break;
case 1:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_mvpcontrol(t0);
+ gen_helper_mtc0_mvpcontrol(arg);
rn = "MVPControl";
break;
case 2:
break;
case 1:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_vpecontrol(t0);
+ gen_helper_mtc0_vpecontrol(arg);
rn = "VPEControl";
break;
case 2:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_vpeconf0(t0);
+ gen_helper_mtc0_vpeconf0(arg);
rn = "VPEConf0";
break;
case 3:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_vpeconf1(t0);
+ gen_helper_mtc0_vpeconf1(arg);
rn = "VPEConf1";
break;
case 4:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_yqmask(t0);
+ gen_helper_mtc0_yqmask(arg);
rn = "YQMask";
break;
case 5:
check_insn(env, ctx, ASE_MT);
- tcg_gen_st_tl(t0, cpu_env, offsetof(CPUState, CP0_VPESchedule));
+ tcg_gen_st_tl(arg, cpu_env, offsetof(CPUState, CP0_VPESchedule));
rn = "VPESchedule";
break;
case 6:
check_insn(env, ctx, ASE_MT);
- tcg_gen_st_tl(t0, cpu_env, offsetof(CPUState, CP0_VPEScheFBack));
+ tcg_gen_st_tl(arg, cpu_env, offsetof(CPUState, CP0_VPEScheFBack));
rn = "VPEScheFBack";
break;
case 7:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_vpeopt(t0);
+ gen_helper_mtc0_vpeopt(arg);
rn = "VPEOpt";
break;
default:
case 2:
switch (sel) {
case 0:
- gen_helper_mtc0_entrylo0(t0);
+ gen_helper_mtc0_entrylo0(arg);
rn = "EntryLo0";
break;
case 1:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_tcstatus(t0);
+ gen_helper_mtc0_tcstatus(arg);
rn = "TCStatus";
break;
case 2:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_tcbind(t0);
+ gen_helper_mtc0_tcbind(arg);
rn = "TCBind";
break;
case 3:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_tcrestart(t0);
+ gen_helper_mtc0_tcrestart(arg);
rn = "TCRestart";
break;
case 4:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_tchalt(t0);
+ gen_helper_mtc0_tchalt(arg);
rn = "TCHalt";
break;
case 5:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_tccontext(t0);
+ gen_helper_mtc0_tccontext(arg);
rn = "TCContext";
break;
case 6:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_tcschedule(t0);
+ gen_helper_mtc0_tcschedule(arg);
rn = "TCSchedule";
break;
case 7:
check_insn(env, ctx, ASE_MT);
- gen_helper_mtc0_tcschefback(t0);
+ gen_helper_mtc0_tcschefback(arg);
rn = "TCScheFBack";
break;
default:
case 3:
switch (sel) {
case 0:
- gen_helper_mtc0_entrylo1(t0);
+ gen_helper_mtc0_entrylo1(arg);
rn = "EntryLo1";
break;
default:
case 4:
switch (sel) {
case 0:
- gen_helper_mtc0_context(t0);
+ gen_helper_mtc0_context(arg);
rn = "Context";
break;
case 1:
-// gen_helper_mtc0_contextconfig(t0); /* SmartMIPS ASE */
+// gen_helper_mtc0_contextconfig(arg); /* SmartMIPS ASE */
rn = "ContextConfig";
// break;
default:
case 5:
switch (sel) {
case 0:
- gen_helper_mtc0_pagemask(t0);
+ gen_helper_mtc0_pagemask(arg);
rn = "PageMask";
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_helper_mtc0_pagegrain(t0);
+ gen_helper_mtc0_pagegrain(arg);
rn = "PageGrain";
break;
default:
case 6:
switch (sel) {
case 0:
- gen_helper_mtc0_wired(t0);
+ gen_helper_mtc0_wired(arg);
rn = "Wired";
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_helper_mtc0_srsconf0(t0);
+ gen_helper_mtc0_srsconf0(arg);
rn = "SRSConf0";
break;
case 2:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_helper_mtc0_srsconf1(t0);
+ gen_helper_mtc0_srsconf1(arg);
rn = "SRSConf1";
break;
case 3:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_helper_mtc0_srsconf2(t0);
+ gen_helper_mtc0_srsconf2(arg);
rn = "SRSConf2";
break;
case 4:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_helper_mtc0_srsconf3(t0);
+ gen_helper_mtc0_srsconf3(arg);
rn = "SRSConf3";
break;
case 5:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_helper_mtc0_srsconf4(t0);
+ gen_helper_mtc0_srsconf4(arg);
rn = "SRSConf4";
break;
default:
switch (sel) {
case 0:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_helper_mtc0_hwrena(t0);
+ gen_helper_mtc0_hwrena(arg);
rn = "HWREna";
break;
default:
case 9:
switch (sel) {
case 0:
- gen_helper_mtc0_count(t0);
+ gen_helper_mtc0_count(arg);
rn = "Count";
break;
/* 6,7 are implementation dependent */
case 10:
switch (sel) {
case 0:
- gen_helper_mtc0_entryhi(t0);
+ gen_helper_mtc0_entryhi(arg);
rn = "EntryHi";
break;
default:
case 11:
switch (sel) {
case 0:
- gen_helper_mtc0_compare(t0);
+ gen_helper_mtc0_compare(arg);
rn = "Compare";
break;
/* 6,7 are implementation dependent */
case 12:
switch (sel) {
case 0:
- gen_helper_mtc0_status(t0);
+ save_cpu_state(ctx, 1);
+ gen_helper_mtc0_status(arg);
/* BS_STOP isn't good enough here, hflags may have changed. */
gen_save_pc(ctx->pc + 4);
ctx->bstate = BS_EXCP;
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_helper_mtc0_intctl(t0);
+ gen_helper_mtc0_intctl(arg);
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
rn = "IntCtl";
break;
case 2:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_helper_mtc0_srsctl(t0);
+ gen_helper_mtc0_srsctl(arg);
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
rn = "SRSCtl";
break;
case 3:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mtc0_store32(t0, offsetof(CPUState, CP0_SRSMap));
+ gen_mtc0_store32(arg, offsetof(CPUState, CP0_SRSMap));
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
rn = "SRSMap";
case 13:
switch (sel) {
case 0:
- gen_helper_mtc0_cause(t0);
+ save_cpu_state(ctx, 1);
+ gen_helper_mtc0_cause(arg);
rn = "Cause";
break;
default:
goto die;
}
- /* Stop translation as we may have switched the execution mode */
- ctx->bstate = BS_STOP;
break;
case 14:
switch (sel) {
case 0:
- tcg_gen_st_tl(t0, cpu_env, offsetof(CPUState, CP0_EPC));
+ tcg_gen_st_tl(arg, cpu_env, offsetof(CPUState, CP0_EPC));
rn = "EPC";
break;
default:
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_helper_mtc0_ebase(t0);
+ gen_helper_mtc0_ebase(arg);
rn = "EBase";
break;
default:
case 16:
switch (sel) {
case 0:
- gen_helper_mtc0_config0(t0);
+ gen_helper_mtc0_config0(arg);
rn = "Config";
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
break;
case 1:
- /* ignored */
+ /* ignored, read only */
rn = "Config1";
break;
case 2:
- gen_helper_mtc0_config2(t0);
+ gen_helper_mtc0_config2(arg);
rn = "Config2";
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
case 17:
switch (sel) {
case 0:
- /* ignored */
+ gen_helper_mtc0_lladdr(arg);
rn = "LLAddr";
break;
default:
case 18:
switch (sel) {
case 0 ... 7:
- gen_helper_1i(mtc0_watchlo, t0, sel);
+ gen_helper_1i(mtc0_watchlo, arg, sel);
rn = "WatchLo";
break;
default:
case 19:
switch (sel) {
case 0 ... 7:
- gen_helper_1i(mtc0_watchhi, t0, sel);
+ gen_helper_1i(mtc0_watchhi, arg, sel);
rn = "WatchHi";
break;
default:
switch (sel) {
case 0:
check_insn(env, ctx, ISA_MIPS3);
- gen_helper_mtc0_xcontext(t0);
+ gen_helper_mtc0_xcontext(arg);
rn = "XContext";
break;
default:
/* Officially reserved, but sel 0 is used for R1x000 framemask */
switch (sel) {
case 0:
- gen_helper_mtc0_framemask(t0);
+ gen_helper_mtc0_framemask(arg);
rn = "Framemask";
break;
default:
case 23:
switch (sel) {
case 0:
- gen_helper_mtc0_debug(t0); /* EJTAG support */
+ gen_helper_mtc0_debug(arg); /* EJTAG support */
/* BS_STOP isn't good enough here, hflags may have changed. */
gen_save_pc(ctx->pc + 4);
ctx->bstate = BS_EXCP;
rn = "Debug";
break;
case 1:
-// gen_helper_mtc0_tracecontrol(t0); /* PDtrace support */
+// gen_helper_mtc0_tracecontrol(arg); /* PDtrace support */
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
rn = "TraceControl";
// break;
case 2:
-// gen_helper_mtc0_tracecontrol2(t0); /* PDtrace support */
+// gen_helper_mtc0_tracecontrol2(arg); /* PDtrace support */
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
rn = "TraceControl2";
// break;
case 3:
-// gen_helper_mtc0_usertracedata(t0); /* PDtrace support */
+// gen_helper_mtc0_usertracedata(arg); /* PDtrace support */
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
rn = "UserTraceData";
// break;
case 4:
-// gen_helper_mtc0_tracebpc(t0); /* PDtrace support */
+// gen_helper_mtc0_tracebpc(arg); /* PDtrace support */
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
rn = "TraceBPC";
switch (sel) {
case 0:
/* EJTAG support */
- tcg_gen_st_tl(t0, cpu_env, offsetof(CPUState, CP0_DEPC));
+ tcg_gen_st_tl(arg, cpu_env, offsetof(CPUState, CP0_DEPC));
rn = "DEPC";
break;
default:
case 25:
switch (sel) {
case 0:
- gen_helper_mtc0_performance0(t0);
+ gen_helper_mtc0_performance0(arg);
rn = "Performance0";
break;
case 1:
-// gen_helper_mtc0_performance1(t0);
+// gen_helper_mtc0_performance1(arg);
rn = "Performance1";
// break;
case 2:
-// gen_helper_mtc0_performance2(t0);
+// gen_helper_mtc0_performance2(arg);
rn = "Performance2";
// break;
case 3:
-// gen_helper_mtc0_performance3(t0);
+// gen_helper_mtc0_performance3(arg);
rn = "Performance3";
// break;
case 4:
-// gen_helper_mtc0_performance4(t0);
+// gen_helper_mtc0_performance4(arg);
rn = "Performance4";
// break;
case 5:
-// gen_helper_mtc0_performance5(t0);
+// gen_helper_mtc0_performance5(arg);
rn = "Performance5";
// break;
case 6:
-// gen_helper_mtc0_performance6(t0);
+// gen_helper_mtc0_performance6(arg);
rn = "Performance6";
// break;
case 7:
-// gen_helper_mtc0_performance7(t0);
+// gen_helper_mtc0_performance7(arg);
rn = "Performance7";
// break;
default:
case 2:
case 4:
case 6:
- gen_helper_mtc0_taglo(t0);
+ gen_helper_mtc0_taglo(arg);
rn = "TagLo";
break;
case 1:
case 3:
case 5:
case 7:
- gen_helper_mtc0_datalo(t0);
+ gen_helper_mtc0_datalo(arg);
rn = "DataLo";
break;
default:
case 2:
case 4:
case 6:
- gen_helper_mtc0_taghi(t0);
+ gen_helper_mtc0_taghi(arg);
rn = "TagHi";
break;
case 1:
case 3:
case 5:
case 7:
- gen_helper_mtc0_datahi(t0);
+ gen_helper_mtc0_datahi(arg);
rn = "DataHi";
break;
default:
case 30:
switch (sel) {
case 0:
- tcg_gen_st_tl(t0, cpu_env, offsetof(CPUState, CP0_ErrorEPC));
+ tcg_gen_st_tl(arg, cpu_env, offsetof(CPUState, CP0_ErrorEPC));
rn = "ErrorEPC";
break;
default:
switch (sel) {
case 0:
/* EJTAG support */
- gen_mtc0_store32(t0, offsetof(CPUState, CP0_DESAVE));
+ gen_mtc0_store32(arg, offsetof(CPUState, CP0_DESAVE));
rn = "DESAVE";
break;
default:
/* Treat as NOP. */
return;
}
- {
- TCGv t0 = tcg_temp_local_new();
-
- gen_mfc0(env, ctx, t0, rd, ctx->opcode & 0x7);
- gen_store_gpr(t0, rt);
- tcg_temp_free(t0);
- }
+ gen_mfc0(env, ctx, cpu_gpr[rt], rd, ctx->opcode & 0x7);
opn = "mfc0";
break;
case OPC_MTC0:
{
- TCGv t0 = tcg_temp_local_new();
+ TCGv t0 = tcg_temp_new();
gen_load_gpr(t0, rt);
- save_cpu_state(ctx, 1);
gen_mtc0(env, ctx, t0, rd, ctx->opcode & 0x7);
tcg_temp_free(t0);
}
/* Treat as NOP. */
return;
}
- {
- TCGv t0 = tcg_temp_local_new();
-
- gen_dmfc0(env, ctx, t0, rd, ctx->opcode & 0x7);
- gen_store_gpr(t0, rt);
- tcg_temp_free(t0);
- }
+ gen_dmfc0(env, ctx, cpu_gpr[rt], rd, ctx->opcode & 0x7);
opn = "dmfc0";
break;
case OPC_DMTC0:
check_insn(env, ctx, ISA_MIPS3);
{
- TCGv t0 = tcg_temp_local_new();
+ TCGv t0 = tcg_temp_new();
gen_load_gpr(t0, rt);
- save_cpu_state(ctx, 1);
gen_dmtc0(env, ctx, t0, rd, ctx->opcode & 0x7);
tcg_temp_free(t0);
}
case OPC_ERET:
opn = "eret";
check_insn(env, ctx, ISA_MIPS2);
- save_cpu_state(ctx, 1);
gen_helper_eret();
ctx->bstate = BS_EXCP;
break;
MIPS_INVAL(opn);
generate_exception(ctx, EXCP_RI);
} else {
- save_cpu_state(ctx, 1);
gen_helper_deret();
ctx->bstate = BS_EXCP;
}
switch (op) {
case OPC_BC1F:
- {
- int l1 = gen_new_label();
- int l2 = gen_new_label();
-
- get_fp_cond(t0);
- tcg_gen_andi_i32(t0, t0, 0x1 << cc);
- tcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, l1);
- tcg_gen_movi_tl(bcond, 0);
- tcg_gen_br(l2);
- gen_set_label(l1);
- tcg_gen_movi_tl(bcond, 1);
- gen_set_label(l2);
- }
+ tcg_gen_shri_i32(t0, fpu_fcr31, get_fp_bit(cc));
+ tcg_gen_not_i32(t0, t0);
+ tcg_gen_andi_i32(t0, t0, 1);
+ tcg_gen_extu_i32_tl(bcond, t0);
opn = "bc1f";
goto not_likely;
case OPC_BC1FL:
- {
- int l1 = gen_new_label();
- int l2 = gen_new_label();
-
- get_fp_cond(t0);
- tcg_gen_andi_i32(t0, t0, 0x1 << cc);
- tcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, l1);
- tcg_gen_movi_tl(bcond, 0);
- tcg_gen_br(l2);
- gen_set_label(l1);
- tcg_gen_movi_tl(bcond, 1);
- gen_set_label(l2);
- }
+ tcg_gen_shri_i32(t0, fpu_fcr31, get_fp_bit(cc));
+ tcg_gen_not_i32(t0, t0);
+ tcg_gen_andi_i32(t0, t0, 1);
+ tcg_gen_extu_i32_tl(bcond, t0);
opn = "bc1fl";
goto likely;
case OPC_BC1T:
- {
- int l1 = gen_new_label();
- int l2 = gen_new_label();
-
- get_fp_cond(t0);
- tcg_gen_andi_i32(t0, t0, 0x1 << cc);
- tcg_gen_brcondi_i32(TCG_COND_NE, t0, 0, l1);
- tcg_gen_movi_tl(bcond, 0);
- tcg_gen_br(l2);
- gen_set_label(l1);
- tcg_gen_movi_tl(bcond, 1);
- gen_set_label(l2);
- }
+ tcg_gen_shri_i32(t0, fpu_fcr31, get_fp_bit(cc));
+ tcg_gen_andi_i32(t0, t0, 1);
+ tcg_gen_extu_i32_tl(bcond, t0);
opn = "bc1t";
goto not_likely;
case OPC_BC1TL:
- {
- int l1 = gen_new_label();
- int l2 = gen_new_label();
-
- get_fp_cond(t0);
- tcg_gen_andi_i32(t0, t0, 0x1 << cc);
- tcg_gen_brcondi_i32(TCG_COND_NE, t0, 0, l1);
- tcg_gen_movi_tl(bcond, 0);
- tcg_gen_br(l2);
- gen_set_label(l1);
- tcg_gen_movi_tl(bcond, 1);
- gen_set_label(l2);
- }
+ tcg_gen_shri_i32(t0, fpu_fcr31, get_fp_bit(cc));
+ tcg_gen_andi_i32(t0, t0, 1);
+ tcg_gen_extu_i32_tl(bcond, t0);
opn = "bc1tl";
likely:
ctx->hflags |= MIPS_HFLAG_BL;
break;
case OPC_BC1FANY2:
{
- int l1 = gen_new_label();
- int l2 = gen_new_label();
-
- get_fp_cond(t0);
- tcg_gen_andi_i32(t0, t0, 0x3 << cc);
- tcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, l1);
- tcg_gen_movi_tl(bcond, 0);
- tcg_gen_br(l2);
- gen_set_label(l1);
- tcg_gen_movi_tl(bcond, 1);
- gen_set_label(l2);
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ tcg_gen_shri_i32(t0, fpu_fcr31, get_fp_bit(cc));
+ tcg_gen_shri_i32(t1, fpu_fcr31, get_fp_bit(cc+1));
+ tcg_gen_nor_i32(t0, t0, t1);
+ tcg_temp_free_i32(t1);
+ tcg_gen_andi_i32(t0, t0, 1);
+ tcg_gen_extu_i32_tl(bcond, t0);
}
opn = "bc1any2f";
goto not_likely;
case OPC_BC1TANY2:
{
- int l1 = gen_new_label();
- int l2 = gen_new_label();
-
- get_fp_cond(t0);
- tcg_gen_andi_i32(t0, t0, 0x3 << cc);
- tcg_gen_brcondi_i32(TCG_COND_NE, t0, 0, l1);
- tcg_gen_movi_tl(bcond, 0);
- tcg_gen_br(l2);
- gen_set_label(l1);
- tcg_gen_movi_tl(bcond, 1);
- gen_set_label(l2);
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ tcg_gen_shri_i32(t0, fpu_fcr31, get_fp_bit(cc));
+ tcg_gen_shri_i32(t1, fpu_fcr31, get_fp_bit(cc+1));
+ tcg_gen_or_i32(t0, t0, t1);
+ tcg_temp_free_i32(t1);
+ tcg_gen_andi_i32(t0, t0, 1);
+ tcg_gen_extu_i32_tl(bcond, t0);
}
opn = "bc1any2t";
goto not_likely;
case OPC_BC1FANY4:
{
- int l1 = gen_new_label();
- int l2 = gen_new_label();
-
- get_fp_cond(t0);
- tcg_gen_andi_i32(t0, t0, 0xf << cc);
- tcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, l1);
- tcg_gen_movi_tl(bcond, 0);
- tcg_gen_br(l2);
- gen_set_label(l1);
- tcg_gen_movi_tl(bcond, 1);
- gen_set_label(l2);
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ tcg_gen_shri_i32(t0, fpu_fcr31, get_fp_bit(cc));
+ tcg_gen_shri_i32(t1, fpu_fcr31, get_fp_bit(cc+1));
+ tcg_gen_or_i32(t0, t0, t1);
+ tcg_gen_shri_i32(t1, fpu_fcr31, get_fp_bit(cc+2));
+ tcg_gen_or_i32(t0, t0, t1);
+ tcg_gen_shri_i32(t1, fpu_fcr31, get_fp_bit(cc+3));
+ tcg_gen_nor_i32(t0, t0, t1);
+ tcg_temp_free_i32(t1);
+ tcg_gen_andi_i32(t0, t0, 1);
+ tcg_gen_extu_i32_tl(bcond, t0);
}
opn = "bc1any4f";
goto not_likely;
case OPC_BC1TANY4:
{
- int l1 = gen_new_label();
- int l2 = gen_new_label();
-
- get_fp_cond(t0);
- tcg_gen_andi_i32(t0, t0, 0xf << cc);
- tcg_gen_brcondi_i32(TCG_COND_NE, t0, 0, l1);
- tcg_gen_movi_tl(bcond, 0);
- tcg_gen_br(l2);
- gen_set_label(l1);
- tcg_gen_movi_tl(bcond, 1);
- gen_set_label(l2);
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ tcg_gen_shri_i32(t0, fpu_fcr31, get_fp_bit(cc));
+ tcg_gen_shri_i32(t1, fpu_fcr31, get_fp_bit(cc+1));
+ tcg_gen_or_i32(t0, t0, t1);
+ tcg_gen_shri_i32(t1, fpu_fcr31, get_fp_bit(cc+2));
+ tcg_gen_or_i32(t0, t0, t1);
+ tcg_gen_shri_i32(t1, fpu_fcr31, get_fp_bit(cc+3));
+ tcg_gen_or_i32(t0, t0, t1);
+ tcg_temp_free_i32(t1);
+ tcg_gen_andi_i32(t0, t0, 1);
+ tcg_gen_extu_i32_tl(bcond, t0);
}
opn = "bc1any4t";
not_likely:
static void gen_cp1 (DisasContext *ctx, uint32_t opc, int rt, int fs)
{
const char *opn = "cp1 move";
- TCGv t0 = tcg_temp_local_new();
+ TCGv t0 = tcg_temp_new();
switch (opc) {
case OPC_MFC1:
gen_helper_1i(ctc1, t0, fs);
opn = "ctc1";
break;
+#if defined(TARGET_MIPS64)
case OPC_DMFC1:
- {
- TCGv_i64 fp0 = tcg_temp_new_i64();
-
- gen_load_fpr64(ctx, fp0, fs);
- tcg_gen_trunc_i64_tl(t0, fp0);
- tcg_temp_free_i64(fp0);
- }
+ gen_load_fpr64(ctx, t0, fs);
gen_store_gpr(t0, rt);
opn = "dmfc1";
break;
case OPC_DMTC1:
gen_load_gpr(t0, rt);
- {
- TCGv_i64 fp0 = tcg_temp_new_i64();
-
- tcg_gen_extu_tl_i64(fp0, t0);
- gen_store_fpr64(ctx, fp0, fs);
- tcg_temp_free_i64(fp0);
- }
+ gen_store_fpr64(ctx, t0, fs);
opn = "dmtc1";
break;
+#endif
case OPC_MFHC1:
{
TCGv_i32 fp0 = tcg_temp_new_i32();
static void gen_movci (DisasContext *ctx, int rd, int rs, int cc, int tf)
{
- int l1 = gen_new_label();
- uint32_t ccbit;
+ int l1;
TCGCond cond;
- TCGv t0 = tcg_temp_local_new();
- TCGv_i32 r_tmp = tcg_temp_new_i32();
+ TCGv_i32 t0;
+
+ if (rd == 0) {
+ /* Treat as NOP. */
+ return;
+ }
- if (cc)
- ccbit = 1 << (24 + cc);
- else
- ccbit = 1 << 23;
if (tf)
cond = TCG_COND_EQ;
else
cond = TCG_COND_NE;
- gen_load_gpr(t0, rd);
- tcg_gen_andi_i32(r_tmp, fpu_fcr31, ccbit);
- tcg_gen_brcondi_i32(cond, r_tmp, 0, l1);
- tcg_temp_free_i32(r_tmp);
- gen_load_gpr(t0, rs);
+ l1 = gen_new_label();
+ t0 = tcg_temp_new_i32();
+ tcg_gen_andi_i32(t0, fpu_fcr31, 1 << get_fp_bit(cc));
+ tcg_gen_brcondi_i32(cond, t0, 0, l1);
+ tcg_temp_free_i32(t0);
+ if (rs == 0) {
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ } else {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]);
+ }
gen_set_label(l1);
- gen_store_gpr(t0, rd);
- tcg_temp_free(t0);
}
static inline void gen_movcf_s (int fs, int fd, int cc, int tf)
{
- uint32_t ccbit;
int cond;
- TCGv_i32 r_tmp1 = tcg_temp_new_i32();
- TCGv_i32 fp0 = tcg_temp_local_new_i32();
+ TCGv_i32 t0 = tcg_temp_new_i32();
int l1 = gen_new_label();
- if (cc)
- ccbit = 1 << (24 + cc);
- else
- ccbit = 1 << 23;
-
if (tf)
cond = TCG_COND_EQ;
else
cond = TCG_COND_NE;
- gen_load_fpr32(fp0, fd);
- tcg_gen_andi_i32(r_tmp1, fpu_fcr31, ccbit);
- tcg_gen_brcondi_i32(cond, r_tmp1, 0, l1);
- tcg_temp_free_i32(r_tmp1);
- gen_load_fpr32(fp0, fs);
+ tcg_gen_andi_i32(t0, fpu_fcr31, 1 << get_fp_bit(cc));
+ tcg_gen_brcondi_i32(cond, t0, 0, l1);
+ gen_load_fpr32(t0, fs);
+ gen_store_fpr32(t0, fd);
gen_set_label(l1);
- gen_store_fpr32(fp0, fd);
- tcg_temp_free_i32(fp0);
+ tcg_temp_free_i32(t0);
}
static inline void gen_movcf_d (DisasContext *ctx, int fs, int fd, int cc, int tf)
{
- uint32_t ccbit;
int cond;
- TCGv_i32 r_tmp1 = tcg_temp_new_i32();
- TCGv_i64 fp0 = tcg_temp_local_new_i64();
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i64 fp0;
int l1 = gen_new_label();
- if (cc)
- ccbit = 1 << (24 + cc);
- else
- ccbit = 1 << 23;
-
if (tf)
cond = TCG_COND_EQ;
else
cond = TCG_COND_NE;
- gen_load_fpr64(ctx, fp0, fd);
- tcg_gen_andi_i32(r_tmp1, fpu_fcr31, ccbit);
- tcg_gen_brcondi_i32(cond, r_tmp1, 0, l1);
- tcg_temp_free_i32(r_tmp1);
+ tcg_gen_andi_i32(t0, fpu_fcr31, 1 << get_fp_bit(cc));
+ tcg_gen_brcondi_i32(cond, t0, 0, l1);
+ tcg_temp_free_i32(t0);
+ fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- gen_set_label(l1);
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
+ gen_set_label(l1);
}
static inline void gen_movcf_ps (int fs, int fd, int cc, int tf)
{
- uint32_t ccbit1, ccbit2;
int cond;
- TCGv_i32 r_tmp1 = tcg_temp_new_i32();
- TCGv_i32 fp0 = tcg_temp_local_new_i32();
+ TCGv_i32 t0 = tcg_temp_new_i32();
int l1 = gen_new_label();
int l2 = gen_new_label();
- if (cc) {
- ccbit1 = 1 << (24 + cc);
- ccbit2 = 1 << (25 + cc);
- } else {
- ccbit1 = 1 << 23;
- ccbit2 = 1 << 25;
- }
-
if (tf)
cond = TCG_COND_EQ;
else
cond = TCG_COND_NE;
- gen_load_fpr32(fp0, fd);
- tcg_gen_andi_i32(r_tmp1, fpu_fcr31, ccbit1);
- tcg_gen_brcondi_i32(cond, r_tmp1, 0, l1);
- gen_load_fpr32(fp0, fs);
+ tcg_gen_andi_i32(t0, fpu_fcr31, 1 << get_fp_bit(cc));
+ tcg_gen_brcondi_i32(cond, t0, 0, l1);
+ gen_load_fpr32(t0, fs);
+ gen_store_fpr32(t0, fd);
gen_set_label(l1);
- gen_store_fpr32(fp0, fd);
- gen_load_fpr32h(fp0, fd);
- tcg_gen_andi_i32(r_tmp1, fpu_fcr31, ccbit2);
- tcg_gen_brcondi_i32(cond, r_tmp1, 0, l2);
- gen_load_fpr32h(fp0, fs);
+ tcg_gen_andi_i32(t0, fpu_fcr31, 1 << get_fp_bit(cc+1));
+ tcg_gen_brcondi_i32(cond, t0, 0, l2);
+ gen_load_fpr32h(t0, fs);
+ gen_store_fpr32h(t0, fd);
+ tcg_temp_free_i32(t0);
gen_set_label(l2);
- gen_store_fpr32h(fp0, fd);
-
- tcg_temp_free_i32(r_tmp1);
- tcg_temp_free_i32(fp0);
}
case FOP(18, 16):
{
int l1 = gen_new_label();
- TCGv t0 = tcg_temp_new();
- TCGv_i32 fp0 = tcg_temp_local_new_i32();
+ TCGv_i32 fp0;
- gen_load_gpr(t0, ft);
- tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0, l1);
+ if (ft != 0) {
+ tcg_gen_brcondi_tl(TCG_COND_NE, cpu_gpr[ft], 0, l1);
+ }
+ fp0 = tcg_temp_new_i32();
gen_load_fpr32(fp0, fs);
gen_store_fpr32(fp0, fd);
tcg_temp_free_i32(fp0);
gen_set_label(l1);
- tcg_temp_free(t0);
}
opn = "movz.s";
break;
case FOP(19, 16):
{
int l1 = gen_new_label();
- TCGv t0 = tcg_temp_new();
- TCGv_i32 fp0 = tcg_temp_local_new_i32();
-
- gen_load_gpr(t0, ft);
- tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
- gen_load_fpr32(fp0, fs);
- gen_store_fpr32(fp0, fd);
- tcg_temp_free_i32(fp0);
- gen_set_label(l1);
- tcg_temp_free(t0);
+ TCGv_i32 fp0;
+
+ if (ft != 0) {
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[ft], 0, l1);
+ fp0 = tcg_temp_new_i32();
+ gen_load_fpr32(fp0, fs);
+ gen_store_fpr32(fp0, fd);
+ tcg_temp_free_i32(fp0);
+ gen_set_label(l1);
+ }
}
opn = "movn.s";
break;
case FOP(18, 17):
{
int l1 = gen_new_label();
- TCGv t0 = tcg_temp_new();
- TCGv_i64 fp0 = tcg_temp_local_new_i64();
+ TCGv_i64 fp0;
- gen_load_gpr(t0, ft);
- tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0, l1);
+ if (ft != 0) {
+ tcg_gen_brcondi_tl(TCG_COND_NE, cpu_gpr[ft], 0, l1);
+ }
+ fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
gen_set_label(l1);
- tcg_temp_free(t0);
}
opn = "movz.d";
break;
case FOP(19, 17):
{
int l1 = gen_new_label();
- TCGv t0 = tcg_temp_new();
- TCGv_i64 fp0 = tcg_temp_local_new_i64();
-
- gen_load_gpr(t0, ft);
- tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
- gen_load_fpr64(ctx, fp0, fs);
- gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
- gen_set_label(l1);
- tcg_temp_free(t0);
+ TCGv_i64 fp0;
+
+ if (ft != 0) {
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[ft], 0, l1);
+ fp0 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ gen_set_label(l1);
+ }
}
opn = "movn.d";
break;
check_cp1_64bitmode(ctx);
{
int l1 = gen_new_label();
- TCGv t0 = tcg_temp_new();
- TCGv_i32 fp0 = tcg_temp_local_new_i32();
- TCGv_i32 fph0 = tcg_temp_local_new_i32();
+ TCGv_i64 fp0;
- gen_load_gpr(t0, ft);
- tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0, l1);
- gen_load_fpr32(fp0, fs);
- gen_load_fpr32h(fph0, fs);
- gen_store_fpr32(fp0, fd);
- gen_store_fpr32h(fph0, fd);
- tcg_temp_free_i32(fp0);
- tcg_temp_free_i32(fph0);
+ if (ft != 0)
+ tcg_gen_brcondi_tl(TCG_COND_NE, cpu_gpr[ft], 0, l1);
+ fp0 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
gen_set_label(l1);
- tcg_temp_free(t0);
}
opn = "movz.ps";
break;
check_cp1_64bitmode(ctx);
{
int l1 = gen_new_label();
- TCGv t0 = tcg_temp_new();
- TCGv_i32 fp0 = tcg_temp_local_new_i32();
- TCGv_i32 fph0 = tcg_temp_local_new_i32();
-
- gen_load_gpr(t0, ft);
- tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
- gen_load_fpr32(fp0, fs);
- gen_load_fpr32h(fph0, fs);
- gen_store_fpr32(fp0, fd);
- gen_store_fpr32h(fph0, fd);
- tcg_temp_free_i32(fp0);
- tcg_temp_free_i32(fph0);
- gen_set_label(l1);
- tcg_temp_free(t0);
+ TCGv_i64 fp0;
+
+ if (ft != 0) {
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[ft], 0, l1);
+ fp0 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ gen_set_label(l1);
+ }
}
opn = "movn.ps";
break;
{
const char *opn = "extended float load/store";
int store = 0;
- TCGv t0 = tcg_temp_local_new();
- TCGv t1 = tcg_temp_local_new();
+ TCGv t0 = tcg_temp_new();
if (base == 0) {
gen_load_gpr(t0, index);
gen_load_gpr(t0, base);
} else {
gen_load_gpr(t0, index);
- gen_op_addr_add(ctx, t0, cpu_gpr[base]);
+ gen_op_addr_add(ctx, t0, cpu_gpr[base], t0);
}
/* Don't do NOP if destination is zero: we must perform the actual
memory access. */
+ save_cpu_state(ctx, 0);
switch (opc) {
case OPC_LWXC1:
check_cop1x(ctx);
{
TCGv_i32 fp0 = tcg_temp_new_i32();
- tcg_gen_qemu_ld32s(t1, t0, ctx->mem_idx);
- tcg_gen_trunc_tl_i32(fp0, t1);
+ tcg_gen_qemu_ld32s(t0, t0, ctx->mem_idx);
+ tcg_gen_trunc_tl_i32(fp0, t0);
gen_store_fpr32(fp0, fd);
tcg_temp_free_i32(fp0);
}
check_cop1x(ctx);
{
TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv t1 = tcg_temp_new();
gen_load_fpr32(fp0, fs);
tcg_gen_extu_i32_tl(t1, fp0);
tcg_gen_qemu_st32(t1, t0, ctx->mem_idx);
tcg_temp_free_i32(fp0);
+ tcg_temp_free(t1);
}
opn = "swxc1";
store = 1;
opn = "suxc1";
store = 1;
break;
- default:
- MIPS_INVAL(opn);
- generate_exception(ctx, EXCP_RI);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- return;
}
tcg_temp_free(t0);
- tcg_temp_free(t1);
MIPS_DEBUG("%s %s, %s(%s)", opn, fregnames[store ? fs : fd],
regnames[index], regnames[base]);
}
check_cp1_64bitmode(ctx);
{
TCGv t0 = tcg_temp_local_new();
- TCGv_i32 fp0 = tcg_temp_local_new_i32();
- TCGv_i32 fph0 = tcg_temp_local_new_i32();
- TCGv_i32 fp1 = tcg_temp_local_new_i32();
- TCGv_i32 fph1 = tcg_temp_local_new_i32();
+ TCGv_i32 fp = tcg_temp_new_i32();
+ TCGv_i32 fph = tcg_temp_new_i32();
int l1 = gen_new_label();
int l2 = gen_new_label();
gen_load_gpr(t0, fr);
tcg_gen_andi_tl(t0, t0, 0x7);
- gen_load_fpr32(fp0, fs);
- gen_load_fpr32h(fph0, fs);
- gen_load_fpr32(fp1, ft);
- gen_load_fpr32h(fph1, ft);
tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0, l1);
- gen_store_fpr32(fp0, fd);
- gen_store_fpr32h(fph0, fd);
+ gen_load_fpr32(fp, fs);
+ gen_load_fpr32h(fph, fs);
+ gen_store_fpr32(fp, fd);
+ gen_store_fpr32h(fph, fd);
tcg_gen_br(l2);
gen_set_label(l1);
tcg_gen_brcondi_tl(TCG_COND_NE, t0, 4, l2);
tcg_temp_free(t0);
#ifdef TARGET_WORDS_BIGENDIAN
- gen_store_fpr32(fph1, fd);
- gen_store_fpr32h(fp0, fd);
+ gen_load_fpr32(fp, fs);
+ gen_load_fpr32h(fph, ft);
+ gen_store_fpr32h(fp, fd);
+ gen_store_fpr32(fph, fd);
#else
- gen_store_fpr32(fph0, fd);
- gen_store_fpr32h(fp1, fd);
+ gen_load_fpr32h(fph, fs);
+ gen_load_fpr32(fp, ft);
+ gen_store_fpr32(fph, fd);
+ gen_store_fpr32h(fp, fd);
#endif
gen_set_label(l2);
- tcg_temp_free_i32(fp0);
- tcg_temp_free_i32(fph0);
- tcg_temp_free_i32(fp1);
- tcg_temp_free_i32(fph1);
+ tcg_temp_free_i32(fp);
+ tcg_temp_free_i32(fph);
}
opn = "alnv.ps";
break;
gen_store_fpr64(ctx, fp2, fd);
tcg_temp_free_i64(fp2);
}
- opn = "nmsub.d";
+ opn = "nmsub.d";
+ break;
+ case OPC_NMSUB_PS:
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+ TCGv_i64 fp2 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_load_fpr64(ctx, fp2, fr);
+ gen_helper_float_nmulsub_ps(fp2, fp0, fp1, fp2);
+ tcg_temp_free_i64(fp0);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp2, fd);
+ tcg_temp_free_i64(fp2);
+ }
+ opn = "nmsub.ps";
+ break;
+ default:
+ MIPS_INVAL(opn);
+ generate_exception (ctx, EXCP_RI);
+ return;
+ }
+ MIPS_DEBUG("%s %s, %s, %s, %s", opn, fregnames[fd], fregnames[fr],
+ fregnames[fs], fregnames[ft]);
+}
+
+/*
+ * Emit the code that completes a branch or jump once its delay slot has
+ * been translated.  The branch kind was recorded in ctx->hflags
+ * (MIPS_HFLAG_BMASK) when the branch instruction itself was decoded;
+ * insn_bytes is the size of the current instruction and is used to form
+ * the not-taken fall-through address (ctx->pc + insn_bytes) of
+ * conditional branches.
+ */
+static void handle_delay_slot (CPUState *env, DisasContext *ctx,
+                               int insn_bytes)
+{
+    if (ctx->hflags & MIPS_HFLAG_BMASK) {
+        int proc_hflags = ctx->hflags & MIPS_HFLAG_BMASK;
+        /* Branches completion */
+        ctx->hflags &= ~MIPS_HFLAG_BMASK;
+        ctx->bstate = BS_BRANCH;
+        save_cpu_state(ctx, 0);
+        /* FIXME: Need to clear can_do_io. */
+        switch (proc_hflags & MIPS_HFLAG_BMASK_BASE) {
+        case MIPS_HFLAG_B:
+            /* unconditional branch */
+            MIPS_DEBUG("unconditional branch");
+            if (proc_hflags & MIPS_HFLAG_BX) {
+                /* The branch also switches the ISA mode: flip the
+                   MIPS16 hflag before chaining to the target TB. */
+                tcg_gen_xori_i32(hflags, hflags, MIPS_HFLAG_M16);
+            }
+            gen_goto_tb(ctx, 0, ctx->btarget);
+            break;
+        case MIPS_HFLAG_BL:
+            /* blikely taken case */
+            MIPS_DEBUG("blikely branch taken");
+            gen_goto_tb(ctx, 0, ctx->btarget);
+            break;
+        case MIPS_HFLAG_BC:
+            /* Conditional branch */
+            MIPS_DEBUG("conditional branch");
+            {
+                int l1 = gen_new_label();
+
+                /* bcond != 0 means taken; otherwise fall through to
+                   the instruction after the delay slot. */
+                tcg_gen_brcondi_tl(TCG_COND_NE, bcond, 0, l1);
+                gen_goto_tb(ctx, 1, ctx->pc + insn_bytes);
+                gen_set_label(l1);
+                gen_goto_tb(ctx, 0, ctx->btarget);
+            }
+            break;
+        case MIPS_HFLAG_BR:
+            /* unconditional branch to register */
+            MIPS_DEBUG("branch to register");
+            if (env->insn_flags & ASE_MIPS16) {
+                TCGv t0 = tcg_temp_new();
+                TCGv_i32 t1 = tcg_temp_new_i32();
+
+                /* Bit 0 of the target address selects the ISA mode:
+                   copy it into the MIPS_HFLAG_M16 hflag bit...  */
+                tcg_gen_andi_tl(t0, btarget, 0x1);
+                tcg_gen_trunc_tl_i32(t1, t0);
+                tcg_temp_free(t0);
+                tcg_gen_andi_i32(hflags, hflags, ~(uint32_t)MIPS_HFLAG_M16);
+                tcg_gen_shli_i32(t1, t1, MIPS_HFLAG_M16_SHIFT);
+                tcg_gen_or_i32(hflags, hflags, t1);
+                tcg_temp_free_i32(t1);
+
+                /* ...and clear it from the PC itself. */
+                tcg_gen_andi_tl(cpu_PC, btarget, ~(target_ulong)0x1);
+            } else {
+                tcg_gen_mov_tl(cpu_PC, btarget);
+            }
+            if (ctx->singlestep_enabled) {
+                save_cpu_state(ctx, 0);
+                gen_helper_0i(raise_exception, EXCP_DEBUG);
+            }
+            /* Target is not known at translation time: leave the TB
+               without chaining. */
+            tcg_gen_exit_tb(0);
+            break;
+        default:
+            MIPS_DEBUG("unknown branch");
+            break;
+        }
+    }
+}
+
+/* ISA extensions (ASEs) */
+/* MIPS16 extension to MIPS32 */
+
+/* MIPS16 major opcodes */
+enum {
+    /* NOTE(review): presumably the 5-bit major opcode field of the
+       16-bit instruction word — confirm against the MIPS16 decoder. */
+    M16_OPC_ADDIUSP = 0x00,
+    M16_OPC_ADDIUPC = 0x01,
+    M16_OPC_B = 0x02,
+    M16_OPC_JAL = 0x03,
+    M16_OPC_BEQZ = 0x04,
+    M16_OPC_BNEQZ = 0x05,
+    M16_OPC_SHIFT = 0x06,
+    M16_OPC_LD = 0x07,
+    M16_OPC_RRIA = 0x08,
+    M16_OPC_ADDIU8 = 0x09,
+    M16_OPC_SLTI = 0x0a,
+    M16_OPC_SLTIU = 0x0b,
+    M16_OPC_I8 = 0x0c,
+    M16_OPC_LI = 0x0d,
+    M16_OPC_CMPI = 0x0e,
+    M16_OPC_SD = 0x0f,
+    M16_OPC_LB = 0x10,
+    M16_OPC_LH = 0x11,
+    M16_OPC_LWSP = 0x12,
+    M16_OPC_LW = 0x13,
+    M16_OPC_LBU = 0x14,
+    M16_OPC_LHU = 0x15,
+    M16_OPC_LWPC = 0x16,
+    M16_OPC_LWU = 0x17,
+    M16_OPC_SB = 0x18,
+    M16_OPC_SH = 0x19,
+    M16_OPC_SWSP = 0x1a,
+    M16_OPC_SW = 0x1b,
+    M16_OPC_RRR = 0x1c,
+    M16_OPC_RR = 0x1d,
+    M16_OPC_EXTEND = 0x1e,
+    M16_OPC_I64 = 0x1f
+};
+
+/* I8 funct field */
+enum {
+    I8_BTEQZ = 0x0,
+    I8_BTNEZ = 0x1,
+    I8_SWRASP = 0x2,
+    I8_ADJSP = 0x3,
+    I8_SVRS = 0x4,
+    I8_MOV32R = 0x5,
+    I8_MOVR32 = 0x7
+};
+
+/* RRR f field */
+enum {
+    RRR_DADDU = 0x0,
+    RRR_ADDU = 0x1,
+    RRR_DSUBU = 0x2,
+    RRR_SUBU = 0x3
+};
+
+/* RR funct field */
+enum {
+    RR_JR = 0x00,
+    RR_SDBBP = 0x01,
+    RR_SLT = 0x02,
+    RR_SLTU = 0x03,
+    RR_SLLV = 0x04,
+    RR_BREAK = 0x05,
+    RR_SRLV = 0x06,
+    RR_SRAV = 0x07,
+    RR_DSRL = 0x08,
+    RR_CMP = 0x0a,
+    RR_NEG = 0x0b,
+    RR_AND = 0x0c,
+    RR_OR = 0x0d,
+    RR_XOR = 0x0e,
+    RR_NOT = 0x0f,
+    RR_MFHI = 0x10,
+    RR_CNVT = 0x11,
+    RR_MFLO = 0x12,
+    RR_DSRA = 0x13,
+    RR_DSLLV = 0x14,
+    RR_DSRLV = 0x16,
+    RR_DSRAV = 0x17,
+    RR_MULT = 0x18,
+    RR_MULTU = 0x19,
+    RR_DIV = 0x1a,
+    RR_DIVU = 0x1b,
+    RR_DMULT = 0x1c,
+    RR_DMULTU = 0x1d,
+    RR_DDIV = 0x1e,
+    RR_DDIVU = 0x1f
+};
+
+/* I64 funct field */
+enum {
+    I64_LDSP = 0x0,
+    I64_SDSP = 0x1,
+    I64_SDRASP = 0x2,
+    I64_DADJSP = 0x3,
+    I64_LDPC = 0x4,
+    I64_DADDIU5 = 0x5,
+    I64_DADDIUPC = 0x6,
+    I64_DADDIUSP = 0x7
+};
+
+/* RR ry field for CNVT */
+enum {
+    RR_RY_CNVT_ZEB = 0x0,
+    RR_RY_CNVT_ZEH = 0x1,
+    RR_RY_CNVT_ZEW = 0x2,
+    RR_RY_CNVT_SEB = 0x4,
+    RR_RY_CNVT_SEH = 0x5,
+    /* NOTE(review): trailing comma after the last enumerator is C99;
+       drop it if C89/C++98 compilers must still be supported. */
+    RR_RY_CNVT_SEW = 0x6,
+};
+
+/* Translate a 3-bit MIPS16 register field into the full MIPS register
+   number: MIPS16 can directly address only $16, $17 and $2..$7. */
+static int xlat (int r)
+{
+    /* NOTE(review): table is never written — could be "static const". */
+    static int map[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
+
+    return map[r];
+}
+
<parameter name="body">+/*
+ * MIPS16e SAVE: build a stack frame.  First stores the incoming
+ * argument registers ($4..$7, count decoded from "aregs") at positive
+ * offsets from $sp, then pushes ra / extended static regs / s1 / s0 /
+ * static argument registers below $sp, and finally decrements $sp by
+ * "framesize".  Invalid "aregs" encodings raise a Reserved Instruction
+ * exception.
+ */
+static void gen_mips16_save (DisasContext *ctx,
+                             int xsregs, int aregs,
+                             int do_ra, int do_s0, int do_s1,
+                             int framesize)
+{
+    TCGv t0 = tcg_temp_new();
+    TCGv t1 = tcg_temp_new();
+    int args, astatic;
+
+    /* Number of argument registers to store above the new frame. */
+    switch (aregs) {
+    case 0:
+    case 1:
+    case 2:
+    case 3:
+    case 11:
+        args = 0;
+        break;
+    case 4:
+    case 5:
+    case 6:
+    case 7:
+        args = 1;
+        break;
+    case 8:
+    case 9:
+    case 10:
+        args = 2;
+        break;
+    case 12:
+    case 13:
+        args = 3;
+        break;
+    case 14:
+        args = 4;
+        break;
+    default:
+        generate_exception(ctx, EXCP_RI);
+        return;
+    }
+
+    /* Store $4..$7 (as many as requested) at $sp+0 .. $sp+12. */
+    switch (args) {
+    case 4:
+        gen_base_offset_addr(ctx, t0, 29, 12);
+        gen_load_gpr(t1, 7);
+        op_ldst_sw(t1, t0, ctx);
+        /* Fall through */
+    case 3:
+        gen_base_offset_addr(ctx, t0, 29, 8);
+        gen_load_gpr(t1, 6);
+        op_ldst_sw(t1, t0, ctx);
+        /* Fall through */
+    case 2:
+        gen_base_offset_addr(ctx, t0, 29, 4);
+        gen_load_gpr(t1, 5);
+        op_ldst_sw(t1, t0, ctx);
+        /* Fall through */
+    case 1:
+        gen_base_offset_addr(ctx, t0, 29, 0);
+        gen_load_gpr(t1, 4);
+        op_ldst_sw(t1, t0, ctx);
+    }
+
+    /* t0 tracks the running store address while the frame is built. */
+    gen_load_gpr(t0, 29);
+
+/* Push register "reg" onto the frame being built. */
+#define DECR_AND_STORE(reg) do { \
+        tcg_gen_subi_tl(t0, t0, 4); \
+        gen_load_gpr(t1, reg); \
+        op_ldst_sw(t1, t0, ctx); \
+    } while (0)
+
+    if (do_ra) {
+        DECR_AND_STORE(31);
+    }
+
+    /* Extended static registers: xsregs selects how many of
+       $18..$23, $30 to push. */
+    switch (xsregs) {
+    case 7:
+        DECR_AND_STORE(30);
+        /* Fall through */
+    case 6:
+        DECR_AND_STORE(23);
+        /* Fall through */
+    case 5:
+        DECR_AND_STORE(22);
+        /* Fall through */
+    case 4:
+        DECR_AND_STORE(21);
+        /* Fall through */
+    case 3:
+        DECR_AND_STORE(20);
+        /* Fall through */
+    case 2:
+        DECR_AND_STORE(19);
+        /* Fall through */
+    case 1:
+        DECR_AND_STORE(18);
+    }
+
+    if (do_s1) {
+        DECR_AND_STORE(17);
+    }
+    if (do_s0) {
+        DECR_AND_STORE(16);
+    }
+
+    /* Number of static argument registers saved at the bottom of the
+       frame (same aregs encoding as above). */
+    switch (aregs) {
+    case 0:
+    case 4:
+    case 8:
+    case 12:
+    case 14:
+        astatic = 0;
+        break;
+    case 1:
+    case 5:
+    case 9:
+    case 13:
+        astatic = 1;
+        break;
+    case 2:
+    case 6:
+    case 10:
+        astatic = 2;
+        break;
+    case 3:
+    case 7:
+        astatic = 3;
+        break;
+    case 11:
+        astatic = 4;
+        break;
+    default:
+        generate_exception(ctx, EXCP_RI);
+        return;
+    }
+
+    if (astatic > 0) {
+        DECR_AND_STORE(7);
+        if (astatic > 1) {
+            DECR_AND_STORE(6);
+            if (astatic > 2) {
+                DECR_AND_STORE(5);
+                if (astatic > 3) {
+                    DECR_AND_STORE(4);
+                }
+            }
+        }
+    }
+#undef DECR_AND_STORE
+
+    tcg_gen_subi_tl(cpu_gpr[29], cpu_gpr[29], framesize);
+    tcg_temp_free(t0);
+    tcg_temp_free(t1);
+}
</parameter>
+
+/*
+ * MIPS16e RESTORE: tear down a stack frame built by SAVE.  Reloads
+ * ra / extended static regs / s1 / s0 / static argument registers from
+ * the top of the frame ($sp + framesize, working downwards), then
+ * increments $sp by "framesize".  Invalid "aregs" encodings raise a
+ * Reserved Instruction exception.
+ */
+static void gen_mips16_restore (DisasContext *ctx,
+                                int xsregs, int aregs,
+                                int do_ra, int do_s0, int do_s1,
+                                int framesize)
+{
+    int astatic;
+    TCGv t0 = tcg_temp_new();
+    TCGv t1 = tcg_temp_new();
+
+    /* t0 tracks the running load address, starting at frame top. */
+    tcg_gen_addi_tl(t0, cpu_gpr[29], framesize);
+
+/* Pop the next word of the frame into register "reg". */
+#define DECR_AND_LOAD(reg) do { \
+        tcg_gen_subi_tl(t0, t0, 4); \
+        op_ldst_lw(t1, t0, ctx); \
+        gen_store_gpr(t1, reg); \
+    } while (0)
+
+    if (do_ra) {
+        DECR_AND_LOAD(31);
+    }
+
+    /* Extended static registers, mirroring gen_mips16_save. */
+    switch (xsregs) {
+    case 7:
+        DECR_AND_LOAD(30);
+        /* Fall through */
+    case 6:
+        DECR_AND_LOAD(23);
+        /* Fall through */
+    case 5:
+        DECR_AND_LOAD(22);
+        /* Fall through */
+    case 4:
+        DECR_AND_LOAD(21);
+        /* Fall through */
+    case 3:
+        DECR_AND_LOAD(20);
+        /* Fall through */
+    case 2:
+        DECR_AND_LOAD(19);
+        /* Fall through */
+    case 1:
+        DECR_AND_LOAD(18);
+    }
+
+    if (do_s1) {
+        DECR_AND_LOAD(17);
+    }
+    if (do_s0) {
+        DECR_AND_LOAD(16);
+    }
+
+    /* Number of static argument registers to reload from the bottom of
+       the frame (same aregs encoding as in gen_mips16_save). */
+    switch (aregs) {
+    case 0:
+    case 4:
+    case 8:
+    case 12:
+    case 14:
+        astatic = 0;
+        break;
+    case 1:
+    case 5:
+    case 9:
+    case 13:
+        astatic = 1;
+        break;
+    case 2:
+    case 6:
+    case 10:
+        astatic = 2;
+        break;
+    case 3:
+    case 7:
+        astatic = 3;
+        break;
+    case 11:
+        astatic = 4;
+        break;
+    default:
+        generate_exception(ctx, EXCP_RI);
+        return;
+    }
+
+    if (astatic > 0) {
+        DECR_AND_LOAD(7);
+        if (astatic > 1) {
+            DECR_AND_LOAD(6);
+            if (astatic > 2) {
+                DECR_AND_LOAD(5);
+                if (astatic > 3) {
+                    DECR_AND_LOAD(4);
+                }
+            }
+        }
+    }
+#undef DECR_AND_LOAD
+
+    tcg_gen_addi_tl(cpu_gpr[29], cpu_gpr[29], framesize);
+    tcg_temp_free(t0);
+    tcg_temp_free(t1);
+}
+
+static void gen_addiupc (DisasContext *ctx, int rx, int imm,
+ int is_64_bit, int extended)
+{
+ TCGv t0;
+
+ if (extended && (ctx->hflags & MIPS_HFLAG_BMASK)) {
+ generate_exception(ctx, EXCP_RI);
+ return;
+ }
+
+ t0 = tcg_temp_new();
+
+ tcg_gen_movi_tl(t0, pc_relative_pc(ctx));
+ tcg_gen_addi_tl(cpu_gpr[rx], t0, imm);
+ if (!is_64_bit) {
+ tcg_gen_ext32s_tl(cpu_gpr[rx], cpu_gpr[rx]);
+ }
+
+ tcg_temp_free(t0);
+}
+
+#if defined(TARGET_MIPS64)
+static void decode_i64_mips16 (CPUState *env, DisasContext *ctx,
+ int ry, int funct, int16_t offset,
+ int extended)
+{
+ switch (funct) {
+ case I64_LDSP:
+ check_mips_64(ctx);
+ offset = extended ? offset : offset << 3;
+ gen_ldst(ctx, OPC_LD, ry, 29, offset);
+ break;
+ case I64_SDSP:
+ check_mips_64(ctx);
+ offset = extended ? offset : offset << 3;
+ gen_ldst(ctx, OPC_SD, ry, 29, offset);
+ break;
+ case I64_SDRASP:
+ check_mips_64(ctx);
+ offset = extended ? offset : (ctx->opcode & 0xff) << 3;
+ gen_ldst(ctx, OPC_SD, 31, 29, offset);
+ break;
+ case I64_DADJSP:
+ check_mips_64(ctx);
+ offset = extended ? offset : ((int8_t)ctx->opcode) << 3;
+ gen_arith_imm(env, ctx, OPC_DADDIU, 29, 29, offset);
+ break;
+ case I64_LDPC:
+ if (extended && (ctx->hflags & MIPS_HFLAG_BMASK)) {
+ generate_exception(ctx, EXCP_RI);
+ } else {
+ offset = extended ? offset : offset << 3;
+ gen_ldst(ctx, OPC_LDPC, ry, 0, offset);
+ }
+ break;
+ case I64_DADDIU5:
+ check_mips_64(ctx);
+ offset = extended ? offset : ((int8_t)(offset << 3)) >> 3;
+ gen_arith_imm(env, ctx, OPC_DADDIU, ry, ry, offset);
+ break;
+ case I64_DADDIUPC:
+ check_mips_64(ctx);
+ offset = extended ? offset : offset << 2;
+ gen_addiupc(ctx, ry, offset, 1, extended);
+ break;
+ case I64_DADDIUSP:
+ check_mips_64(ctx);
+ offset = extended ? offset : offset << 2;
+ gen_arith_imm(env, ctx, OPC_DADDIU, ry, 29, offset);
+ break;
+ }
+}
+#endif
+
+static int decode_extended_mips16_opc (CPUState *env, DisasContext *ctx,
+ int *is_branch)
+{
+ int extend = lduw_code(ctx->pc + 2);
+ int op, rx, ry, funct, sa;
+ int16_t imm, offset;
+
+ ctx->opcode = (ctx->opcode << 16) | extend;
+ op = (ctx->opcode >> 11) & 0x1f;
+ sa = (ctx->opcode >> 22) & 0x1f;
+ funct = (ctx->opcode >> 8) & 0x7;
+ rx = xlat((ctx->opcode >> 8) & 0x7);
+ ry = xlat((ctx->opcode >> 5) & 0x7);
+ offset = imm = (int16_t) (((ctx->opcode >> 16) & 0x1f) << 11
+ | ((ctx->opcode >> 21) & 0x3f) << 5
+ | (ctx->opcode & 0x1f));
+
+ /* The extended opcodes cleverly reuse the opcodes from their 16-bit
+ counterparts. */
+ switch (op) {
+ case M16_OPC_ADDIUSP:
+ gen_arith_imm(env, ctx, OPC_ADDIU, rx, 29, imm);
+ break;
+ case M16_OPC_ADDIUPC:
+ gen_addiupc(ctx, rx, imm, 0, 1);
+ break;
+ case M16_OPC_B:
+ gen_compute_branch(ctx, OPC_BEQ, 4, 0, 0, offset << 1);
+ /* No delay slot, so just process as a normal instruction */
+ break;
+ case M16_OPC_BEQZ:
+ gen_compute_branch(ctx, OPC_BEQ, 4, rx, 0, offset << 1);
+ /* No delay slot, so just process as a normal instruction */
+ break;
+ case M16_OPC_BNEQZ:
+ gen_compute_branch(ctx, OPC_BNE, 4, rx, 0, offset << 1);
+ /* No delay slot, so just process as a normal instruction */
+ break;
+ case M16_OPC_SHIFT:
+ switch (ctx->opcode & 0x3) {
+ case 0x0:
+ gen_shift_imm(env, ctx, OPC_SLL, rx, ry, sa);
+ break;
+ case 0x1:
+#if defined(TARGET_MIPS64)
+ check_mips_64(ctx);
+ gen_shift_imm(env, ctx, OPC_DSLL, rx, ry, sa);
+#else
+ generate_exception(ctx, EXCP_RI);
+#endif
+ break;
+ case 0x2:
+ gen_shift_imm(env, ctx, OPC_SRL, rx, ry, sa);
+ break;
+ case 0x3:
+ gen_shift_imm(env, ctx, OPC_SRA, rx, ry, sa);
+ break;
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case M16_OPC_LD:
+ check_mips_64(ctx);
+ gen_ldst(ctx, OPC_LD, ry, rx, offset);
+ break;
+#endif
+ case M16_OPC_RRIA:
+ imm = ctx->opcode & 0xf;
+ imm = imm | ((ctx->opcode >> 20) & 0x7f) << 4;
+ imm = imm | ((ctx->opcode >> 16) & 0xf) << 11;
+ imm = (int16_t) (imm << 1) >> 1;
+ if ((ctx->opcode >> 4) & 0x1) {
+#if defined(TARGET_MIPS64)
+ check_mips_64(ctx);
+ gen_arith_imm(env, ctx, OPC_DADDIU, ry, rx, imm);
+#else
+ generate_exception(ctx, EXCP_RI);
+#endif
+ } else {
+ gen_arith_imm(env, ctx, OPC_ADDIU, ry, rx, imm);
+ }
+ break;
+ case M16_OPC_ADDIU8:
+ gen_arith_imm(env, ctx, OPC_ADDIU, rx, rx, imm);
+ break;
+ case M16_OPC_SLTI:
+ gen_slt_imm(env, OPC_SLTI, 24, rx, imm);
+ break;
+ case M16_OPC_SLTIU:
+ gen_slt_imm(env, OPC_SLTIU, 24, rx, imm);
+ break;
+ case M16_OPC_I8:
+ switch (funct) {
+ case I8_BTEQZ:
+ gen_compute_branch(ctx, OPC_BEQ, 4, 24, 0, offset << 1);
+ break;
+ case I8_BTNEZ:
+ gen_compute_branch(ctx, OPC_BNE, 4, 24, 0, offset << 1);
+ break;
+ case I8_SWRASP:
+ gen_ldst(ctx, OPC_SW, 31, 29, imm);
+ break;
+ case I8_ADJSP:
+ gen_arith_imm(env, ctx, OPC_ADDIU, 29, 29, imm);
+ break;
+ case I8_SVRS:
+ {
+ int xsregs = (ctx->opcode >> 24) & 0x7;
+ int aregs = (ctx->opcode >> 16) & 0xf;
+ int do_ra = (ctx->opcode >> 6) & 0x1;
+ int do_s0 = (ctx->opcode >> 5) & 0x1;
+ int do_s1 = (ctx->opcode >> 4) & 0x1;
+ int framesize = (((ctx->opcode >> 20) & 0xf) << 4
+ | (ctx->opcode & 0xf)) << 3;
+
+ if (ctx->opcode & (1 << 7)) {
+ gen_mips16_save(ctx, xsregs, aregs,
+ do_ra, do_s0, do_s1,
+ framesize);
+ } else {
+ gen_mips16_restore(ctx, xsregs, aregs,
+ do_ra, do_s0, do_s1,
+ framesize);
+ }
+ }
+ break;
+ default:
+ generate_exception(ctx, EXCP_RI);
+ break;
+ }
+ break;
+ case M16_OPC_LI:
+ tcg_gen_movi_tl(cpu_gpr[rx], (uint16_t) imm);
+ break;
+ case M16_OPC_CMPI:
+ tcg_gen_xori_tl(cpu_gpr[24], cpu_gpr[rx], (uint16_t) imm);
+ break;
+#if defined(TARGET_MIPS64)
+ case M16_OPC_SD:
+ gen_ldst(ctx, OPC_SD, ry, rx, offset);
+ break;
+#endif
+ case M16_OPC_LB:
+ gen_ldst(ctx, OPC_LB, ry, rx, offset);
+ break;
+ case M16_OPC_LH:
+ gen_ldst(ctx, OPC_LH, ry, rx, offset);
+ break;
+ case M16_OPC_LWSP:
+ gen_ldst(ctx, OPC_LW, rx, 29, offset);
+ break;
+ case M16_OPC_LW:
+ gen_ldst(ctx, OPC_LW, ry, rx, offset);
+ break;
+ case M16_OPC_LBU:
+ gen_ldst(ctx, OPC_LBU, ry, rx, offset);
+ break;
+ case M16_OPC_LHU:
+ gen_ldst(ctx, OPC_LHU, ry, rx, offset);
+ break;
+ case M16_OPC_LWPC:
+ gen_ldst(ctx, OPC_LWPC, rx, 0, offset);
+ break;
+#if defined(TARGET_MIPS64)
+ case M16_OPC_LWU:
+ gen_ldst(ctx, OPC_LWU, ry, rx, offset);
+ break;
+#endif
+ case M16_OPC_SB:
+ gen_ldst(ctx, OPC_SB, ry, rx, offset);
+ break;
+ case M16_OPC_SH:
+ gen_ldst(ctx, OPC_SH, ry, rx, offset);
+ break;
+ case M16_OPC_SWSP:
+ gen_ldst(ctx, OPC_SW, rx, 29, offset);
+ break;
+ case M16_OPC_SW:
+ gen_ldst(ctx, OPC_SW, ry, rx, offset);
+ break;
+#if defined(TARGET_MIPS64)
+ case M16_OPC_I64:
+ decode_i64_mips16(env, ctx, ry, funct, offset, 1);
+ break;
+#endif
+ default:
+ generate_exception(ctx, EXCP_RI);
+ break;
+ }
+
+ return 4;
+}
+
+static int decode_mips16_opc (CPUState *env, DisasContext *ctx,
+ int *is_branch)
+{
+ int rx, ry;
+ int sa;
+ int op, cnvt_op, op1, offset;
+ int funct;
+ int n_bytes;
+
+ op = (ctx->opcode >> 11) & 0x1f;
+ sa = (ctx->opcode >> 2) & 0x7;
+ sa = sa == 0 ? 8 : sa;
+ rx = xlat((ctx->opcode >> 8) & 0x7);
+ cnvt_op = (ctx->opcode >> 5) & 0x7;
+ ry = xlat((ctx->opcode >> 5) & 0x7);
+ op1 = offset = ctx->opcode & 0x1f;
+
+ n_bytes = 2;
+
+ switch (op) {
+ case M16_OPC_ADDIUSP:
+ {
+ int16_t imm = ((uint8_t) ctx->opcode) << 2;
+
+ gen_arith_imm(env, ctx, OPC_ADDIU, rx, 29, imm);
+ }
+ break;
+ case M16_OPC_ADDIUPC:
+ gen_addiupc(ctx, rx, ((uint8_t) ctx->opcode) << 2, 0, 0);
+ break;
+ case M16_OPC_B:
+ offset = (ctx->opcode & 0x7ff) << 1;
+ offset = (int16_t)(offset << 4) >> 4;
+ gen_compute_branch(ctx, OPC_BEQ, 2, 0, 0, offset);
+ /* No delay slot, so just process as a normal instruction */
+ break;
+ case M16_OPC_JAL:
+ offset = lduw_code(ctx->pc + 2);
+ offset = (((ctx->opcode & 0x1f) << 21)
+ | ((ctx->opcode >> 5) & 0x1f) << 16
+ | offset) << 2;
+ op = ((ctx->opcode >> 10) & 0x1) ? OPC_JALX : OPC_JAL;
+ gen_compute_branch(ctx, op, 4, rx, ry, offset);
+ n_bytes = 4;
+ *is_branch = 1;
+ break;
+ case M16_OPC_BEQZ:
+ gen_compute_branch(ctx, OPC_BEQ, 2, rx, 0, ((int8_t)ctx->opcode) << 1);
+ /* No delay slot, so just process as a normal instruction */
+ break;
+ case M16_OPC_BNEQZ:
+ gen_compute_branch(ctx, OPC_BNE, 2, rx, 0, ((int8_t)ctx->opcode) << 1);
+ /* No delay slot, so just process as a normal instruction */
+ break;
+ case M16_OPC_SHIFT:
+ switch (ctx->opcode & 0x3) {
+ case 0x0:
+ gen_shift_imm(env, ctx, OPC_SLL, rx, ry, sa);
+ break;
+ case 0x1:
+#if defined(TARGET_MIPS64)
+ check_mips_64(ctx);
+ gen_shift_imm(env, ctx, OPC_DSLL, rx, ry, sa);
+#else
+ generate_exception(ctx, EXCP_RI);
+#endif
+ break;
+ case 0x2:
+ gen_shift_imm(env, ctx, OPC_SRL, rx, ry, sa);
+ break;
+ case 0x3:
+ gen_shift_imm(env, ctx, OPC_SRA, rx, ry, sa);
+ break;
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case M16_OPC_LD:
+ check_mips_64(ctx);
+ gen_ldst(ctx, OPC_LD, ry, rx, offset << 3);
+ break;
+#endif
+ case M16_OPC_RRIA:
+ {
+ int16_t imm = (int8_t)((ctx->opcode & 0xf) << 4) >> 4;
+
+ if ((ctx->opcode >> 4) & 1) {
+#if defined(TARGET_MIPS64)
+ check_mips_64(ctx);
+ gen_arith_imm(env, ctx, OPC_DADDIU, ry, rx, imm);
+#else
+ generate_exception(ctx, EXCP_RI);
+#endif
+ } else {
+ gen_arith_imm(env, ctx, OPC_ADDIU, ry, rx, imm);
+ }
+ }
+ break;
+ case M16_OPC_ADDIU8:
+ {
+ int16_t imm = (int8_t) ctx->opcode;
+
+ gen_arith_imm(env, ctx, OPC_ADDIU, rx, rx, imm);
+ }
+ break;
+ case M16_OPC_SLTI:
+ {
+ int16_t imm = (uint8_t) ctx->opcode;
+
+ gen_slt_imm(env, OPC_SLTI, 24, rx, imm);
+ }
+ break;
+ case M16_OPC_SLTIU:
+ {
+ int16_t imm = (uint8_t) ctx->opcode;
+
+ gen_slt_imm(env, OPC_SLTIU, 24, rx, imm);
+ }
+ break;
+ case M16_OPC_I8:
+ {
+ int reg32;
+
+ funct = (ctx->opcode >> 8) & 0x7;
+ switch (funct) {
+ case I8_BTEQZ:
+ gen_compute_branch(ctx, OPC_BEQ, 2, 24, 0,
+ ((int8_t)ctx->opcode) << 1);
+ break;
+ case I8_BTNEZ:
+ gen_compute_branch(ctx, OPC_BNE, 2, 24, 0,
+ ((int8_t)ctx->opcode) << 1);
+ break;
+ case I8_SWRASP:
+ gen_ldst(ctx, OPC_SW, 31, 29, (ctx->opcode & 0xff) << 2);
+ break;
+ case I8_ADJSP:
+ gen_arith_imm(env, ctx, OPC_ADDIU, 29, 29,
+ ((int8_t)ctx->opcode) << 3);
+ break;
+ case I8_SVRS:
+ {
+ int do_ra = ctx->opcode & (1 << 6);
+ int do_s0 = ctx->opcode & (1 << 5);
+ int do_s1 = ctx->opcode & (1 << 4);
+ int framesize = ctx->opcode & 0xf;
+
+ if (framesize == 0) {
+ framesize = 128;
+ } else {
+ framesize = framesize << 3;
+ }
+
+ if (ctx->opcode & (1 << 7)) {
+ gen_mips16_save(ctx, 0, 0,
+ do_ra, do_s0, do_s1, framesize);
+ } else {
+ gen_mips16_restore(ctx, 0, 0,
+ do_ra, do_s0, do_s1, framesize);
+ }
+ }
+ break;
+ case I8_MOV32R:
+ {
+ int rz = xlat(ctx->opcode & 0x7);
+
+ reg32 = (((ctx->opcode >> 3) & 0x3) << 3) |
+ ((ctx->opcode >> 5) & 0x7);
+ gen_arith(env, ctx, OPC_ADDU, reg32, rz, 0);
+ }
+ break;
+ case I8_MOVR32:
+ reg32 = ctx->opcode & 0x1f;
+ gen_arith(env, ctx, OPC_ADDU, ry, reg32, 0);
+ break;
+ default:
+ generate_exception(ctx, EXCP_RI);
+ break;
+ }
+ }
+ break;
+ case M16_OPC_LI:
+ {
+ int16_t imm = (uint8_t) ctx->opcode;
+
+ gen_arith_imm(env, ctx, OPC_ADDIU, rx, 0, imm);
+ }
+ break;
+ case M16_OPC_CMPI:
+ {
+ int16_t imm = (uint8_t) ctx->opcode;
+
+ gen_logic_imm(env, OPC_XORI, 24, rx, imm);
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case M16_OPC_SD:
+ check_mips_64(ctx);
+ gen_ldst(ctx, OPC_SD, ry, rx, offset << 3);
+ break;
+#endif
+ case M16_OPC_LB:
+ gen_ldst(ctx, OPC_LB, ry, rx, offset);
+ break;
+ case M16_OPC_LH:
+ gen_ldst(ctx, OPC_LH, ry, rx, offset << 1);
+ break;
+ case M16_OPC_LWSP:
+ gen_ldst(ctx, OPC_LW, rx, 29, ((uint8_t)ctx->opcode) << 2);
+ break;
+ case M16_OPC_LW:
+ gen_ldst(ctx, OPC_LW, ry, rx, offset << 2);
+ break;
+ case M16_OPC_LBU:
+ gen_ldst(ctx, OPC_LBU, ry, rx, offset);
+ break;
+ case M16_OPC_LHU:
+ gen_ldst(ctx, OPC_LHU, ry, rx, offset << 1);
+ break;
+ case M16_OPC_LWPC:
+ gen_ldst(ctx, OPC_LWPC, rx, 0, ((uint8_t)ctx->opcode) << 2);
+ break;
+#if defined (TARGET_MIPS64)
+ case M16_OPC_LWU:
+ check_mips_64(ctx);
+ gen_ldst(ctx, OPC_LWU, ry, rx, offset << 2);
+ break;
+#endif
+ case M16_OPC_SB:
+ gen_ldst(ctx, OPC_SB, ry, rx, offset);
+ break;
+ case M16_OPC_SH:
+ gen_ldst(ctx, OPC_SH, ry, rx, offset << 1);
+ break;
+ case M16_OPC_SWSP:
+ gen_ldst(ctx, OPC_SW, rx, 29, ((uint8_t)ctx->opcode) << 2);
+ break;
+ case M16_OPC_SW:
+ gen_ldst(ctx, OPC_SW, ry, rx, offset << 2);
+ break;
+ case M16_OPC_RRR:
+ {
+ int rz = xlat((ctx->opcode >> 2) & 0x7);
+ int mips32_op;
+
+ switch (ctx->opcode & 0x3) {
+ case RRR_ADDU:
+ mips32_op = OPC_ADDU;
+ break;
+ case RRR_SUBU:
+ mips32_op = OPC_SUBU;
+ break;
+#if defined(TARGET_MIPS64)
+ case RRR_DADDU:
+ mips32_op = OPC_DADDU;
+ check_mips_64(ctx);
+ break;
+ case RRR_DSUBU:
+ mips32_op = OPC_DSUBU;
+ check_mips_64(ctx);
+ break;
+#endif
+ default:
+ generate_exception(ctx, EXCP_RI);
+ goto done;
+ }
+
+ gen_arith(env, ctx, mips32_op, rz, rx, ry);
+ done:
+ ;
+ }
break;
- case OPC_NMSUB_PS:
- check_cp1_64bitmode(ctx);
- {
- TCGv_i64 fp0 = tcg_temp_new_i64();
- TCGv_i64 fp1 = tcg_temp_new_i64();
- TCGv_i64 fp2 = tcg_temp_new_i64();
+ case M16_OPC_RR:
+ switch (op1) {
+ case RR_JR:
+ {
+ int nd = (ctx->opcode >> 7) & 0x1;
+ int link = (ctx->opcode >> 6) & 0x1;
+ int ra = (ctx->opcode >> 5) & 0x1;
- gen_load_fpr64(ctx, fp0, fs);
- gen_load_fpr64(ctx, fp1, ft);
- gen_load_fpr64(ctx, fp2, fr);
- gen_helper_float_nmulsub_ps(fp2, fp0, fp1, fp2);
- tcg_temp_free_i64(fp0);
- tcg_temp_free_i64(fp1);
- gen_store_fpr64(ctx, fp2, fd);
- tcg_temp_free_i64(fp2);
+ if (link) {
+ op = nd ? OPC_JALRC : OPC_JALR;
+ } else {
+ op = OPC_JR;
+ }
+
+ gen_compute_branch(ctx, op, 2, ra ? 31 : rx, 31, 0);
+ if (!nd) {
+ *is_branch = 1;
+ }
+ }
+ break;
+ case RR_SDBBP:
+            /* XXX: not clear which exception should be raised
+             *      when in debug mode; note that both branches
+             *      below currently raise the same EXCP_DBp.
+             */
+ check_insn(env, ctx, ISA_MIPS32);
+ if (!(ctx->hflags & MIPS_HFLAG_DM)) {
+ generate_exception(ctx, EXCP_DBp);
+ } else {
+ generate_exception(ctx, EXCP_DBp);
+ }
+ break;
+ case RR_SLT:
+ gen_slt(env, OPC_SLT, 24, rx, ry);
+ break;
+ case RR_SLTU:
+ gen_slt(env, OPC_SLTU, 24, rx, ry);
+ break;
+ case RR_BREAK:
+ generate_exception(ctx, EXCP_BREAK);
+ break;
+ case RR_SLLV:
+ gen_shift(env, ctx, OPC_SLLV, ry, rx, ry);
+ break;
+ case RR_SRLV:
+ gen_shift(env, ctx, OPC_SRLV, ry, rx, ry);
+ break;
+ case RR_SRAV:
+ gen_shift(env, ctx, OPC_SRAV, ry, rx, ry);
+ break;
+#if defined (TARGET_MIPS64)
+ case RR_DSRL:
+ check_mips_64(ctx);
+ gen_shift_imm(env, ctx, OPC_DSRL, ry, ry, sa);
+ break;
+#endif
+ case RR_CMP:
+ gen_logic(env, OPC_XOR, 24, rx, ry);
+ break;
+ case RR_NEG:
+ gen_arith(env, ctx, OPC_SUBU, rx, 0, ry);
+ break;
+ case RR_AND:
+ gen_logic(env, OPC_AND, rx, rx, ry);
+ break;
+ case RR_OR:
+ gen_logic(env, OPC_OR, rx, rx, ry);
+ break;
+ case RR_XOR:
+ gen_logic(env, OPC_XOR, rx, rx, ry);
+ break;
+ case RR_NOT:
+ gen_logic(env, OPC_NOR, rx, ry, 0);
+ break;
+ case RR_MFHI:
+ gen_HILO(ctx, OPC_MFHI, rx);
+ break;
+ case RR_CNVT:
+ switch (cnvt_op) {
+ case RR_RY_CNVT_ZEB:
+ tcg_gen_ext8u_tl(cpu_gpr[rx], cpu_gpr[rx]);
+ break;
+ case RR_RY_CNVT_ZEH:
+ tcg_gen_ext16u_tl(cpu_gpr[rx], cpu_gpr[rx]);
+ break;
+ case RR_RY_CNVT_SEB:
+ tcg_gen_ext8s_tl(cpu_gpr[rx], cpu_gpr[rx]);
+ break;
+ case RR_RY_CNVT_SEH:
+ tcg_gen_ext16s_tl(cpu_gpr[rx], cpu_gpr[rx]);
+ break;
+#if defined (TARGET_MIPS64)
+ case RR_RY_CNVT_ZEW:
+ check_mips_64(ctx);
+ tcg_gen_ext32u_tl(cpu_gpr[rx], cpu_gpr[rx]);
+ break;
+ case RR_RY_CNVT_SEW:
+ check_mips_64(ctx);
+ tcg_gen_ext32s_tl(cpu_gpr[rx], cpu_gpr[rx]);
+ break;
+#endif
+ default:
+ generate_exception(ctx, EXCP_RI);
+ break;
+ }
+ break;
+ case RR_MFLO:
+ gen_HILO(ctx, OPC_MFLO, rx);
+ break;
+#if defined (TARGET_MIPS64)
+ case RR_DSRA:
+ check_mips_64(ctx);
+ gen_shift_imm(env, ctx, OPC_DSRA, ry, ry, sa);
+ break;
+ case RR_DSLLV:
+ check_mips_64(ctx);
+ gen_shift(env, ctx, OPC_DSLLV, ry, rx, ry);
+ break;
+ case RR_DSRLV:
+ check_mips_64(ctx);
+ gen_shift(env, ctx, OPC_DSRLV, ry, rx, ry);
+ break;
+ case RR_DSRAV:
+ check_mips_64(ctx);
+ gen_shift(env, ctx, OPC_DSRAV, ry, rx, ry);
+ break;
+#endif
+ case RR_MULT:
+ gen_muldiv(ctx, OPC_MULT, rx, ry);
+ break;
+ case RR_MULTU:
+ gen_muldiv(ctx, OPC_MULTU, rx, ry);
+ break;
+ case RR_DIV:
+ gen_muldiv(ctx, OPC_DIV, rx, ry);
+ break;
+ case RR_DIVU:
+ gen_muldiv(ctx, OPC_DIVU, rx, ry);
+ break;
+#if defined (TARGET_MIPS64)
+ case RR_DMULT:
+ check_mips_64(ctx);
+ gen_muldiv(ctx, OPC_DMULT, rx, ry);
+ break;
+ case RR_DMULTU:
+ check_mips_64(ctx);
+ gen_muldiv(ctx, OPC_DMULTU, rx, ry);
+ break;
+ case RR_DDIV:
+ check_mips_64(ctx);
+ gen_muldiv(ctx, OPC_DDIV, rx, ry);
+ break;
+ case RR_DDIVU:
+ check_mips_64(ctx);
+ gen_muldiv(ctx, OPC_DDIVU, rx, ry);
+ break;
+#endif
+ default:
+ generate_exception(ctx, EXCP_RI);
+ break;
}
- opn = "nmsub.ps";
break;
+ case M16_OPC_EXTEND:
+ decode_extended_mips16_opc(env, ctx, is_branch);
+ n_bytes = 4;
+ break;
+#if defined(TARGET_MIPS64)
+ case M16_OPC_I64:
+ funct = (ctx->opcode >> 8) & 0x7;
+ decode_i64_mips16(env, ctx, ry, funct, offset, 0);
+ break;
+#endif
default:
- MIPS_INVAL(opn);
- generate_exception (ctx, EXCP_RI);
- return;
+ generate_exception(ctx, EXCP_RI);
+ break;
}
- MIPS_DEBUG("%s %s, %s, %s, %s", opn, fregnames[fd], fregnames[fr],
- fregnames[fs], fregnames[ft]);
+
+ return n_bytes;
}
-/* ISA extensions (ASEs) */
-/* MIPS16 extension to MIPS32 */
/* SmartMIPS extension to MIPS32 */
#if defined(TARGET_MIPS64)
#endif
-static void decode_opc (CPUState *env, DisasContext *ctx)
+static void decode_opc (CPUState *env, DisasContext *ctx, int *is_branch)
{
int32_t offset;
int rs, rt, rd, sa;
}
/* Handle blikely not taken case */
- if ((ctx->hflags & MIPS_HFLAG_BMASK) == MIPS_HFLAG_BL) {
+ if ((ctx->hflags & MIPS_HFLAG_BMASK_BASE) == MIPS_HFLAG_BL) {
int l1 = gen_new_label();
MIPS_DEBUG("blikely condition (" TARGET_FMT_lx ")", ctx->pc + 4);
gen_goto_tb(ctx, 1, ctx->pc + 4);
gen_set_label(l1);
}
+
+ if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)))
+ tcg_gen_debug_insn_start(ctx->pc);
+
op = MASK_OP_MAJOR(ctx->opcode);
rs = (ctx->opcode >> 21) & 0x1f;
rt = (ctx->opcode >> 16) & 0x1f;
case OPC_SPECIAL:
op1 = MASK_SPECIAL(ctx->opcode);
switch (op1) {
- case OPC_SLL: /* Arithmetic with immediate */
- case OPC_SRL ... OPC_SRA:
- gen_arith_imm(env, ctx, op1, rd, rt, sa);
+ case OPC_SLL: /* Shift with immediate */
+ case OPC_SRA:
+ gen_shift_imm(env, ctx, op1, rd, rt, sa);
+ break;
+ case OPC_SRL:
+ switch ((ctx->opcode >> 21) & 0x1f) {
+ case 1:
+ /* rotr is decoded as srl on non-R2 CPUs */
+ if (env->insn_flags & ISA_MIPS32R2) {
+ op1 = OPC_ROTR;
+ }
+ /* Fallthrough */
+ case 0:
+ gen_shift_imm(env, ctx, op1, rd, rt, sa);
+ break;
+ default:
+ generate_exception(ctx, EXCP_RI);
+ break;
+ }
break;
- case OPC_MOVZ ... OPC_MOVN:
+ case OPC_MOVN: /* Conditional move */
+ case OPC_MOVZ:
check_insn(env, ctx, ISA_MIPS4 | ISA_MIPS32);
- case OPC_SLLV: /* Arithmetic */
- case OPC_SRLV ... OPC_SRAV:
- case OPC_ADD ... OPC_NOR:
- case OPC_SLT ... OPC_SLTU:
+ gen_cond_move(env, op1, rd, rs, rt);
+ break;
+ case OPC_ADD ... OPC_SUBU:
gen_arith(env, ctx, op1, rd, rs, rt);
break;
+ case OPC_SLLV: /* Shifts */
+ case OPC_SRAV:
+ gen_shift(env, ctx, op1, rd, rs, rt);
+ break;
+ case OPC_SRLV:
+ switch ((ctx->opcode >> 6) & 0x1f) {
+ case 1:
+ /* rotrv is decoded as srlv on non-R2 CPUs */
+ if (env->insn_flags & ISA_MIPS32R2) {
+ op1 = OPC_ROTRV;
+ }
+ /* Fallthrough */
+ case 0:
+ gen_shift(env, ctx, op1, rd, rs, rt);
+ break;
+ default:
+ generate_exception(ctx, EXCP_RI);
+ break;
+ }
+ break;
+ case OPC_SLT: /* Set on less than */
+ case OPC_SLTU:
+ gen_slt(env, op1, rd, rs, rt);
+ break;
+         case OPC_AND:  /* Logic */
+ case OPC_OR:
+ case OPC_NOR:
+ case OPC_XOR:
+ gen_logic(env, op1, rd, rs, rt);
+ break;
case OPC_MULT ... OPC_DIVU:
if (sa) {
check_insn(env, ctx, INSN_VR54XX);
gen_muldiv(ctx, op1, rs, rt);
break;
case OPC_JR ... OPC_JALR:
- gen_compute_branch(ctx, op1, rs, rd, sa);
- return;
+ gen_compute_branch(ctx, op1, 4, rs, rd, sa);
+ *is_branch = 1;
+ break;
case OPC_TGE ... OPC_TEQ: /* Traps */
case OPC_TNE:
gen_trap(ctx, op1, rs, rt, -1);
break;
case OPC_SYSCALL:
generate_exception(ctx, EXCP_SYSCALL);
+ ctx->bstate = BS_STOP;
break;
case OPC_BREAK:
generate_exception(ctx, EXCP_BREAK);
case OPC_MOVCI:
check_insn(env, ctx, ISA_MIPS4 | ISA_MIPS32);
if (env->CP0_Config1 & (1 << CP0C1_FP)) {
- save_cpu_state(ctx, 1);
check_cp1_enabled(ctx);
gen_movci(ctx, rd, rs, (ctx->opcode >> 18) & 0x7,
(ctx->opcode >> 16) & 1);
#if defined(TARGET_MIPS64)
/* MIPS64 specific opcodes */
case OPC_DSLL:
- case OPC_DSRL ... OPC_DSRA:
+ case OPC_DSRA:
case OPC_DSLL32:
- case OPC_DSRL32 ... OPC_DSRA32:
+ case OPC_DSRA32:
check_insn(env, ctx, ISA_MIPS3);
check_mips_64(ctx);
- gen_arith_imm(env, ctx, op1, rd, rt, sa);
+ gen_shift_imm(env, ctx, op1, rd, rt, sa);
+ break;
+ case OPC_DSRL:
+ switch ((ctx->opcode >> 21) & 0x1f) {
+ case 1:
+ /* drotr is decoded as dsrl on non-R2 CPUs */
+ if (env->insn_flags & ISA_MIPS32R2) {
+ op1 = OPC_DROTR;
+ }
+ /* Fallthrough */
+ case 0:
+ check_insn(env, ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_shift_imm(env, ctx, op1, rd, rt, sa);
+ break;
+ default:
+ generate_exception(ctx, EXCP_RI);
+ break;
+ }
+ break;
+ case OPC_DSRL32:
+ switch ((ctx->opcode >> 21) & 0x1f) {
+ case 1:
+ /* drotr32 is decoded as dsrl32 on non-R2 CPUs */
+ if (env->insn_flags & ISA_MIPS32R2) {
+ op1 = OPC_DROTR32;
+ }
+ /* Fallthrough */
+ case 0:
+ check_insn(env, ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_shift_imm(env, ctx, op1, rd, rt, sa);
+ break;
+ default:
+ generate_exception(ctx, EXCP_RI);
+ break;
+ }
break;
- case OPC_DSLLV:
- case OPC_DSRLV ... OPC_DSRAV:
case OPC_DADD ... OPC_DSUBU:
check_insn(env, ctx, ISA_MIPS3);
check_mips_64(ctx);
gen_arith(env, ctx, op1, rd, rs, rt);
break;
+ case OPC_DSLLV:
+ case OPC_DSRAV:
+ check_insn(env, ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_shift(env, ctx, op1, rd, rs, rt);
+ break;
+ case OPC_DSRLV:
+ switch ((ctx->opcode >> 6) & 0x1f) {
+ case 1:
+ /* drotrv is decoded as dsrlv on non-R2 CPUs */
+ if (env->insn_flags & ISA_MIPS32R2) {
+ op1 = OPC_DROTRV;
+ }
+ /* Fallthrough */
+ case 0:
+ check_insn(env, ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_shift(env, ctx, op1, rd, rs, rt);
+ break;
+ default:
+ generate_exception(ctx, EXCP_RI);
+ break;
+ }
+ break;
case OPC_DMULT ... OPC_DDIVU:
check_insn(env, ctx, ISA_MIPS3);
check_mips_64(ctx);
case OPC_RDHWR:
check_insn(env, ctx, ISA_MIPS32R2);
{
- TCGv t0 = tcg_temp_local_new();
+ TCGv t0 = tcg_temp_new();
switch (rd) {
case 0:
save_cpu_state(ctx, 1);
gen_helper_rdhwr_cpunum(t0);
+ gen_store_gpr(t0, rt);
break;
case 1:
save_cpu_state(ctx, 1);
gen_helper_rdhwr_synci_step(t0);
+ gen_store_gpr(t0, rt);
break;
case 2:
save_cpu_state(ctx, 1);
gen_helper_rdhwr_cc(t0);
+ gen_store_gpr(t0, rt);
break;
case 3:
save_cpu_state(ctx, 1);
gen_helper_rdhwr_ccres(t0);
+ gen_store_gpr(t0, rt);
break;
case 29:
#if defined(CONFIG_USER_ONLY)
tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, tls_value));
+ gen_store_gpr(t0, rt);
break;
#else
/* XXX: Some CPUs implement this in hardware.
generate_exception(ctx, EXCP_RI);
break;
}
- gen_store_gpr(t0, rt);
tcg_temp_free(t0);
}
break;
case OPC_FORK:
check_insn(env, ctx, ASE_MT);
{
- TCGv t0 = tcg_temp_local_new();
- TCGv t1 = tcg_temp_local_new();
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
gen_load_gpr(t0, rt);
gen_load_gpr(t1, rs);
case OPC_YIELD:
check_insn(env, ctx, ASE_MT);
{
- TCGv t0 = tcg_temp_local_new();
+ TCGv t0 = tcg_temp_new();
+ save_cpu_state(ctx, 1);
gen_load_gpr(t0, rs);
gen_helper_yield(t0, t0);
gen_store_gpr(t0, rd);
switch (op1) {
case OPC_BLTZ ... OPC_BGEZL: /* REGIMM branches */
case OPC_BLTZAL ... OPC_BGEZALL:
- gen_compute_branch(ctx, op1, rs, -1, imm << 2);
- return;
+ gen_compute_branch(ctx, op1, 4, rs, -1, imm << 2);
+ *is_branch = 1;
+ break;
case OPC_TGEI ... OPC_TEQI: /* REGIMM traps */
case OPC_TNEI:
gen_trap(ctx, op1, rs, -1, imm);
case OPC_MFMC0:
#ifndef CONFIG_USER_ONLY
{
- TCGv t0 = tcg_temp_local_new();
+ TCGv t0 = tcg_temp_new();
op2 = MASK_MFMC0(ctx->opcode);
switch (op2) {
case OPC_DMT:
check_insn(env, ctx, ASE_MT);
gen_helper_dmt(t0, t0);
+ gen_store_gpr(t0, rt);
break;
case OPC_EMT:
check_insn(env, ctx, ASE_MT);
gen_helper_emt(t0, t0);
+ gen_store_gpr(t0, rt);
break;
case OPC_DVPE:
check_insn(env, ctx, ASE_MT);
gen_helper_dvpe(t0, t0);
+ gen_store_gpr(t0, rt);
break;
case OPC_EVPE:
check_insn(env, ctx, ASE_MT);
gen_helper_evpe(t0, t0);
+ gen_store_gpr(t0, rt);
break;
case OPC_DI:
check_insn(env, ctx, ISA_MIPS32R2);
save_cpu_state(ctx, 1);
gen_helper_di(t0);
+ gen_store_gpr(t0, rt);
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
break;
check_insn(env, ctx, ISA_MIPS32R2);
save_cpu_state(ctx, 1);
gen_helper_ei(t0);
+ gen_store_gpr(t0, rt);
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
break;
generate_exception(ctx, EXCP_RI);
break;
}
- gen_store_gpr(t0, rt);
tcg_temp_free(t0);
}
#endif /* !CONFIG_USER_ONLY */
break;
}
break;
- case OPC_ADDI ... OPC_LUI: /* Arithmetic with immediate opcode */
+ case OPC_ADDI: /* Arithmetic with immediate opcode */
+ case OPC_ADDIU:
gen_arith_imm(env, ctx, op, rt, rs, imm);
break;
+ case OPC_SLTI: /* Set on less than with immediate opcode */
+ case OPC_SLTIU:
+ gen_slt_imm(env, op, rt, rs, imm);
+ break;
+     case OPC_ANDI: /* Logic with immediate opcode */
+ case OPC_LUI:
+ case OPC_ORI:
+ case OPC_XORI:
+ gen_logic_imm(env, op, rt, rs, imm);
+ break;
case OPC_J ... OPC_JAL: /* Jump */
offset = (int32_t)(ctx->opcode & 0x3FFFFFF) << 2;
- gen_compute_branch(ctx, op, rs, rt, offset);
- return;
+ gen_compute_branch(ctx, op, 4, rs, rt, offset);
+ *is_branch = 1;
+ break;
case OPC_BEQ ... OPC_BGTZ: /* Branch */
case OPC_BEQL ... OPC_BGTZL:
- gen_compute_branch(ctx, op, rs, rt, imm << 2);
- return;
+ gen_compute_branch(ctx, op, 4, rs, rt, imm << 2);
+ *is_branch = 1;
+ break;
case OPC_LB ... OPC_LWR: /* Load and stores */
case OPC_SB ... OPC_SW:
case OPC_SWR:
case OPC_LL:
- case OPC_SC:
gen_ldst(ctx, op, rt, rs, imm);
break;
+ case OPC_SC:
+ gen_st_cond(ctx, op, rt, rs, imm);
+ break;
case OPC_CACHE:
check_insn(env, ctx, ISA_MIPS3 | ISA_MIPS32);
/* Treat as NOP. */
case OPC_SWC1:
case OPC_SDC1:
if (env->CP0_Config1 & (1 << CP0C1_FP)) {
- save_cpu_state(ctx, 1);
check_cp1_enabled(ctx);
gen_flt_ldst(ctx, op, rt, rs, imm);
} else {
case OPC_CP1:
if (env->CP0_Config1 & (1 << CP0C1_FP)) {
- save_cpu_state(ctx, 1);
check_cp1_enabled(ctx);
op1 = MASK_CP1(ctx->opcode);
switch (op1) {
case OPC_BC1:
gen_compute_branch1(env, ctx, MASK_BC1(ctx->opcode),
(rt >> 2) & 0x7, imm << 2);
- return;
+ *is_branch = 1;
+ break;
case OPC_S_FMT:
case OPC_D_FMT:
case OPC_W_FMT:
case OPC_CP3:
if (env->CP0_Config1 & (1 << CP0C1_FP)) {
- save_cpu_state(ctx, 1);
check_cp1_enabled(ctx);
op1 = MASK_CP3(ctx->opcode);
switch (op1) {
case OPC_SDL ... OPC_SDR:
case OPC_LLD:
case OPC_LD:
- case OPC_SCD:
case OPC_SD:
check_insn(env, ctx, ISA_MIPS3);
check_mips_64(ctx);
gen_ldst(ctx, op, rt, rs, imm);
break;
- case OPC_DADDI ... OPC_DADDIU:
+ case OPC_SCD:
+ check_insn(env, ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_st_cond(ctx, op, rt, rs, imm);
+ break;
+ case OPC_DADDI:
+ case OPC_DADDIU:
check_insn(env, ctx, ISA_MIPS3);
check_mips_64(ctx);
gen_arith_imm(env, ctx, op, rt, rs, imm);
#endif
case OPC_JALX:
check_insn(env, ctx, ASE_MIPS16);
- /* MIPS16: Not implemented. */
+ offset = (int32_t)(ctx->opcode & 0x3FFFFFF) << 2;
+ gen_compute_branch(ctx, op, 4, rs, rt, offset);
+ *is_branch = 1;
+ break;
case OPC_MDMX:
check_insn(env, ctx, ASE_MDMX);
/* MDMX: Not implemented. */
generate_exception(ctx, EXCP_RI);
break;
}
- if (ctx->hflags & MIPS_HFLAG_BMASK) {
- int hflags = ctx->hflags & MIPS_HFLAG_BMASK;
- /* Branches completion */
- ctx->hflags &= ~MIPS_HFLAG_BMASK;
- ctx->bstate = BS_BRANCH;
- save_cpu_state(ctx, 0);
- /* FIXME: Need to clear can_do_io. */
- switch (hflags) {
- case MIPS_HFLAG_B:
- /* unconditional branch */
- MIPS_DEBUG("unconditional branch");
- gen_goto_tb(ctx, 0, ctx->btarget);
- break;
- case MIPS_HFLAG_BL:
- /* blikely taken case */
- MIPS_DEBUG("blikely branch taken");
- gen_goto_tb(ctx, 0, ctx->btarget);
- break;
- case MIPS_HFLAG_BC:
- /* Conditional branch */
- MIPS_DEBUG("conditional branch");
- {
- int l1 = gen_new_label();
-
- tcg_gen_brcondi_tl(TCG_COND_NE, bcond, 0, l1);
- gen_goto_tb(ctx, 1, ctx->pc + 4);
- gen_set_label(l1);
- gen_goto_tb(ctx, 0, ctx->btarget);
- }
- break;
- case MIPS_HFLAG_BR:
- /* unconditional branch to register */
- MIPS_DEBUG("branch to register");
- tcg_gen_mov_tl(cpu_PC, btarget);
- tcg_gen_exit_tb(0);
- break;
- default:
- MIPS_DEBUG("unknown branch");
- break;
- }
- }
}
static inline void
int j, lj = -1;
int num_insns;
int max_insns;
+ int insn_bytes;
+ int is_branch;
if (search_pc)
qemu_log("search pc %d\n", search_pc);
pc_start = tb->pc;
- /* Leave some spare opc slots for branch handling. */
- gen_opc_end = gen_opc_buf + OPC_MAX_SIZE - 16;
+ gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
ctx.pc = pc_start;
ctx.saved_pc = -1;
+ ctx.singlestep_enabled = env->singlestep_enabled;
ctx.tb = tb;
ctx.bstate = BS_NONE;
/* Restore delay slot state from the tb context. */
LOG_DISAS("\ntb %p idx %d hflags %04x\n", tb, ctx.mem_idx, ctx.hflags);
gen_icount_start();
while (ctx.bstate == BS_NONE) {
- if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
- TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
+ QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
if (bp->pc == ctx.pc) {
save_cpu_state(&ctx, 1);
ctx.bstate = BS_BRANCH;
}
if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
gen_io_start();
- ctx.opcode = ldl_code(ctx.pc);
- decode_opc(env, &ctx);
- ctx.pc += 4;
+
+ is_branch = 0;
+ if (!(ctx.hflags & MIPS_HFLAG_M16)) {
+ ctx.opcode = ldl_code(ctx.pc);
+ insn_bytes = 4;
+ decode_opc(env, &ctx, &is_branch);
+ } else if (env->insn_flags & ASE_MIPS16) {
+ ctx.opcode = lduw_code(ctx.pc);
+ insn_bytes = decode_mips16_opc(env, &ctx, &is_branch);
+ } else {
+ generate_exception(&ctx, EXCP_RI);
+ break;
+ }
+ if (!is_branch) {
+ handle_delay_slot(env, &ctx, insn_bytes);
+ }
+ ctx.pc += insn_bytes;
+
num_insns++;
- if (env->singlestep_enabled)
+ /* Execute a branch and its delay slot as a single instruction.
+ This is what GDB expects and is consistent with what the
+ hardware does (e.g. if a delay slot instruction faults, the
+ reported PC is the PC of the branch). */
+ if (env->singlestep_enabled && (ctx.hflags & MIPS_HFLAG_BMASK) == 0)
break;
if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
if (num_insns >= max_insns)
break;
-#if defined (MIPS_SINGLE_STEP)
- break;
-#endif
+
+ if (singlestep)
+ break;
}
if (tb->cflags & CF_LAST_IO)
gen_io_end();
- if (env->singlestep_enabled) {
+ if (env->singlestep_enabled && ctx.bstate != BS_BRANCH) {
save_cpu_state(&ctx, ctx.bstate == BS_NONE);
gen_helper_0i(raise_exception, EXCP_DEBUG);
} else {
if (!SIGN_EXT_P(env->CP0_EPC))
cpu_fprintf(f, "BROKEN: EPC=0x" TARGET_FMT_lx "\n", env->CP0_EPC);
- if (!SIGN_EXT_P(env->CP0_LLAddr))
- cpu_fprintf(f, "BROKEN: LLAddr=0x" TARGET_FMT_lx "\n", env->CP0_LLAddr);
+ if (!SIGN_EXT_P(env->lladdr))
+ cpu_fprintf(f, "BROKEN: LLAddr=0x" TARGET_FMT_lx "\n", env->lladdr);
}
#endif
cpu_fprintf(f, "CP0 Status 0x%08x Cause 0x%08x EPC 0x" TARGET_FMT_lx "\n",
env->CP0_Status, env->CP0_Cause, env->CP0_EPC);
cpu_fprintf(f, " Config0 0x%08x Config1 0x%08x LLAddr 0x" TARGET_FMT_lx "\n",
- env->CP0_Config0, env->CP0_Config1, env->CP0_LLAddr);
+ env->CP0_Config0, env->CP0_Config1, env->lladdr);
if (env->hflags & MIPS_HFLAG_FPU)
fpu_dump_state(env, f, cpu_fprintf, flags);
#if defined(TARGET_MIPS64) && defined(MIPS_DEBUG_SIGN_EXTENSIONS)
return;
cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
- for (i = 0; i < 32; i++)
+ TCGV_UNUSED(cpu_gpr[0]);
+ for (i = 1; i < 32; i++)
cpu_gpr[i] = tcg_global_mem_new(TCG_AREG0,
offsetof(CPUState, active_tc.gpr[i]),
regnames[i]);
hflags = tcg_global_mem_new_i32(TCG_AREG0,
offsetof(CPUState, hflags), "hflags");
- for (i = 0; i < 32; i++)
- fpu_fpr32[i] = tcg_global_mem_new_i32(TCG_AREG0,
- offsetof(CPUState, active_fpu.fpr[i].w[FP_ENDIAN_IDX]),
- fregnames[i]);
- for (i = 0; i < 32; i++)
- fpu_fpr32h[i] = tcg_global_mem_new_i32(TCG_AREG0,
- offsetof(CPUState, active_fpu.fpr[i].w[!FP_ENDIAN_IDX]),
- fregnames_h[i]);
fpu_fcr0 = tcg_global_mem_new_i32(TCG_AREG0,
offsetof(CPUState, active_fpu.fcr0),
"fcr0");
return NULL;
env = qemu_mallocz(sizeof(CPUMIPSState));
env->cpu_model = def;
+ env->cpu_model_str = cpu_model;
cpu_exec_init(env);
- env->cpu_model_str = cpu_model;
+#ifndef CONFIG_USER_ONLY
+ mmu_init(env, def);
+#endif
+ fpu_init(env, def);
+ mvp_init(env, def);
mips_tcg_init();
cpu_reset(env);
+ qemu_init_vcpu(env);
return env;
}
}
memset(env, 0, offsetof(CPUMIPSState, breakpoints));
-
tlb_flush(env, 1);
- /* Minimal init */
+ /* Reset registers to their default values */
+ env->CP0_PRid = env->cpu_model->CP0_PRid;
+ env->CP0_Config0 = env->cpu_model->CP0_Config0;
+#ifdef TARGET_WORDS_BIGENDIAN
+ env->CP0_Config0 |= (1 << CP0C0_BE);
+#endif
+ env->CP0_Config1 = env->cpu_model->CP0_Config1;
+ env->CP0_Config2 = env->cpu_model->CP0_Config2;
+ env->CP0_Config3 = env->cpu_model->CP0_Config3;
+ env->CP0_Config6 = env->cpu_model->CP0_Config6;
+ env->CP0_Config7 = env->cpu_model->CP0_Config7;
+ env->CP0_LLAddr_rw_bitmask = env->cpu_model->CP0_LLAddr_rw_bitmask
+ << env->cpu_model->CP0_LLAddr_shift;
+ env->CP0_LLAddr_shift = env->cpu_model->CP0_LLAddr_shift;
+ env->SYNCI_Step = env->cpu_model->SYNCI_Step;
+ env->CCRes = env->cpu_model->CCRes;
+ env->CP0_Status_rw_bitmask = env->cpu_model->CP0_Status_rw_bitmask;
+ env->CP0_TCStatus_rw_bitmask = env->cpu_model->CP0_TCStatus_rw_bitmask;
+ env->CP0_SRSCtl = env->cpu_model->CP0_SRSCtl;
+ env->current_tc = 0;
+ env->SEGBITS = env->cpu_model->SEGBITS;
+ env->SEGMask = (target_ulong)((1ULL << env->cpu_model->SEGBITS) - 1);
+#if defined(TARGET_MIPS64)
+ if (env->cpu_model->insn_flags & ISA_MIPS3) {
+ env->SEGMask |= 3ULL << 62;
+ }
+#endif
+ env->PABITS = env->cpu_model->PABITS;
+ env->PAMask = (target_ulong)((1ULL << env->cpu_model->PABITS) - 1);
+ env->CP0_SRSConf0_rw_bitmask = env->cpu_model->CP0_SRSConf0_rw_bitmask;
+ env->CP0_SRSConf0 = env->cpu_model->CP0_SRSConf0;
+ env->CP0_SRSConf1_rw_bitmask = env->cpu_model->CP0_SRSConf1_rw_bitmask;
+ env->CP0_SRSConf1 = env->cpu_model->CP0_SRSConf1;
+ env->CP0_SRSConf2_rw_bitmask = env->cpu_model->CP0_SRSConf2_rw_bitmask;
+ env->CP0_SRSConf2 = env->cpu_model->CP0_SRSConf2;
+ env->CP0_SRSConf3_rw_bitmask = env->cpu_model->CP0_SRSConf3_rw_bitmask;
+ env->CP0_SRSConf3 = env->cpu_model->CP0_SRSConf3;
+ env->CP0_SRSConf4_rw_bitmask = env->cpu_model->CP0_SRSConf4_rw_bitmask;
+ env->CP0_SRSConf4 = env->cpu_model->CP0_SRSConf4;
+ env->insn_flags = env->cpu_model->insn_flags;
+
#if defined(CONFIG_USER_ONLY)
env->hflags = MIPS_HFLAG_UM;
+ /* Enable access to the SYNCI_Step register. */
+ env->CP0_HWREna |= (1 << 1);
+ if (env->CP0_Config1 & (1 << CP0C1_FP)) {
+ env->hflags |= MIPS_HFLAG_FPU;
+ }
+#ifdef TARGET_MIPS64
+ if (env->active_fpu.fcr0 & (1 << FCR0_F64)) {
+ env->hflags |= MIPS_HFLAG_F64;
+ }
+#endif
#else
if (env->hflags & MIPS_HFLAG_BMASK) {
/* If the exception was raised from a delay slot,
env->CP0_ErrorEPC = env->active_tc.PC;
}
env->active_tc.PC = (int32_t)0xBFC00000;
+ env->CP0_Random = env->tlb->nb_tlb - 1;
+ env->tlb->tlb_in_use = env->tlb->nb_tlb;
env->CP0_Wired = 0;
/* SMP not implemented */
env->CP0_EBase = 0x80000000;
/* Count register increments in debug mode, EJTAG version 1 */
env->CP0_Debug = (1 << CP0DB_CNT) | (0x1 << CP0DB_VER);
env->hflags = MIPS_HFLAG_CP0;
+#endif
+#if defined(TARGET_MIPS64)
+ if (env->cpu_model->insn_flags & ISA_MIPS3) {
+ env->hflags |= MIPS_HFLAG_64;
+ }
#endif
env->exception_index = EXCP_NONE;
- cpu_mips_register(env, env->cpu_model);
}
void gen_pc_load(CPUState *env, TranslationBlock *tb,