/* Jump and branches */
OPC_J = (0x02 << 26),
OPC_JAL = (0x03 << 26),
- OPC_JALS = OPC_JAL | 0x5,
OPC_BEQ = (0x04 << 26), /* Unconditional if rs = rt = 0 (B) */
OPC_BEQL = (0x14 << 26),
OPC_BNE = (0x05 << 26),
OPC_BLEZL = (0x16 << 26),
OPC_BGTZ = (0x07 << 26),
OPC_BGTZL = (0x17 << 26),
- OPC_JALX = (0x1D << 26), /* MIPS 16 only */
- OPC_JALXS = OPC_JALX | 0x5,
+ OPC_JALX = (0x1D << 26),
+ OPC_DAUI = (0x1D << 26),
/* Load and stores */
OPC_LDL = (0x1A << 26),
OPC_LDR = (0x1B << 26),
OPC_SWC2 = (0x3A << 26),
OPC_SDC1 = (0x3D << 26),
OPC_SDC2 = (0x3E << 26),
+ /* Compact Branches */
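+ /* Several compact branches share a major opcode; the rs/rt register
+ fields tell them apart (see gen_compute_compact_branch). */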
+ OPC_BLEZALC = (0x06 << 26),
+ OPC_BGEZALC = (0x06 << 26),
+ OPC_BGEUC = (0x06 << 26),
+ OPC_BGTZALC = (0x07 << 26),
+ OPC_BLTZALC = (0x07 << 26),
+ OPC_BLTUC = (0x07 << 26),
+ OPC_BOVC = (0x08 << 26),
+ OPC_BEQZALC = (0x08 << 26),
+ OPC_BEQC = (0x08 << 26),
+ OPC_BLEZC = (0x16 << 26),
+ OPC_BGEZC = (0x16 << 26),
+ OPC_BGEC = (0x16 << 26),
+ OPC_BGTZC = (0x17 << 26),
+ OPC_BLTZC = (0x17 << 26),
+ OPC_BLTC = (0x17 << 26),
+ OPC_BNVC = (0x18 << 26),
+ OPC_BNEZALC = (0x18 << 26),
+ OPC_BNEC = (0x18 << 26),
+ OPC_BC = (0x32 << 26),
+ OPC_BEQZC = (0x36 << 26),
+ OPC_JIC = (0x36 << 26),
+ OPC_BALC = (0x3A << 26),
+ OPC_BNEZC = (0x3E << 26),
+ OPC_JIALC = (0x3E << 26),
/* MDMX ASE specific */
OPC_MDMX = (0x1E << 26),
/* Cache and prefetch */
OPC_CACHE = (0x2F << 26),
OPC_PREF = (0x33 << 26),
- /* Reserved major opcode */
- OPC_MAJOR3B_RESERVED = (0x3B << 26),
+ /* PC-relative address computation / loads */
+ OPC_PCREL = (0x3B << 26),
+};
+
+/* PC-relative address computation / loads */
+#define MASK_OPC_PCREL_TOP2BITS(op) (MASK_OP_MAJOR(op) | (op & (3 << 19)))
+#define MASK_OPC_PCREL_TOP5BITS(op) (MASK_OP_MAJOR(op) | (op & (0x1f << 16)))
+enum {
+ /* Instructions determined by bits 19 and 20 */
+ OPC_ADDIUPC = OPC_PCREL | (0 << 19),
+ R6_OPC_LWPC = OPC_PCREL | (1 << 19),
+ OPC_LWUPC = OPC_PCREL | (2 << 19),
+
+ /* Instructions determined by bits 16 ... 20 */
+ OPC_AUIPC = OPC_PCREL | (0x1e << 16),
+ OPC_ALUIPC = OPC_PCREL | (0x1f << 16),
+
+ /* Other */
+ R6_OPC_LDPC = OPC_PCREL | (6 << 18),
};
/* MIPS special opcodes */
/* Jumps */
OPC_JR = 0x08 | OPC_SPECIAL, /* Also JR.HB */
OPC_JALR = 0x09 | OPC_SPECIAL, /* Also JALR.HB */
- OPC_JALRC = OPC_JALR | (0x5 << 6),
- OPC_JALRS = 0x10 | OPC_SPECIAL | (0x5 << 6),
/* Traps */
OPC_TGE = 0x30 | OPC_SPECIAL,
OPC_TGEU = 0x31 | OPC_SPECIAL,
OPC_SPIM = 0x0E | OPC_SPECIAL, /* unofficial */
OPC_SYNC = 0x0F | OPC_SPECIAL,
- OPC_SPECIAL15_RESERVED = 0x15 | OPC_SPECIAL,
OPC_SPECIAL28_RESERVED = 0x28 | OPC_SPECIAL,
OPC_SPECIAL29_RESERVED = 0x29 | OPC_SPECIAL,
OPC_SPECIAL39_RESERVED = 0x39 | OPC_SPECIAL,
R6_OPC_DMOD = OPC_DDIV | (3 << 6),
R6_OPC_DDIVU = OPC_DDIVU | (2 << 6),
R6_OPC_DMODU = OPC_DDIVU | (3 << 6),
+
+ R6_OPC_CLZ = 0x10 | OPC_SPECIAL,
+ R6_OPC_CLO = 0x11 | OPC_SPECIAL,
+ R6_OPC_DCLZ = 0x12 | OPC_SPECIAL,
+ R6_OPC_DCLO = 0x13 | OPC_SPECIAL,
+ R6_OPC_SDBBP = 0x0e | OPC_SPECIAL,
+
+ OPC_LSA = 0x05 | OPC_SPECIAL,
+ OPC_DLSA = 0x15 | OPC_SPECIAL,
};
/* Multiplication variants of the vr54xx. */
OPC_BGEZ = (0x01 << 16) | OPC_REGIMM,
OPC_BGEZL = (0x03 << 16) | OPC_REGIMM,
OPC_BLTZAL = (0x10 << 16) | OPC_REGIMM,
- OPC_BLTZALS = OPC_BLTZAL | 0x5, /* microMIPS */
OPC_BLTZALL = (0x12 << 16) | OPC_REGIMM,
OPC_BGEZAL = (0x11 << 16) | OPC_REGIMM,
- OPC_BGEZALS = OPC_BGEZAL | 0x5, /* microMIPS */
OPC_BGEZALL = (0x13 << 16) | OPC_REGIMM,
OPC_TGEI = (0x08 << 16) | OPC_REGIMM,
OPC_TGEIU = (0x09 << 16) | OPC_REGIMM,
OPC_TEQI = (0x0C << 16) | OPC_REGIMM,
OPC_TNEI = (0x0E << 16) | OPC_REGIMM,
OPC_SYNCI = (0x1F << 16) | OPC_REGIMM,
+
+ OPC_DAHI = (0x06 << 16) | OPC_REGIMM,
+ OPC_DATI = (0x1e << 16) | OPC_REGIMM,
};
/* Special2 opcodes */
#define MASK_BSHFL(op) MASK_SPECIAL3(op) | (op & (0x1F << 6))
enum {
- OPC_WSBH = (0x02 << 6) | OPC_BSHFL,
- OPC_SEB = (0x10 << 6) | OPC_BSHFL,
- OPC_SEH = (0x18 << 6) | OPC_BSHFL,
+ OPC_WSBH = (0x02 << 6) | OPC_BSHFL,
+ OPC_SEB = (0x10 << 6) | OPC_BSHFL,
+ OPC_SEH = (0x18 << 6) | OPC_BSHFL,
+ OPC_ALIGN = (0x08 << 6) | OPC_BSHFL, /* 010.bp */
+ OPC_ALIGN_END = (0x0B << 6) | OPC_BSHFL, /* 010.00 to 010.11 */
+ OPC_BITSWAP = (0x00 << 6) | OPC_BSHFL /* 00000 */
};
/* DBSHFL opcodes */
#define MASK_DBSHFL(op) MASK_SPECIAL3(op) | (op & (0x1F << 6))
enum {
- OPC_DSBH = (0x02 << 6) | OPC_DBSHFL,
- OPC_DSHD = (0x05 << 6) | OPC_DBSHFL,
+ OPC_DSBH = (0x02 << 6) | OPC_DBSHFL,
+ OPC_DSHD = (0x05 << 6) | OPC_DBSHFL,
+ OPC_DALIGN = (0x08 << 6) | OPC_DBSHFL, /* 01.bp */
+ OPC_DALIGN_END = (0x0F << 6) | OPC_DBSHFL, /* 01.000 to 01.111 */
+ OPC_DBITSWAP = (0x00 << 6) | OPC_DBSHFL, /* 00000 */
};
/* MIPS DSP REGIMM opcodes */
enum {
OPC_TLBR = 0x01 | OPC_C0,
OPC_TLBWI = 0x02 | OPC_C0,
+ OPC_TLBINV = 0x03 | OPC_C0,
+ OPC_TLBINVF = 0x04 | OPC_C0,
OPC_TLBWR = 0x06 | OPC_C0,
OPC_TLBP = 0x08 | OPC_C0,
OPC_RFE = 0x10 | OPC_C0,
OPC_W_FMT = (FMT_W << 21) | OPC_CP1,
OPC_L_FMT = (FMT_L << 21) | OPC_CP1,
OPC_PS_FMT = (FMT_PS << 21) | OPC_CP1,
+ OPC_BC1EQZ = (0x09 << 21) | OPC_CP1,
+ OPC_BC1NEZ = (0x0D << 21) | OPC_CP1,
};
#define MASK_CP1_FUNC(op) MASK_CP1(op) | (op & 0x3F)
OPC_CTC2 = (0x06 << 21) | OPC_CP2,
OPC_MTHC2 = (0x07 << 21) | OPC_CP2,
OPC_BC2 = (0x08 << 21) | OPC_CP2,
+ OPC_BC2EQZ = (0x09 << 21) | OPC_CP2,
+ OPC_BC2NEZ = (0x0D << 21) | OPC_CP2,
};
#define MASK_LMI(op) (MASK_OP_MAJOR(op) | (op & (0x1F << 21)) | (op & 0x1F))
/* global register indices */
static TCGv_ptr cpu_env;
static TCGv cpu_gpr[32], cpu_PC;
-static TCGv cpu_HI[MIPS_DSP_ACC], cpu_LO[MIPS_DSP_ACC], cpu_ACX[MIPS_DSP_ACC];
+static TCGv cpu_HI[MIPS_DSP_ACC], cpu_LO[MIPS_DSP_ACC];
static TCGv cpu_dspctrl, btarget, bcond;
static TCGv_i32 hflags;
static TCGv_i32 fpu_fcr0, fpu_fcr31;
int bstate;
target_ulong btarget;
bool ulri;
+ int kscrexist;
+ bool rxi;
+ int ie;
+ bool bi;
+ bool bp;
} DisasContext;
enum {
"LO0", "LO1", "LO2", "LO3",
};
-static const char * const regnames_ACX[] = {
- "ACX0", "ACX1", "ACX2", "ACX3",
-};
-
static const char * const fregnames[] = {
"f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
"f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
tcg_gen_mov_tl(cpu_gpr[reg], t);
}
-/* Moves to/from ACX register. */
-static inline void gen_load_ACX (TCGv t, int reg)
-{
- tcg_gen_mov_tl(t, cpu_ACX[reg]);
-}
-
-static inline void gen_store_ACX (TCGv t, int reg)
-{
- tcg_gen_mov_tl(cpu_ACX[reg], t);
-}
-
/* Moves to/from shadow registers. */
static inline void gen_load_srsgpr (int from, int to)
{
tcg_gen_add_tl(ret, arg0, arg1);
#if defined(TARGET_MIPS64)
- /* For compatibility with 32-bit code, data reference in user mode
- with Status_UX = 0 should be casted to 32-bit and sign extended.
- See the MIPS64 PRA manual, section 4.10. */
- if (((ctx->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_UM) &&
- !(ctx->hflags & MIPS_HFLAG_UX)) {
+ if (ctx->hflags & MIPS_HFLAG_AWRAP) {
tcg_gen_ext32s_i64(ret, ret);
}
#endif
}
+/* Addresses computation (translation time) */
+static target_long addr_add(DisasContext *ctx, target_long base,
+ target_long offset)
+{
+ target_long sum = base + offset;
+
+#if defined(TARGET_MIPS64)
+ if (ctx->hflags & MIPS_HFLAG_AWRAP) {
+ sum = (int32_t)sum;
+ }
+#endif
+ return sum;
+}
+
static inline void check_cp0_enabled(DisasContext *ctx)
{
if (unlikely(!(ctx->hflags & MIPS_HFLAG_CP0)))
}
}
+#ifdef TARGET_MIPS64
/* This code generates a "reserved instruction" exception if 64-bit
instructions are not enabled. */
static inline void check_mips_64(DisasContext *ctx)
if (unlikely(!(ctx->hflags & MIPS_HFLAG_64)))
generate_exception(ctx, EXCP_RI);
}
+#endif
/* Define small wrappers for gen_load_fpr* so that we have a uniform
calling interface for 32 and 64-bit FPRs. No sense in changing
FOP_CONDS(, 0, ps, FMT_PS, 64)
FOP_CONDS(abs, 1, ps, FMT_PS, 64)
#undef FOP_CONDS
+
+#define FOP_CONDNS(fmt, ifmt, bits, STORE) \
+static inline void gen_r6_cmp_ ## fmt(DisasContext * ctx, int n, \
+ int ft, int fs, int fd) \
+{ \
+ TCGv_i ## bits fp0 = tcg_temp_new_i ## bits(); \
+ TCGv_i ## bits fp1 = tcg_temp_new_i ## bits(); \
+ switch (ifmt) { \
+ case FMT_D: \
+ check_cp1_registers(ctx, fs | ft | fd); \
+ break; \
+ } \
+ gen_ldcmp_fpr ## bits(ctx, fp0, fs); \
+ gen_ldcmp_fpr ## bits(ctx, fp1, ft); \
+ switch (n) { \
+ case 0: \
+ gen_helper_r6_cmp_ ## fmt ## _af(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 1: \
+ gen_helper_r6_cmp_ ## fmt ## _un(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 2: \
+ gen_helper_r6_cmp_ ## fmt ## _eq(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 3: \
+ gen_helper_r6_cmp_ ## fmt ## _ueq(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 4: \
+ gen_helper_r6_cmp_ ## fmt ## _lt(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 5: \
+ gen_helper_r6_cmp_ ## fmt ## _ult(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 6: \
+ gen_helper_r6_cmp_ ## fmt ## _le(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 7: \
+ gen_helper_r6_cmp_ ## fmt ## _ule(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 8: \
+ gen_helper_r6_cmp_ ## fmt ## _saf(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 9: \
+ gen_helper_r6_cmp_ ## fmt ## _sun(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 10: \
+ gen_helper_r6_cmp_ ## fmt ## _seq(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 11: \
+ gen_helper_r6_cmp_ ## fmt ## _sueq(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 12: \
+ gen_helper_r6_cmp_ ## fmt ## _slt(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 13: \
+ gen_helper_r6_cmp_ ## fmt ## _sult(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 14: \
+ gen_helper_r6_cmp_ ## fmt ## _sle(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 15: \
+ gen_helper_r6_cmp_ ## fmt ## _sule(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 17: \
+ gen_helper_r6_cmp_ ## fmt ## _or(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 18: \
+ gen_helper_r6_cmp_ ## fmt ## _une(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 19: \
+ gen_helper_r6_cmp_ ## fmt ## _ne(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 25: \
+ gen_helper_r6_cmp_ ## fmt ## _sor(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 26: \
+ gen_helper_r6_cmp_ ## fmt ## _sune(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 27: \
+ gen_helper_r6_cmp_ ## fmt ## _sne(fp0, cpu_env, fp0, fp1); \
+ break; \
+ default: \
+ abort(); \
+ } \
+ STORE; \
+ tcg_temp_free_i ## bits (fp0); \
+ tcg_temp_free_i ## bits (fp1); \
+}
+
+FOP_CONDNS(d, FMT_D, 64, gen_store_fpr64(ctx, fp0, fd))
+FOP_CONDNS(s, FMT_S, 32, gen_store_fpr32(fp0, fd))
+#undef FOP_CONDNS
#undef gen_ldcmp_fpr32
#undef gen_ldcmp_fpr64
regnames[rs], uimm);
break;
case OPC_LUI:
- tcg_gen_movi_tl(cpu_gpr[rt], imm << 16);
- MIPS_DEBUG("lui %s, " TARGET_FMT_lx, regnames[rt], uimm);
+ if (rs != 0 && (ctx->insn_flags & ISA_MIPS32R6)) {
+ /* OPC_AUI */
+ tcg_gen_addi_tl(cpu_gpr[rt], cpu_gpr[rs], imm << 16);
+ tcg_gen_ext32s_tl(cpu_gpr[rt], cpu_gpr[rt]);
+ MIPS_DEBUG("aui %s, %s, %04x", regnames[rt], regnames[rs], imm);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rt], imm << 16);
+ MIPS_DEBUG("lui %s, " TARGET_FMT_lx, regnames[rt], uimm);
+ }
break;
default:
MIPS_DEBUG("%s %s", opn, regnames[reg]);
}
+static inline void gen_r6_ld(target_long addr, int reg, int memidx,
+ TCGMemOp memop)
+{
+ TCGv t0 = tcg_const_tl(addr);
+ tcg_gen_qemu_ld_tl(t0, t0, memidx, memop);
+ gen_store_gpr(t0, reg);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_pcrel(DisasContext *ctx, int rs, int16_t imm)
+{
+ target_long offset;
+ target_long addr;
+
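+ /* The PC-relative offset sits in the low opcode bits: shifting the opcode
+ left by 2 (word) or 3 (doubleword) scales it, and sextract32(..., 0, 21)
+ sign-extends the result. */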
+ switch (MASK_OPC_PCREL_TOP2BITS(ctx->opcode)) {
+ case OPC_ADDIUPC:
+ if (rs != 0) {
+ offset = sextract32(ctx->opcode << 2, 0, 21);
+ addr = addr_add(ctx, ctx->pc, offset);
+ tcg_gen_movi_tl(cpu_gpr[rs], addr);
+ }
+ break;
+ case R6_OPC_LWPC:
+ offset = sextract32(ctx->opcode << 2, 0, 21);
+ addr = addr_add(ctx, ctx->pc, offset);
+ gen_r6_ld(addr, rs, ctx->mem_idx, MO_TESL);
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_LWUPC:
+ check_mips_64(ctx);
+ offset = sextract32(ctx->opcode << 2, 0, 21);
+ addr = addr_add(ctx, ctx->pc, offset);
+ gen_r6_ld(addr, rs, ctx->mem_idx, MO_TEUL);
+ break;
+#endif
+ default:
+ switch (MASK_OPC_PCREL_TOP5BITS(ctx->opcode)) {
+ case OPC_AUIPC:
+ if (rs != 0) {
+ offset = imm << 16;
+ addr = addr_add(ctx, ctx->pc, offset);
+ tcg_gen_movi_tl(cpu_gpr[rs], addr);
+ }
+ break;
+ case OPC_ALUIPC:
+ if (rs != 0) {
+ offset = imm << 16;
+ addr = ~0xFFFF & addr_add(ctx, ctx->pc, offset);
+ tcg_gen_movi_tl(cpu_gpr[rs], addr);
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case R6_OPC_LDPC: /* bits 16 and 17 are part of immediate */
+ case R6_OPC_LDPC + (1 << 16):
+ case R6_OPC_LDPC + (2 << 16):
+ case R6_OPC_LDPC + (3 << 16):
+ check_mips_64(ctx);
+ offset = sextract32(ctx->opcode << 3, 0, 21);
+ addr = addr_add(ctx, (ctx->pc & ~0x7), offset);
+ gen_r6_ld(addr, rs, ctx->mem_idx, MO_TEQ);
+ break;
+#endif
+ default:
+ MIPS_INVAL("OPC_PCREL");
+ generate_exception(ctx, EXCP_RI);
+ break;
+ }
+ break;
+ }
+}
+
static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt)
{
const char *opn = "r6 mul/div";
gen_load_gpr(t0, rs);
switch (opc) {
case OPC_CLO:
+ case R6_OPC_CLO:
gen_helper_clo(cpu_gpr[rd], t0);
opn = "clo";
break;
case OPC_CLZ:
+ case R6_OPC_CLZ:
gen_helper_clz(cpu_gpr[rd], t0);
opn = "clz";
break;
#if defined(TARGET_MIPS64)
case OPC_DCLO:
+ case R6_OPC_DCLO:
gen_helper_dclo(cpu_gpr[rd], t0);
opn = "dclo";
break;
case OPC_DCLZ:
+ case R6_OPC_DCLZ:
gen_helper_dclz(cpu_gpr[rd], t0);
opn = "dclz";
break;
/* Branches (before delay slot) */
static void gen_compute_branch (DisasContext *ctx, uint32_t opc,
int insn_bytes,
- int rs, int rt, int32_t offset)
+ int rs, int rt, int32_t offset,
+ int delayslot_size)
{
target_ulong btgt = -1;
int blink = 0;
if (ctx->hflags & MIPS_HFLAG_BMASK) {
#ifdef MIPS_DEBUG_DISAS
- LOG_DISAS("Branch in delay slot at PC 0x" TARGET_FMT_lx "\n", ctx->pc);
+ LOG_DISAS("Branch in delay / forbidden slot at PC 0x"
+ TARGET_FMT_lx "\n", ctx->pc);
#endif
generate_exception(ctx, EXCP_RI);
goto out;
break;
case OPC_BGEZ:
case OPC_BGEZAL:
- case OPC_BGEZALS:
case OPC_BGEZALL:
case OPC_BGEZL:
case OPC_BGTZ:
case OPC_BLEZL:
case OPC_BLTZ:
case OPC_BLTZAL:
- case OPC_BLTZALS:
case OPC_BLTZALL:
case OPC_BLTZL:
/* Compare to zero */
case OPC_J:
case OPC_JAL:
case OPC_JALX:
- case OPC_JALS:
- case OPC_JALXS:
/* Jump to immediate */
btgt = ((ctx->pc + insn_bytes) & (int32_t)0xF0000000) | (uint32_t)offset;
break;
case OPC_JR:
case OPC_JALR:
- case OPC_JALRC:
- case OPC_JALRS:
/* Jump to register */
if (offset != 0 && offset != 16) {
/* Hint = 0 is JR/JALR, hint 16 is JR.HB/JALR.HB, the
ctx->hflags |= MIPS_HFLAG_B;
MIPS_DEBUG("balways");
break;
- case OPC_BGEZALS:
case OPC_BGEZAL: /* 0 >= 0 */
case OPC_BGEZALL: /* 0 >= 0 likely */
- ctx->hflags |= (opc == OPC_BGEZALS
- ? MIPS_HFLAG_BDS16
- : MIPS_HFLAG_BDS32);
/* Always take and link */
blink = 31;
ctx->hflags |= MIPS_HFLAG_B;
/* Treat as NOP. */
MIPS_DEBUG("bnever (NOP)");
goto out;
- case OPC_BLTZALS:
case OPC_BLTZAL: /* 0 < 0 */
- ctx->hflags |= (opc == OPC_BLTZALS
- ? MIPS_HFLAG_BDS16
- : MIPS_HFLAG_BDS32);
/* Handle as an unconditional branch to get correct delay
slot checking. */
blink = 31;
- btgt = ctx->pc + (opc == OPC_BLTZALS ? 6 : 8);
+ btgt = ctx->pc + insn_bytes + delayslot_size;
ctx->hflags |= MIPS_HFLAG_B;
MIPS_DEBUG("bnever and link");
break;
ctx->hflags |= MIPS_HFLAG_B;
MIPS_DEBUG("j " TARGET_FMT_lx, btgt);
break;
- case OPC_JALXS:
case OPC_JALX:
ctx->hflags |= MIPS_HFLAG_BX;
/* Fallthrough */
- case OPC_JALS:
case OPC_JAL:
blink = 31;
ctx->hflags |= MIPS_HFLAG_B;
- ctx->hflags |= ((opc == OPC_JALS || opc == OPC_JALXS)
- ? MIPS_HFLAG_BDS16
- : MIPS_HFLAG_BDS32);
MIPS_DEBUG("jal " TARGET_FMT_lx, btgt);
break;
case OPC_JR:
ctx->hflags |= MIPS_HFLAG_BR;
- if (insn_bytes == 4)
- ctx->hflags |= MIPS_HFLAG_BDS32;
MIPS_DEBUG("jr %s", regnames[rs]);
break;
- case OPC_JALRS:
case OPC_JALR:
- case OPC_JALRC:
blink = rt;
ctx->hflags |= MIPS_HFLAG_BR;
- ctx->hflags |= (opc == OPC_JALRS
- ? MIPS_HFLAG_BDS16
- : MIPS_HFLAG_BDS32);
MIPS_DEBUG("jalr %s, %s", regnames[rt], regnames[rs]);
break;
default:
tcg_gen_setcondi_tl(TCG_COND_GE, bcond, t0, 0);
MIPS_DEBUG("bgezl %s, " TARGET_FMT_lx, regnames[rs], btgt);
goto likely;
- case OPC_BGEZALS:
case OPC_BGEZAL:
- ctx->hflags |= (opc == OPC_BGEZALS
- ? MIPS_HFLAG_BDS16
- : MIPS_HFLAG_BDS32);
tcg_gen_setcondi_tl(TCG_COND_GE, bcond, t0, 0);
MIPS_DEBUG("bgezal %s, " TARGET_FMT_lx, regnames[rs], btgt);
blink = 31;
MIPS_DEBUG("bposge64 " TARGET_FMT_lx, btgt);
goto not_likely;
#endif
- case OPC_BLTZALS:
case OPC_BLTZAL:
- ctx->hflags |= (opc == OPC_BLTZALS
- ? MIPS_HFLAG_BDS16
- : MIPS_HFLAG_BDS32);
tcg_gen_setcondi_tl(TCG_COND_LT, bcond, t0, 0);
blink = 31;
MIPS_DEBUG("bltzal %s, " TARGET_FMT_lx, regnames[rs], btgt);
blink, ctx->hflags, btgt);
ctx->btarget = btgt;
+
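+ /* delayslot_size is 0 for branches without a delay slot (e.g. the MIPS16
+ and microMIPS compact forms); 2 and 4 select a 16- or 32-bit slot. */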
+ switch (delayslot_size) {
+ case 2:
+ ctx->hflags |= MIPS_HFLAG_BDS16;
+ break;
+ case 4:
+ ctx->hflags |= MIPS_HFLAG_BDS32;
+ break;
+ }
+
if (blink > 0) {
- int post_delay = insn_bytes;
+ int post_delay = insn_bytes + delayslot_size;
int lowbit = !!(ctx->hflags & MIPS_HFLAG_M16);
- if (opc != OPC_JALRC)
- post_delay += ((ctx->hflags & MIPS_HFLAG_BDS16) ? 2 : 4);
-
tcg_gen_movi_tl(cpu_gpr[blink], ctx->pc + post_delay + lowbit);
}
tcg_gen_st_tl(arg, cpu_env, off);
}
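+/* Reads of unimplemented CP0 registers return 0 on R6 and all ones on
+   earlier ISA revisions. */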
+static inline void gen_mfc0_unimplemented(DisasContext *ctx, TCGv arg)
+{
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ tcg_gen_movi_tl(arg, 0);
+ } else {
+ tcg_gen_movi_tl(arg, ~0);
+ }
+}
+
static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
{
const char *rn = "invalid";
switch (sel) {
case 0:
tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EntryLo0));
+#if defined(TARGET_MIPS64)
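+ /* Fold the RI/XI bits (63:62 of EntryLo) down into bits 31:30 so that
+ the 32-bit MFC0 view exposes them; likewise for EntryLo1 below. */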
+ if (ctx->rxi) {
+ TCGv tmp = tcg_temp_new();
+ tcg_gen_andi_tl(tmp, arg, (3ull << 62));
+ tcg_gen_shri_tl(tmp, tmp, 32);
+ tcg_gen_or_tl(arg, arg, tmp);
+ tcg_temp_free(tmp);
+ }
+#endif
tcg_gen_ext32s_tl(arg, arg);
rn = "EntryLo0";
break;
switch (sel) {
case 0:
tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EntryLo1));
+#if defined(TARGET_MIPS64)
+ if (ctx->rxi) {
+ TCGv tmp = tcg_temp_new();
+ tcg_gen_andi_tl(tmp, arg, (3ull << 62));
+ tcg_gen_shri_tl(tmp, tmp, 32);
+ tcg_gen_or_tl(arg, arg, tmp);
+ tcg_temp_free(tmp);
+ }
+#endif
tcg_gen_ext32s_tl(arg, arg);
rn = "EntryLo1";
break;
tcg_gen_ext32s_tl(arg, arg);
rn = "BadVAddr";
break;
+ case 1:
+ if (ctx->bi) {
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_BadInstr));
+ rn = "BadInstr";
+ } else {
+ gen_mfc0_unimplemented(ctx, arg);
+ }
+ break;
+ case 2:
+ if (ctx->bp) {
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_BadInstrP));
+ rn = "BadInstrP";
+ } else {
+ gen_mfc0_unimplemented(ctx, arg);
+ }
+ break;
default:
goto die;
- }
+ }
break;
case 9:
switch (sel) {
gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_DESAVE));
rn = "DESAVE";
break;
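+ /* Selectors 2..7 map to KScratch[sel - 2]; a KScratch register is only
+ present when the matching bit of ctx->kscrexist is set. */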
+ case 2 ... 7:
+ if (ctx->kscrexist & (1 << sel)) {
+ tcg_gen_ld_tl(arg, cpu_env,
+ offsetof(CPUMIPSState, CP0_KScratch[sel-2]));
+ tcg_gen_ext32s_tl(arg, arg);
+ rn = "KScratch";
+ } else {
+ gen_mfc0_unimplemented(ctx, arg);
+ }
+ break;
default:
goto die;
}
}
break;
case 8:
- /* ignored */
- rn = "BadVAddr";
+ switch (sel) {
+ case 0:
+ /* ignored */
+ rn = "BadVAddr";
+ break;
+ case 1:
+ /* ignored */
+ rn = "BadInstr";
+ break;
+ case 2:
+ /* ignored */
+ rn = "BadInstrP";
+ break;
+ default:
+ goto die;
+ }
break;
case 9:
switch (sel) {
gen_mtc0_store32(arg, offsetof(CPUMIPSState, CP0_DESAVE));
rn = "DESAVE";
break;
+ case 2 ... 7:
+ if (ctx->kscrexist & (1 << sel)) {
+ tcg_gen_st_tl(arg, cpu_env,
+ offsetof(CPUMIPSState, CP0_KScratch[sel-2]));
+ rn = "KScratch";
+ }
+ break;
default:
goto die;
}
tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_BadVAddr));
rn = "BadVAddr";
break;
+ case 1:
+ if (ctx->bi) {
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_BadInstr));
+ rn = "BadInstr";
+ } else {
+ gen_mfc0_unimplemented(ctx, arg);
+ }
+ break;
+ case 2:
+ if (ctx->bp) {
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_BadInstrP));
+ rn = "BadInstrP";
+ } else {
+ gen_mfc0_unimplemented(ctx, arg);
+ }
+ break;
default:
goto die;
}
gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config3));
rn = "Config3";
break;
+ case 4:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config4));
+ rn = "Config4";
+ break;
+ case 5:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config5));
+ rn = "Config5";
+ break;
/* 6,7 are implementation dependent */
case 6:
gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config6));
gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_DESAVE));
rn = "DESAVE";
break;
+ case 2 ... 7:
+ if (ctx->kscrexist & (1 << sel)) {
+ tcg_gen_ld_tl(arg, cpu_env,
+ offsetof(CPUMIPSState, CP0_KScratch[sel-2]));
+ rn = "KScratch";
+ } else {
+ gen_mfc0_unimplemented(ctx, arg);
+ }
+ break;
default:
goto die;
}
case 2:
switch (sel) {
case 0:
- gen_helper_mtc0_entrylo0(cpu_env, arg);
+ gen_helper_dmtc0_entrylo0(cpu_env, arg);
rn = "EntryLo0";
break;
case 1:
case 3:
switch (sel) {
case 0:
- gen_helper_mtc0_entrylo1(cpu_env, arg);
+ gen_helper_dmtc0_entrylo1(cpu_env, arg);
rn = "EntryLo1";
break;
default:
}
break;
case 8:
- /* ignored */
- rn = "BadVAddr";
+ switch (sel) {
+ case 0:
+ /* ignored */
+ rn = "BadVAddr";
+ break;
+ case 1:
+ /* ignored */
+ rn = "BadInstr";
+ break;
+ case 2:
+ /* ignored */
+ rn = "BadInstrP";
+ break;
+ default:
+ goto die;
+ }
break;
case 9:
switch (sel) {
/* ignored */
rn = "Config3";
break;
+ case 4:
+ /* currently ignored */
+ rn = "Config4";
+ break;
+ case 5:
+ gen_helper_mtc0_config5(cpu_env, arg);
+ rn = "Config5";
+ /* Stop translation as we may have switched the execution mode */
+ ctx->bstate = BS_STOP;
+ break;
/* 6,7 are implementation dependent */
default:
rn = "Invalid config selector";
gen_mtc0_store32(arg, offsetof(CPUMIPSState, CP0_DESAVE));
rn = "DESAVE";
break;
+ case 2 ... 7:
+ if (ctx->kscrexist & (1 << sel)) {
+ tcg_gen_st_tl(arg, cpu_env,
+ offsetof(CPUMIPSState, CP0_KScratch[sel-2]));
+ rn = "KScratch";
+ }
+ break;
default:
goto die;
}
goto die;
gen_helper_tlbwi(cpu_env);
break;
+ case OPC_TLBINV:
+ opn = "tlbinv";
+ if (ctx->ie >= 2) {
+ if (!env->tlb->helper_tlbinv) {
+ goto die;
+ }
+ gen_helper_tlbinv(cpu_env);
+ } /* treat as nop if TLBINV not supported */
+ break;
+ case OPC_TLBINVF:
+ opn = "tlbinvf";
+ if (ctx->ie >= 2) {
+ if (!env->tlb->helper_tlbinvf) {
+ goto die;
+ }
+ gen_helper_tlbinvf(cpu_env);
+ } /* treat as nop if TLBINVF not supported */
+ break;
case OPC_TLBWR:
opn = "tlbwr";
if (!env->tlb->helper_tlbwr)
case OPC_ERET:
opn = "eret";
check_insn(ctx, ISA_MIPS2);
+ if ((ctx->insn_flags & ISA_MIPS32R6) &&
+ (ctx->hflags & MIPS_HFLAG_BMASK)) {
+ MIPS_DEBUG("CTI in delay / forbidden slot");
+ goto die;
+ }
gen_helper_eret(cpu_env);
ctx->bstate = BS_EXCP;
break;
case OPC_DERET:
opn = "deret";
check_insn(ctx, ISA_MIPS32);
+ if ((ctx->insn_flags & ISA_MIPS32R6) &&
+ (ctx->hflags & MIPS_HFLAG_BMASK)) {
+ MIPS_DEBUG("CTI in delay / forbidden slot");
+ goto die;
+ }
if (!(ctx->hflags & MIPS_HFLAG_DM)) {
MIPS_INVAL(opn);
generate_exception(ctx, EXCP_RI);
case OPC_WAIT:
opn = "wait";
check_insn(ctx, ISA_MIPS3 | ISA_MIPS32);
+ if ((ctx->insn_flags & ISA_MIPS32R6) &&
+ (ctx->hflags & MIPS_HFLAG_BMASK)) {
+ MIPS_DEBUG("CTI in delay / forbidden slot");
+ goto die;
+ }
/* If we get an exception, we want to restart at next instruction */
ctx->pc += 4;
save_cpu_state(ctx, 1);
const char *opn = "cp1 cond branch";
TCGv_i32 t0 = tcg_temp_new_i32();
+ if ((ctx->insn_flags & ISA_MIPS32R6) && (ctx->hflags & MIPS_HFLAG_BMASK)) {
+ MIPS_DEBUG("CTI in delay / forbidden slot");
+ generate_exception(ctx, EXCP_RI);
+ goto out;
+ }
+
if (cc != 0)
check_insn(ctx, ISA_MIPS4 | ISA_MIPS32);
MIPS_DEBUG("%s: cond %02x target " TARGET_FMT_lx, opn,
ctx->hflags, btarget);
ctx->btarget = btarget;
-
+ ctx->hflags |= MIPS_HFLAG_BDS32;
out:
tcg_temp_free_i32(t0);
}
+/* R6 CP1 Branches */
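+/* BC1EQZ and BC1NEZ test bit 0 of FPR ft rather than an FCC condition
+   code. */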
+static void gen_compute_branch1_r6(DisasContext *ctx, uint32_t op,
+ int32_t ft, int32_t offset)
+{
+ target_ulong btarget;
+ const char *opn = "cp1 cond branch";
+ TCGv_i64 t0 = tcg_temp_new_i64();
+
+ if (ctx->hflags & MIPS_HFLAG_BMASK) {
+#ifdef MIPS_DEBUG_DISAS
+ LOG_DISAS("Branch in delay / forbidden slot at PC 0x" TARGET_FMT_lx
+ "\n", ctx->pc);
+#endif
+ generate_exception(ctx, EXCP_RI);
+ goto out;
+ }
+
+ gen_load_fpr64(ctx, t0, ft);
+ tcg_gen_andi_i64(t0, t0, 1);
+
+ btarget = addr_add(ctx, ctx->pc + 4, offset);
+
+ switch (op) {
+ case OPC_BC1EQZ:
+ tcg_gen_xori_i64(t0, t0, 1);
+ opn = "bc1eqz";
+ ctx->hflags |= MIPS_HFLAG_BC;
+ break;
+ case OPC_BC1NEZ:
+ /* t0 already set */
+ opn = "bc1nez";
+ ctx->hflags |= MIPS_HFLAG_BC;
+ break;
+ default:
+ MIPS_INVAL(opn);
+ generate_exception(ctx, EXCP_RI);
+ goto out;
+ }
+
+ tcg_gen_trunc_i64_tl(bcond, t0);
+
+ (void)opn; /* avoid a compiler warning */
+ MIPS_DEBUG("%s: cond %02x target " TARGET_FMT_lx, opn,
+ ctx->hflags, btarget);
+ ctx->btarget = btarget;
+
+out:
+ tcg_temp_free_i64(t0);
+}
+
/* Coprocessor 1 (FPU) */
#define FOP(func, fmt) (((fmt) << 21) | (func))
OPC_TRUNC_W_S = FOP(13, FMT_S),
OPC_CEIL_W_S = FOP(14, FMT_S),
OPC_FLOOR_W_S = FOP(15, FMT_S),
+ OPC_SEL_S = FOP(16, FMT_S),
OPC_MOVCF_S = FOP(17, FMT_S),
OPC_MOVZ_S = FOP(18, FMT_S),
OPC_MOVN_S = FOP(19, FMT_S),
+ OPC_SELEQZ_S = FOP(20, FMT_S),
OPC_RECIP_S = FOP(21, FMT_S),
OPC_RSQRT_S = FOP(22, FMT_S),
+ OPC_SELNEZ_S = FOP(23, FMT_S),
+ OPC_MADDF_S = FOP(24, FMT_S),
+ OPC_MSUBF_S = FOP(25, FMT_S),
+ OPC_RINT_S = FOP(26, FMT_S),
+ OPC_CLASS_S = FOP(27, FMT_S),
+ OPC_MIN_S = FOP(28, FMT_S),
OPC_RECIP2_S = FOP(28, FMT_S),
+ OPC_MINA_S = FOP(29, FMT_S),
OPC_RECIP1_S = FOP(29, FMT_S),
+ OPC_MAX_S = FOP(30, FMT_S),
OPC_RSQRT1_S = FOP(30, FMT_S),
+ OPC_MAXA_S = FOP(31, FMT_S),
OPC_RSQRT2_S = FOP(31, FMT_S),
OPC_CVT_D_S = FOP(33, FMT_S),
OPC_CVT_W_S = FOP(36, FMT_S),
OPC_TRUNC_W_D = FOP(13, FMT_D),
OPC_CEIL_W_D = FOP(14, FMT_D),
OPC_FLOOR_W_D = FOP(15, FMT_D),
+ OPC_SEL_D = FOP(16, FMT_D),
OPC_MOVCF_D = FOP(17, FMT_D),
OPC_MOVZ_D = FOP(18, FMT_D),
OPC_MOVN_D = FOP(19, FMT_D),
+ OPC_SELEQZ_D = FOP(20, FMT_D),
OPC_RECIP_D = FOP(21, FMT_D),
OPC_RSQRT_D = FOP(22, FMT_D),
+ OPC_SELNEZ_D = FOP(23, FMT_D),
+ OPC_MADDF_D = FOP(24, FMT_D),
+ OPC_MSUBF_D = FOP(25, FMT_D),
+ OPC_RINT_D = FOP(26, FMT_D),
+ OPC_CLASS_D = FOP(27, FMT_D),
+ OPC_MIN_D = FOP(28, FMT_D),
OPC_RECIP2_D = FOP(28, FMT_D),
+ OPC_MINA_D = FOP(29, FMT_D),
OPC_RECIP1_D = FOP(29, FMT_D),
+ OPC_MAX_D = FOP(30, FMT_D),
OPC_RSQRT1_D = FOP(30, FMT_D),
+ OPC_MAXA_D = FOP(31, FMT_D),
OPC_RSQRT2_D = FOP(31, FMT_D),
OPC_CVT_S_D = FOP(32, FMT_D),
OPC_CVT_W_D = FOP(36, FMT_D),
OPC_CMP_NGT_PS = FOP (63, FMT_PS),
};
+enum r6_f_cmp_op {
+ R6_OPC_CMP_AF_S = FOP(0, FMT_W),
+ R6_OPC_CMP_UN_S = FOP(1, FMT_W),
+ R6_OPC_CMP_EQ_S = FOP(2, FMT_W),
+ R6_OPC_CMP_UEQ_S = FOP(3, FMT_W),
+ R6_OPC_CMP_LT_S = FOP(4, FMT_W),
+ R6_OPC_CMP_ULT_S = FOP(5, FMT_W),
+ R6_OPC_CMP_LE_S = FOP(6, FMT_W),
+ R6_OPC_CMP_ULE_S = FOP(7, FMT_W),
+ R6_OPC_CMP_SAF_S = FOP(8, FMT_W),
+ R6_OPC_CMP_SUN_S = FOP(9, FMT_W),
+ R6_OPC_CMP_SEQ_S = FOP(10, FMT_W),
+ R6_OPC_CMP_SEUQ_S = FOP(11, FMT_W),
+ R6_OPC_CMP_SLT_S = FOP(12, FMT_W),
+ R6_OPC_CMP_SULT_S = FOP(13, FMT_W),
+ R6_OPC_CMP_SLE_S = FOP(14, FMT_W),
+ R6_OPC_CMP_SULE_S = FOP(15, FMT_W),
+ R6_OPC_CMP_OR_S = FOP(17, FMT_W),
+ R6_OPC_CMP_UNE_S = FOP(18, FMT_W),
+ R6_OPC_CMP_NE_S = FOP(19, FMT_W),
+ R6_OPC_CMP_SOR_S = FOP(25, FMT_W),
+ R6_OPC_CMP_SUNE_S = FOP(26, FMT_W),
+ R6_OPC_CMP_SNE_S = FOP(27, FMT_W),
+
+ R6_OPC_CMP_AF_D = FOP(0, FMT_L),
+ R6_OPC_CMP_UN_D = FOP(1, FMT_L),
+ R6_OPC_CMP_EQ_D = FOP(2, FMT_L),
+ R6_OPC_CMP_UEQ_D = FOP(3, FMT_L),
+ R6_OPC_CMP_LT_D = FOP(4, FMT_L),
+ R6_OPC_CMP_ULT_D = FOP(5, FMT_L),
+ R6_OPC_CMP_LE_D = FOP(6, FMT_L),
+ R6_OPC_CMP_ULE_D = FOP(7, FMT_L),
+ R6_OPC_CMP_SAF_D = FOP(8, FMT_L),
+ R6_OPC_CMP_SUN_D = FOP(9, FMT_L),
+ R6_OPC_CMP_SEQ_D = FOP(10, FMT_L),
+ R6_OPC_CMP_SEUQ_D = FOP(11, FMT_L),
+ R6_OPC_CMP_SLT_D = FOP(12, FMT_L),
+ R6_OPC_CMP_SULT_D = FOP(13, FMT_L),
+ R6_OPC_CMP_SLE_D = FOP(14, FMT_L),
+ R6_OPC_CMP_SULE_D = FOP(15, FMT_L),
+ R6_OPC_CMP_OR_D = FOP(17, FMT_L),
+ R6_OPC_CMP_UNE_D = FOP(18, FMT_L),
+ R6_OPC_CMP_NE_D = FOP(19, FMT_L),
+ R6_OPC_CMP_SOR_D = FOP(25, FMT_L),
+ R6_OPC_CMP_SUNE_D = FOP(26, FMT_L),
+ R6_OPC_CMP_SNE_D = FOP(27, FMT_L),
+};
static void gen_cp1 (DisasContext *ctx, uint32_t opc, int rt, int fs)
{
const char *opn = "cp1 move";
gen_set_label(l2);
}
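+/* R6 FP selects: SEL.fmt picks ft or fs depending on bit 0 of fd, while
+   SELEQZ/SELNEZ return fs or zero depending on bit 0 of ft. */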
+static void gen_sel_s(DisasContext *ctx, enum fopcode op1, int fd, int ft,
+ int fs)
+{
+ TCGv_i32 t1 = tcg_const_i32(0);
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+ TCGv_i32 fp2 = tcg_temp_new_i32();
+ gen_load_fpr32(fp0, fd);
+ gen_load_fpr32(fp1, ft);
+ gen_load_fpr32(fp2, fs);
+
+ switch (op1) {
+ case OPC_SEL_S:
+ tcg_gen_andi_i32(fp0, fp0, 1);
+ tcg_gen_movcond_i32(TCG_COND_NE, fp0, fp0, t1, fp1, fp2);
+ break;
+ case OPC_SELEQZ_S:
+ tcg_gen_andi_i32(fp1, fp1, 1);
+ tcg_gen_movcond_i32(TCG_COND_EQ, fp0, fp1, t1, fp2, t1);
+ break;
+ case OPC_SELNEZ_S:
+ tcg_gen_andi_i32(fp1, fp1, 1);
+ tcg_gen_movcond_i32(TCG_COND_NE, fp0, fp1, t1, fp2, t1);
+ break;
+ default:
+ MIPS_INVAL("gen_sel_s");
+ generate_exception(ctx, EXCP_RI);
+ break;
+ }
+
+ gen_store_fpr32(fp0, fd);
+ tcg_temp_free_i32(fp2);
+ tcg_temp_free_i32(fp1);
+ tcg_temp_free_i32(fp0);
+ tcg_temp_free_i32(t1);
+}
+
+static void gen_sel_d(DisasContext *ctx, enum fopcode op1, int fd, int ft,
+ int fs)
+{
+ TCGv_i64 t1 = tcg_const_i64(0);
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+ TCGv_i64 fp2 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, fd);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_load_fpr64(ctx, fp2, fs);
+
+ switch (op1) {
+ case OPC_SEL_D:
+ tcg_gen_andi_i64(fp0, fp0, 1);
+ tcg_gen_movcond_i64(TCG_COND_NE, fp0, fp0, t1, fp1, fp2);
+ break;
+ case OPC_SELEQZ_D:
+ tcg_gen_andi_i64(fp1, fp1, 1);
+ tcg_gen_movcond_i64(TCG_COND_EQ, fp0, fp1, t1, fp2, t1);
+ break;
+ case OPC_SELNEZ_D:
+ tcg_gen_andi_i64(fp1, fp1, 1);
+ tcg_gen_movcond_i64(TCG_COND_NE, fp0, fp1, t1, fp2, t1);
+ break;
+ default:
+ MIPS_INVAL("gen_sel_d");
+ generate_exception(ctx, EXCP_RI);
+ break;
+ }
+
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp2);
+ tcg_temp_free_i64(fp1);
+ tcg_temp_free_i64(fp0);
+ tcg_temp_free_i64(t1);
+}
static void gen_farith (DisasContext *ctx, enum fopcode op1,
int ft, int fs, int fd, int cc)
}
opn = "floor.w.s";
break;
+ case OPC_SEL_S:
+ check_insn(ctx, ISA_MIPS32R6);
+ gen_sel_s(ctx, op1, fd, ft, fs);
+ opn = "sel.s";
+ break;
+ case OPC_SELEQZ_S:
+ check_insn(ctx, ISA_MIPS32R6);
+ gen_sel_s(ctx, op1, fd, ft, fs);
+ opn = "seleqz.s";
+ break;
+ case OPC_SELNEZ_S:
+ check_insn(ctx, ISA_MIPS32R6);
+ gen_sel_s(ctx, op1, fd, ft, fs);
+ opn = "selnez.s";
+ break;
case OPC_MOVCF_S:
check_insn_opc_removed(ctx, ISA_MIPS32R6);
gen_movcf_s(fs, fd, (ft >> 2) & 0x7, ft & 0x1);
}
opn = "rsqrt.s";
break;
- case OPC_RECIP2_S:
- check_cp1_64bitmode(ctx);
+ case OPC_MADDF_S:
+ check_insn(ctx, ISA_MIPS32R6);
{
TCGv_i32 fp0 = tcg_temp_new_i32();
TCGv_i32 fp1 = tcg_temp_new_i32();
-
+ TCGv_i32 fp2 = tcg_temp_new_i32();
gen_load_fpr32(fp0, fs);
gen_load_fpr32(fp1, ft);
- gen_helper_float_recip2_s(fp0, cpu_env, fp0, fp1);
+ gen_load_fpr32(fp2, fd);
+ gen_helper_float_maddf_s(fp2, cpu_env, fp0, fp1, fp2);
+ gen_store_fpr32(fp2, fd);
+ tcg_temp_free_i32(fp2);
tcg_temp_free_i32(fp1);
- gen_store_fpr32(fp0, fd);
tcg_temp_free_i32(fp0);
+ opn = "maddf.s";
}
- opn = "recip2.s";
break;
- case OPC_RECIP1_S:
- check_cp1_64bitmode(ctx);
+ case OPC_MSUBF_S:
+ check_insn(ctx, ISA_MIPS32R6);
{
TCGv_i32 fp0 = tcg_temp_new_i32();
-
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+ TCGv_i32 fp2 = tcg_temp_new_i32();
gen_load_fpr32(fp0, fs);
- gen_helper_float_recip1_s(fp0, cpu_env, fp0);
- gen_store_fpr32(fp0, fd);
- tcg_temp_free_i32(fp0);
+ gen_load_fpr32(fp1, ft);
+ gen_load_fpr32(fp2, fd);
+ gen_helper_float_msubf_s(fp2, cpu_env, fp0, fp1, fp2);
+ gen_store_fpr32(fp2, fd);
+ tcg_temp_free_i32(fp2);
+ tcg_temp_free_i32(fp1);
+ tcg_temp_free_i32(fp0);
+ opn = "msubf.s";
}
- opn = "recip1.s";
break;
- case OPC_RSQRT1_S:
- check_cp1_64bitmode(ctx);
+ case OPC_RINT_S:
+ check_insn(ctx, ISA_MIPS32R6);
{
TCGv_i32 fp0 = tcg_temp_new_i32();
-
gen_load_fpr32(fp0, fs);
- gen_helper_float_rsqrt1_s(fp0, cpu_env, fp0);
+ gen_helper_float_rint_s(fp0, cpu_env, fp0);
gen_store_fpr32(fp0, fd);
tcg_temp_free_i32(fp0);
+ opn = "rint.s";
}
- opn = "rsqrt1.s";
break;
- case OPC_RSQRT2_S:
- check_cp1_64bitmode(ctx);
+ case OPC_CLASS_S:
+ check_insn(ctx, ISA_MIPS32R6);
{
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ gen_load_fpr32(fp0, fs);
+ gen_helper_float_class_s(fp0, fp0);
+ gen_store_fpr32(fp0, fd);
+ tcg_temp_free_i32(fp0);
+ opn = "class.s";
+ }
+ break;
+ case OPC_MIN_S: /* OPC_RECIP2_S */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ /* OPC_MIN_S */
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+ TCGv_i32 fp2 = tcg_temp_new_i32();
+ gen_load_fpr32(fp0, fs);
+ gen_load_fpr32(fp1, ft);
+ gen_helper_float_min_s(fp2, cpu_env, fp0, fp1);
+ gen_store_fpr32(fp2, fd);
+ tcg_temp_free_i32(fp2);
+ tcg_temp_free_i32(fp1);
+ tcg_temp_free_i32(fp0);
+ opn = "min.s";
+ } else {
+ /* OPC_RECIP2_S */
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+
+ gen_load_fpr32(fp0, fs);
+ gen_load_fpr32(fp1, ft);
+ gen_helper_float_recip2_s(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i32(fp1);
+ gen_store_fpr32(fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ opn = "recip2.s";
+ }
+ break;
+ case OPC_MINA_S: /* OPC_RECIP1_S */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ /* OPC_MINA_S */
TCGv_i32 fp0 = tcg_temp_new_i32();
TCGv_i32 fp1 = tcg_temp_new_i32();
+ TCGv_i32 fp2 = tcg_temp_new_i32();
+ gen_load_fpr32(fp0, fs);
+ gen_load_fpr32(fp1, ft);
+ gen_helper_float_mina_s(fp2, cpu_env, fp0, fp1);
+ gen_store_fpr32(fp2, fd);
+ tcg_temp_free_i32(fp2);
+ tcg_temp_free_i32(fp1);
+ tcg_temp_free_i32(fp0);
+ opn = "mina.s";
+ } else {
+ /* OPC_RECIP1_S */
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ gen_load_fpr32(fp0, fs);
+ gen_helper_float_recip1_s(fp0, cpu_env, fp0);
+ gen_store_fpr32(fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ opn = "recip1.s";
+ }
+ break;
+ case OPC_MAX_S: /* OPC_RSQRT1_S */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ /* OPC_MAX_S */
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
gen_load_fpr32(fp0, fs);
gen_load_fpr32(fp1, ft);
- gen_helper_float_rsqrt2_s(fp0, cpu_env, fp0, fp1);
+ gen_helper_float_max_s(fp1, cpu_env, fp0, fp1);
+ gen_store_fpr32(fp1, fd);
+ tcg_temp_free_i32(fp1);
+ tcg_temp_free_i32(fp0);
+ opn = "max.s";
+ } else {
+ /* OPC_RSQRT1_S */
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ gen_load_fpr32(fp0, fs);
+ gen_helper_float_rsqrt1_s(fp0, cpu_env, fp0);
+ gen_store_fpr32(fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ opn = "rsqrt1.s";
+ }
+ break;
+ case OPC_MAXA_S: /* OPC_RSQRT2_S */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ /* OPC_MAXA_S */
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+ gen_load_fpr32(fp0, fs);
+ gen_load_fpr32(fp1, ft);
+ gen_helper_float_maxa_s(fp1, cpu_env, fp0, fp1);
+ gen_store_fpr32(fp1, fd);
tcg_temp_free_i32(fp1);
- gen_store_fpr32(fp0, fd);
tcg_temp_free_i32(fp0);
+ opn = "maxa.s";
+ } else {
+ /* OPC_RSQRT2_S */
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+
+ gen_load_fpr32(fp0, fs);
+ gen_load_fpr32(fp1, ft);
+ gen_helper_float_rsqrt2_s(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i32(fp1);
+ gen_store_fpr32(fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ opn = "rsqrt2.s";
}
- opn = "rsqrt2.s";
break;
case OPC_CVT_D_S:
check_cp1_registers(ctx, fd);
}
opn = "floor.w.d";
break;
+ case OPC_SEL_D:
+ check_insn(ctx, ISA_MIPS32R6);
+ gen_sel_d(ctx, op1, fd, ft, fs);
+ opn = "sel.d";
+ break;
+ case OPC_SELEQZ_D:
+ check_insn(ctx, ISA_MIPS32R6);
+ gen_sel_d(ctx, op1, fd, ft, fs);
+ opn = "seleqz.d";
+ break;
+ case OPC_SELNEZ_D:
+ check_insn(ctx, ISA_MIPS32R6);
+ gen_sel_d(ctx, op1, fd, ft, fs);
+ opn = "selnez.d";
+ break;
case OPC_MOVCF_D:
check_insn_opc_removed(ctx, ISA_MIPS32R6);
gen_movcf_d(ctx, fs, fd, (ft >> 2) & 0x7, ft & 0x1);
}
opn = "rsqrt.d";
break;
- case OPC_RECIP2_D:
- check_cp1_64bitmode(ctx);
+ case OPC_MADDF_D:
+ check_insn(ctx, ISA_MIPS32R6);
{
TCGv_i64 fp0 = tcg_temp_new_i64();
TCGv_i64 fp1 = tcg_temp_new_i64();
-
+ TCGv_i64 fp2 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
- gen_helper_float_recip2_d(fp0, cpu_env, fp0, fp1);
+ gen_load_fpr64(ctx, fp2, fd);
+ gen_helper_float_maddf_d(fp2, cpu_env, fp0, fp1, fp2);
+ gen_store_fpr64(ctx, fp2, fd);
+ tcg_temp_free_i64(fp2);
tcg_temp_free_i64(fp1);
- gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
+ opn = "maddf.d";
}
- opn = "recip2.d";
break;
- case OPC_RECIP1_D:
- check_cp1_64bitmode(ctx);
+ case OPC_MSUBF_D:
+ check_insn(ctx, ISA_MIPS32R6);
{
TCGv_i64 fp0 = tcg_temp_new_i64();
-
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+ TCGv_i64 fp2 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- gen_helper_float_recip1_d(fp0, cpu_env, fp0);
- gen_store_fpr64(ctx, fp0, fd);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_load_fpr64(ctx, fp2, fd);
+ gen_helper_float_msubf_d(fp2, cpu_env, fp0, fp1, fp2);
+ gen_store_fpr64(ctx, fp2, fd);
+ tcg_temp_free_i64(fp2);
+ tcg_temp_free_i64(fp1);
tcg_temp_free_i64(fp0);
+ opn = "msubf.d";
}
- opn = "recip1.d";
break;
- case OPC_RSQRT1_D:
- check_cp1_64bitmode(ctx);
+ case OPC_RINT_D:
+ check_insn(ctx, ISA_MIPS32R6);
{
TCGv_i64 fp0 = tcg_temp_new_i64();
-
gen_load_fpr64(ctx, fp0, fs);
- gen_helper_float_rsqrt1_d(fp0, cpu_env, fp0);
+ gen_helper_float_rint_d(fp0, cpu_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
+ opn = "rint.d";
}
- opn = "rsqrt1.d";
break;
- case OPC_RSQRT2_D:
- check_cp1_64bitmode(ctx);
+ case OPC_CLASS_D:
+ check_insn(ctx, ISA_MIPS32R6);
{
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_helper_float_class_d(fp0, fp0);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ opn = "class.d";
+ }
+ break;
+ case OPC_MIN_D: /* OPC_RECIP2_D */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ /* OPC_MIN_D */
TCGv_i64 fp0 = tcg_temp_new_i64();
TCGv_i64 fp1 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_helper_float_min_d(fp1, cpu_env, fp0, fp1);
+ gen_store_fpr64(ctx, fp1, fd);
+ tcg_temp_free_i64(fp1);
+ tcg_temp_free_i64(fp0);
+ opn = "min.d";
+ } else {
+ /* OPC_RECIP2_D */
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_helper_float_recip2_d(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ opn = "recip2.d";
+ }
+ break;
+ case OPC_MINA_D: /* OPC_RECIP1_D */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ /* OPC_MINA_D */
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
- gen_helper_float_rsqrt2_d(fp0, cpu_env, fp0, fp1);
+ gen_helper_float_mina_d(fp1, cpu_env, fp0, fp1);
+ gen_store_fpr64(ctx, fp1, fd);
+ tcg_temp_free_i64(fp1);
+ tcg_temp_free_i64(fp0);
+ opn = "mina.d";
+ } else {
+ /* OPC_RECIP1_D */
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_helper_float_recip1_d(fp0, cpu_env, fp0);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ opn = "recip1.d";
+ }
+ break;
+ case OPC_MAX_D: /* OPC_RSQRT1_D */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ /* OPC_MAX_D */
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_helper_float_max_d(fp1, cpu_env, fp0, fp1);
+ gen_store_fpr64(ctx, fp1, fd);
+ tcg_temp_free_i64(fp1);
+ tcg_temp_free_i64(fp0);
+ opn = "max.d";
+ } else {
+ /* OPC_RSQRT1_D */
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_helper_float_rsqrt1_d(fp0, cpu_env, fp0);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ opn = "rsqrt1.d";
+ }
+ break;
+ case OPC_MAXA_D: /* OPC_RSQRT2_D */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ /* OPC_MAXA_D */
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_helper_float_maxa_d(fp1, cpu_env, fp0, fp1);
+ gen_store_fpr64(ctx, fp1, fd);
tcg_temp_free_i64(fp1);
- gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
+ opn = "maxa.d";
+ } else {
+ /* OPC_RSQRT2_D */
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_helper_float_rsqrt2_d(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ opn = "rsqrt2.d";
}
- opn = "rsqrt2.d";
break;
case OPC_CMP_F_D:
case OPC_CMP_UN_D:
tcg_temp_free(t0);
}
-static void handle_delay_slot(DisasContext *ctx, int insn_bytes)
+static void gen_branch(DisasContext *ctx, int insn_bytes)
{
if (ctx->hflags & MIPS_HFLAG_BMASK) {
int proc_hflags = ctx->hflags & MIPS_HFLAG_BMASK;
save_cpu_state(ctx, 0);
/* FIXME: Need to clear can_do_io. */
switch (proc_hflags & MIPS_HFLAG_BMASK_BASE) {
+ case MIPS_HFLAG_FBNSLOT:
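+ /* Forbidden slot of a not-taken compact branch: fall through to the
+ next instruction. */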
+ MIPS_DEBUG("forbidden slot");
+ gen_goto_tb(ctx, 0, ctx->pc + insn_bytes);
+ break;
case MIPS_HFLAG_B:
/* unconditional branch */
MIPS_DEBUG("unconditional branch");
gen_addiupc(ctx, rx, imm, 0, 1);
break;
case M16_OPC_B:
- gen_compute_branch(ctx, OPC_BEQ, 4, 0, 0, offset << 1);
+ gen_compute_branch(ctx, OPC_BEQ, 4, 0, 0, offset << 1, 0);
/* No delay slot, so just process as a normal instruction */
break;
case M16_OPC_BEQZ:
- gen_compute_branch(ctx, OPC_BEQ, 4, rx, 0, offset << 1);
+ gen_compute_branch(ctx, OPC_BEQ, 4, rx, 0, offset << 1, 0);
/* No delay slot, so just process as a normal instruction */
break;
case M16_OPC_BNEQZ:
- gen_compute_branch(ctx, OPC_BNE, 4, rx, 0, offset << 1);
+ gen_compute_branch(ctx, OPC_BNE, 4, rx, 0, offset << 1, 0);
/* No delay slot, so just process as a normal instruction */
break;
case M16_OPC_SHIFT:
case M16_OPC_I8:
switch (funct) {
case I8_BTEQZ:
- gen_compute_branch(ctx, OPC_BEQ, 4, 24, 0, offset << 1);
+ gen_compute_branch(ctx, OPC_BEQ, 4, 24, 0, offset << 1, 0);
break;
case I8_BTNEZ:
- gen_compute_branch(ctx, OPC_BNE, 4, 24, 0, offset << 1);
+ gen_compute_branch(ctx, OPC_BNE, 4, 24, 0, offset << 1, 0);
break;
case I8_SWRASP:
gen_st(ctx, OPC_SW, 31, 29, imm);
case M16_OPC_B:
offset = (ctx->opcode & 0x7ff) << 1;
offset = (int16_t)(offset << 4) >> 4;
- gen_compute_branch(ctx, OPC_BEQ, 2, 0, 0, offset);
+ gen_compute_branch(ctx, OPC_BEQ, 2, 0, 0, offset, 0);
/* No delay slot, so just process as a normal instruction */
break;
case M16_OPC_JAL:
offset = (((ctx->opcode & 0x1f) << 21)
| ((ctx->opcode >> 5) & 0x1f) << 16
| offset) << 2;
- op = ((ctx->opcode >> 10) & 0x1) ? OPC_JALXS : OPC_JALS;
- gen_compute_branch(ctx, op, 4, rx, ry, offset);
+ op = ((ctx->opcode >> 10) & 0x1) ? OPC_JALX : OPC_JAL;
+ gen_compute_branch(ctx, op, 4, rx, ry, offset, 2);
n_bytes = 4;
break;
case M16_OPC_BEQZ:
- gen_compute_branch(ctx, OPC_BEQ, 2, rx, 0, ((int8_t)ctx->opcode) << 1);
+ gen_compute_branch(ctx, OPC_BEQ, 2, rx, 0,
+ ((int8_t)ctx->opcode) << 1, 0);
/* No delay slot, so just process as a normal instruction */
break;
case M16_OPC_BNEQZ:
- gen_compute_branch(ctx, OPC_BNE, 2, rx, 0, ((int8_t)ctx->opcode) << 1);
+ gen_compute_branch(ctx, OPC_BNE, 2, rx, 0,
+ ((int8_t)ctx->opcode) << 1, 0);
/* No delay slot, so just process as a normal instruction */
break;
case M16_OPC_SHIFT:
switch (funct) {
case I8_BTEQZ:
gen_compute_branch(ctx, OPC_BEQ, 2, 24, 0,
- ((int8_t)ctx->opcode) << 1);
+ ((int8_t)ctx->opcode) << 1, 0);
break;
case I8_BTNEZ:
gen_compute_branch(ctx, OPC_BNE, 2, 24, 0,
- ((int8_t)ctx->opcode) << 1);
+ ((int8_t)ctx->opcode) << 1, 0);
break;
case I8_SWRASP:
gen_st(ctx, OPC_SW, 31, 29, (ctx->opcode & 0xff) << 2);
int ra = (ctx->opcode >> 5) & 0x1;
if (link) {
- op = nd ? OPC_JALRC : OPC_JALRS;
+ op = OPC_JALR;
} else {
op = OPC_JR;
}
- gen_compute_branch(ctx, op, 2, ra ? 31 : rx, 31, 0);
+ gen_compute_branch(ctx, op, 2, ra ? 31 : rx, 31, 0,
+ (nd ? 0 : 2));
}
break;
case RR_SDBBP:
{
int rd = mmreg((ctx->opcode >> 3) & 0x7);
int rs = mmreg(ctx->opcode & 0x7);
- int opc;
switch (((ctx->opcode) >> 4) & 0x3f) {
case NOT16 + 0:
{
int reg = ctx->opcode & 0x1f;
- gen_compute_branch(ctx, OPC_JR, 2, reg, 0, 0);
+ gen_compute_branch(ctx, OPC_JR, 2, reg, 0, 0, 4);
}
break;
case JRC16 + 0:
case JRC16 + 1:
{
int reg = ctx->opcode & 0x1f;
-
- gen_compute_branch(ctx, OPC_JR, 2, reg, 0, 0);
+ gen_compute_branch(ctx, OPC_JR, 2, reg, 0, 0, 0);
/* Let normal delay slot handling in our caller take us
to the branch target. */
}
break;
case JALR16 + 0:
case JALR16 + 1:
- opc = OPC_JALR;
- goto do_jalr;
+ gen_compute_branch(ctx, OPC_JALR, 2, ctx->opcode & 0x1f, 31, 0, 4);
+ ctx->hflags |= MIPS_HFLAG_BDS_STRICT;
+ break;
case JALR16S + 0:
case JALR16S + 1:
- opc = OPC_JALRS;
- do_jalr:
- {
- int reg = ctx->opcode & 0x1f;
-
- gen_compute_branch(ctx, opc, 2, reg, 31, 0);
- }
+ gen_compute_branch(ctx, OPC_JALR, 2, ctx->opcode & 0x1f, 31, 0, 2);
+ ctx->hflags |= MIPS_HFLAG_BDS_STRICT;
break;
case MFHI16 + 0:
case MFHI16 + 1:
case JRADDIUSP + 1:
{
int imm = ZIMM(ctx->opcode, 0, 5);
-
- gen_compute_branch(ctx, OPC_JR, 2, 31, 0, 0);
+ gen_compute_branch(ctx, OPC_JR, 2, 31, 0, 0, 0);
gen_arith_imm(ctx, OPC_ADDIU, 29, 29, imm << 2);
/* Let normal delay slot handling in our caller take us
to the branch target. */
switch (minor) {
case JALR:
case JALR_HB:
- gen_compute_branch (ctx, OPC_JALR, 4, rs, rt, 0);
+ gen_compute_branch(ctx, OPC_JALR, 4, rs, rt, 0, 4);
+ ctx->hflags |= MIPS_HFLAG_BDS_STRICT;
break;
case JALRS:
case JALRS_HB:
- gen_compute_branch (ctx, OPC_JALRS, 4, rs, rt, 0);
+ gen_compute_branch(ctx, OPC_JALR, 4, rs, rt, 0, 2);
+ ctx->hflags |= MIPS_HFLAG_BDS_STRICT;
break;
default:
goto pool32axf_invalid;
minor = (ctx->opcode >> 21) & 0x1f;
switch (minor) {
case BLTZ:
- mips32_op = OPC_BLTZ;
- goto do_branch;
+ gen_compute_branch(ctx, OPC_BLTZ, 4, rs, -1, imm << 1, 4);
+ break;
case BLTZAL:
- mips32_op = OPC_BLTZAL;
- goto do_branch;
+ gen_compute_branch(ctx, OPC_BLTZAL, 4, rs, -1, imm << 1, 4);
+ ctx->hflags |= MIPS_HFLAG_BDS_STRICT;
+ break;
case BLTZALS:
- mips32_op = OPC_BLTZALS;
- goto do_branch;
+ gen_compute_branch(ctx, OPC_BLTZAL, 4, rs, -1, imm << 1, 2);
+ ctx->hflags |= MIPS_HFLAG_BDS_STRICT;
+ break;
case BGEZ:
- mips32_op = OPC_BGEZ;
- goto do_branch;
+ gen_compute_branch(ctx, OPC_BGEZ, 4, rs, -1, imm << 1, 4);
+ break;
case BGEZAL:
- mips32_op = OPC_BGEZAL;
- goto do_branch;
+ gen_compute_branch(ctx, OPC_BGEZAL, 4, rs, -1, imm << 1, 4);
+ ctx->hflags |= MIPS_HFLAG_BDS_STRICT;
+ break;
case BGEZALS:
- mips32_op = OPC_BGEZALS;
- goto do_branch;
+ gen_compute_branch(ctx, OPC_BGEZAL, 4, rs, -1, imm << 1, 2);
+ ctx->hflags |= MIPS_HFLAG_BDS_STRICT;
+ break;
case BLEZ:
- mips32_op = OPC_BLEZ;
- goto do_branch;
+ gen_compute_branch(ctx, OPC_BLEZ, 4, rs, -1, imm << 1, 4);
+ break;
case BGTZ:
- mips32_op = OPC_BGTZ;
- do_branch:
- gen_compute_branch(ctx, mips32_op, 4, rs, -1, imm << 1);
+ gen_compute_branch(ctx, OPC_BGTZ, 4, rs, -1, imm << 1, 4);
break;
/* Traps */
case BNEZC:
case BEQZC:
gen_compute_branch(ctx, minor == BNEZC ? OPC_BNE : OPC_BEQ,
- 4, rs, 0, imm << 1);
+ 4, rs, 0, imm << 1, 0);
/* Compact branches don't have a delay slot, so just let
the normal delay slot handling take us to the branch
target. */
gen_logic_imm(ctx, OPC_LUI, rs, -1, imm);
break;
case SYNCI:
+ /* Break the TB to be able to sync copied instructions
+ immediately */
+ ctx->bstate = BS_STOP;
break;
case BC2F:
case BC2T:
break;
case JALX32:
offset = (int32_t)(ctx->opcode & 0x3FFFFFF) << 2;
- gen_compute_branch(ctx, OPC_JALX, 4, rt, rs, offset);
+ gen_compute_branch(ctx, OPC_JALX, 4, rt, rs, offset, 4);
+ ctx->hflags |= MIPS_HFLAG_BDS_STRICT;
break;
case JALS32:
offset = (int32_t)(ctx->opcode & 0x3FFFFFF) << 1;
- gen_compute_branch(ctx, OPC_JALS, 4, rt, rs, offset);
+ gen_compute_branch(ctx, OPC_JAL, 4, rt, rs, offset, 2);
+ ctx->hflags |= MIPS_HFLAG_BDS_STRICT;
break;
case BEQ32:
- gen_compute_branch(ctx, OPC_BEQ, 4, rt, rs, imm << 1);
+ gen_compute_branch(ctx, OPC_BEQ, 4, rt, rs, imm << 1, 4);
break;
case BNE32:
- gen_compute_branch(ctx, OPC_BNE, 4, rt, rs, imm << 1);
+ gen_compute_branch(ctx, OPC_BNE, 4, rt, rs, imm << 1, 4);
break;
case J32:
gen_compute_branch(ctx, OPC_J, 4, rt, rs,
- (int32_t)(ctx->opcode & 0x3FFFFFF) << 1);
+ (int32_t)(ctx->opcode & 0x3FFFFFF) << 1, 4);
break;
case JAL32:
gen_compute_branch(ctx, OPC_JAL, 4, rt, rs,
- (int32_t)(ctx->opcode & 0x3FFFFFF) << 1);
+ (int32_t)(ctx->opcode & 0x3FFFFFF) << 1, 4);
+ ctx->hflags |= MIPS_HFLAG_BDS_STRICT;
break;
/* Floating point (COP1) */
case LWC132:
op = (ctx->opcode >> 10) & 0x3f;
/* Enforce properly-sized instructions in a delay slot */
- if (ctx->hflags & MIPS_HFLAG_BMASK) {
- int bits = ctx->hflags & MIPS_HFLAG_BMASK_EXT;
-
- switch (op) {
- case POOL32A:
- case POOL32B:
- case POOL32I:
- case POOL32C:
- case ADDI32:
- case ADDIU32:
- case ORI32:
- case XORI32:
- case SLTI32:
- case SLTIU32:
- case ANDI32:
- case JALX32:
- case LBU32:
- case LHU32:
- case POOL32F:
- case JALS32:
- case BEQ32:
- case BNE32:
- case J32:
- case JAL32:
- case SB32:
- case SH32:
- case POOL32S:
- case ADDIUPC:
- case SWC132:
- case SDC132:
- case SD32:
- case SW32:
- case LB32:
- case LH32:
- case DADDIU32:
- case LWC132:
- case LDC132:
- case LD32:
- case LW32:
- if (bits & MIPS_HFLAG_BDS16) {
+ if (ctx->hflags & MIPS_HFLAG_BDS_STRICT) {
+ switch (op & 0x7) { /* MSB-3..MSB-5 */
+ case 0:
+ /* POOL32A, POOL32B, POOL32I, POOL32C */
+ case 4:
+ /* ADDI32, ADDIU32, ORI32, XORI32, SLTI32, SLTIU32, ANDI32, JALX32 */
+ case 5:
+ /* LBU32, LHU32, POOL32F, JALS32, BEQ32, BNE32, J32, JAL32 */
+ case 6:
+ /* SB32, SH32, ADDIUPC, SWC132, SDC132, SW32 */
+ case 7:
+ /* LB32, LH32, LWC132, LDC132, LW32 */
+ if (ctx->hflags & MIPS_HFLAG_BDS16) {
generate_exception(ctx, EXCP_RI);
/* Just stop translation; the user is confused. */
ctx->bstate = BS_STOP;
return 2;
}
break;
- case POOL16A:
- case POOL16B:
- case POOL16C:
- case LWGP16:
- case POOL16F:
- case LBU16:
- case LHU16:
- case LWSP16:
- case LW16:
- case SB16:
- case SH16:
- case SWSP16:
- case SW16:
- case MOVE16:
- case ANDI16:
- case POOL16D:
- case POOL16E:
- case BEQZ16:
- case BNEZ16:
- case B16:
- case LI16:
- if (bits & MIPS_HFLAG_BDS32) {
+ case 1:
+ /* POOL16A, POOL16B, POOL16C, LWGP16, POOL16F */
+ case 2:
+ /* LBU16, LHU16, LWSP16, LW16, SB16, SH16, SWSP16, SW16 */
+ case 3:
+ /* MOVE16, ANDI16, POOL16D, POOL16E, BEQZ16, BNEZ16, B16, LI16 */
+ if (ctx->hflags & MIPS_HFLAG_BDS32) {
generate_exception(ctx, EXCP_RI);
/* Just stop translation; the user is confused. */
ctx->bstate = BS_STOP;
return 2;
}
break;
- default:
- break;
}
}
+
switch (op) {
case POOL16A:
{
break;
case B16:
gen_compute_branch(ctx, OPC_BEQ, 2, 0, 0,
- SIMM(ctx->opcode, 0, 10) << 1);
+ SIMM(ctx->opcode, 0, 10) << 1, 4);
break;
case BNEZ16:
case BEQZ16:
gen_compute_branch(ctx, op == BNEZ16 ? OPC_BNE : OPC_BEQ, 2,
mmreg(uMIPS_RD(ctx->opcode)),
- 0, SIMM(ctx->opcode, 0, 7) << 1);
+ 0, SIMM(ctx->opcode, 0, 7) << 1, 4);
break;
case LI16:
{
/* End MIPSDSP functions. */
-static void decode_opc_special_r6(CPUMIPSState *env, DisasContext *ctx)
+/* Compact Branches */
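+/* R6 compact branches have no delay slot; the instruction after a
+   conditional compact branch sits in a forbidden slot instead. */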
+static void gen_compute_compact_branch(DisasContext *ctx, uint32_t opc,
+ int rs, int rt, int32_t offset)
{
- int rs, rt, rd;
- uint32_t op1, op2;
+ int bcond_compute = 0;
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
- rs = (ctx->opcode >> 21) & 0x1f;
- rt = (ctx->opcode >> 16) & 0x1f;
- rd = (ctx->opcode >> 11) & 0x1f;
+ if (ctx->hflags & MIPS_HFLAG_BMASK) {
+#ifdef MIPS_DEBUG_DISAS
+ LOG_DISAS("Branch in delay / forbidden slot at PC 0x" TARGET_FMT_lx
+ "\n", ctx->pc);
+#endif
+ generate_exception(ctx, EXCP_RI);
+ goto out;
+ }
- op1 = MASK_SPECIAL(ctx->opcode);
- switch (op1) {
- case OPC_MULT ... OPC_DIVU:
- op2 = MASK_R6_MULDIV(ctx->opcode);
- switch (op2) {
- case R6_OPC_MUL:
- case R6_OPC_MUH:
- case R6_OPC_MULU:
- case R6_OPC_MUHU:
- case R6_OPC_DIV:
- case R6_OPC_MOD:
- case R6_OPC_DIVU:
- case R6_OPC_MODU:
- gen_r6_muldiv(ctx, op2, rd, rs, rt);
- break;
- default:
- MIPS_INVAL("special_r6 muldiv");
- generate_exception(ctx, EXCP_RI);
- break;
+ /* Load needed operands and calculate btarget */
+ switch (opc) {
+ /* compact branch */
+ case OPC_BOVC: /* OPC_BEQZALC, OPC_BEQC */
+ case OPC_BNVC: /* OPC_BNEZALC, OPC_BNEC */
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+ bcond_compute = 1;
+ ctx->btarget = addr_add(ctx, ctx->pc + 4, offset);
+ if (rs <= rt && rs == 0) {
+ /* OPC_BEQZALC, OPC_BNEZALC */
+ tcg_gen_movi_tl(cpu_gpr[31], ctx->pc + 4);
}
break;
- case OPC_SELEQZ:
- case OPC_SELNEZ:
- gen_cond_move(ctx, op1, rd, rs, rt);
+ case OPC_BLEZC: /* OPC_BGEZC, OPC_BGEC */
+ case OPC_BGTZC: /* OPC_BLTZC, OPC_BLTC */
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+ bcond_compute = 1;
+ ctx->btarget = addr_add(ctx, ctx->pc + 4, offset);
break;
-#if defined(TARGET_MIPS64)
- case OPC_DMULT ... OPC_DDIVU:
- op2 = MASK_R6_MULDIV(ctx->opcode);
- switch (op2) {
- case R6_OPC_DMUL:
- case R6_OPC_DMUH:
- case R6_OPC_DMULU:
- case R6_OPC_DMUHU:
- case R6_OPC_DDIV:
- case R6_OPC_DMOD:
- case R6_OPC_DDIVU:
- case R6_OPC_DMODU:
- check_mips_64(ctx);
- gen_r6_muldiv(ctx, op2, rd, rs, rt);
- break;
- default:
- MIPS_INVAL("special_r6 muldiv");
- generate_exception(ctx, EXCP_RI);
- break;
+ case OPC_BLEZALC: /* OPC_BGEZALC, OPC_BGEUC */
+ case OPC_BGTZALC: /* OPC_BLTZALC, OPC_BLTUC */
+ if (rs == 0 || rs == rt) {
+ /* OPC_BLEZALC, OPC_BGEZALC */
+ /* OPC_BGTZALC, OPC_BLTZALC */
+ tcg_gen_movi_tl(cpu_gpr[31], ctx->pc + 4);
}
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+ bcond_compute = 1;
+ ctx->btarget = addr_add(ctx, ctx->pc + 4, offset);
break;
-#endif
- default: /* Invalid */
- MIPS_INVAL("special_r6");
- generate_exception(ctx, EXCP_RI);
+ case OPC_BC:
+ case OPC_BALC:
+ ctx->btarget = addr_add(ctx, ctx->pc + 4, offset);
+ break;
+ case OPC_BEQZC:
+ case OPC_BNEZC:
+ if (rs != 0) {
+ /* OPC_BEQZC, OPC_BNEZC */
+ gen_load_gpr(t0, rs);
+ bcond_compute = 1;
+ ctx->btarget = addr_add(ctx, ctx->pc + 4, offset);
+ } else {
+ /* OPC_JIC, OPC_JIALC */
+ TCGv tbase = tcg_temp_new();
+ TCGv toffset = tcg_temp_new();
+
+ gen_load_gpr(tbase, rt);
+ tcg_gen_movi_tl(toffset, offset);
+ gen_op_addr_add(ctx, btarget, tbase, toffset);
+ tcg_temp_free(tbase);
+ tcg_temp_free(toffset);
+ }
+ break;
+ default:
+ MIPS_INVAL("Compact branch/jump");
+ generate_exception(ctx, EXCP_RI);
+ goto out;
+ }
+
+ if (bcond_compute == 0) {
+ /* Unconditional compact branch */
+ switch (opc) {
+ case OPC_JIALC:
+ tcg_gen_movi_tl(cpu_gpr[31], ctx->pc + 4);
+ /* Fallthrough */
+ case OPC_JIC:
+ ctx->hflags |= MIPS_HFLAG_BR;
+ break;
+ case OPC_BALC:
+ tcg_gen_movi_tl(cpu_gpr[31], ctx->pc + 4);
+ /* Fallthrough */
+ case OPC_BC:
+ ctx->hflags |= MIPS_HFLAG_B;
+ break;
+ default:
+ MIPS_INVAL("Compact branch/jump");
+ generate_exception(ctx, EXCP_RI);
+ goto out;
+ }
+
+ /* Generate the branch here, as compact branches don't have a delay slot */
+ gen_branch(ctx, 4);
+ } else {
+ /* Conditional compact branch */
+ int fs = gen_new_label();
+ save_cpu_state(ctx, 0);
+
+ switch (opc) {
+ case OPC_BLEZALC: /* OPC_BGEZALC, OPC_BGEUC */
+ if (rs == 0 && rt != 0) {
+ /* OPC_BLEZALC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_LE), t1, 0, fs);
+ } else if (rs != 0 && rt != 0 && rs == rt) {
+ /* OPC_BGEZALC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_GE), t1, 0, fs);
+ } else {
+ /* OPC_BGEUC */
+ tcg_gen_brcond_tl(tcg_invert_cond(TCG_COND_GEU), t0, t1, fs);
+ }
+ break;
+ case OPC_BGTZALC: /* OPC_BLTZALC, OPC_BLTUC */
+ if (rs == 0 && rt != 0) {
+ /* OPC_BGTZALC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_GT), t1, 0, fs);
+ } else if (rs != 0 && rt != 0 && rs == rt) {
+ /* OPC_BLTZALC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_LT), t1, 0, fs);
+ } else {
+ /* OPC_BLTUC */
+ tcg_gen_brcond_tl(tcg_invert_cond(TCG_COND_LTU), t0, t1, fs);
+ }
+ break;
+ case OPC_BLEZC: /* OPC_BGEZC, OPC_BGEC */
+ if (rs == 0 && rt != 0) {
+ /* OPC_BLEZC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_LE), t1, 0, fs);
+ } else if (rs != 0 && rt != 0 && rs == rt) {
+ /* OPC_BGEZC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_GE), t1, 0, fs);
+ } else {
+ /* OPC_BGEC */
+ tcg_gen_brcond_tl(tcg_invert_cond(TCG_COND_GE), t0, t1, fs);
+ }
+ break;
+ case OPC_BGTZC: /* OPC_BLTZC, OPC_BLTC */
+ if (rs == 0 && rt != 0) {
+ /* OPC_BGTZC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_GT), t1, 0, fs);
+ } else if (rs != 0 && rt != 0 && rs == rt) {
+ /* OPC_BLTZC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_LT), t1, 0, fs);
+ } else {
+ /* OPC_BLTC */
+ tcg_gen_brcond_tl(tcg_invert_cond(TCG_COND_LT), t0, t1, fs);
+ }
+ break;
+ case OPC_BOVC: /* OPC_BEQZALC, OPC_BEQC */
+ case OPC_BNVC: /* OPC_BNEZALC, OPC_BNEC */
+ if (rs >= rt) {
+ /* OPC_BOVC, OPC_BNVC */
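+ /* Detect 32-bit signed overflow of rs + rt: flag inputs that are
+ not valid sign-extended 32-bit values, then check whether two
+ same-signed operands produced a sum of the opposite sign. */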
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ TCGv t4 = tcg_temp_new();
+ TCGv input_overflow = tcg_temp_new();
+
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
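+ /* Detect 32-bit signed overflow of GPR[rs] + GPR[rt]: flag inputs
+ that are not valid sign-extended 32-bit values (input_overflow),
+ then check whether the operand signs agree but the sign of the
+ 32-bit sum differs. */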
+ tcg_gen_ext32s_tl(t2, t0);
+ tcg_gen_setcond_tl(TCG_COND_NE, input_overflow, t2, t0);
+ tcg_gen_ext32s_tl(t3, t1);
+ tcg_gen_setcond_tl(TCG_COND_NE, t4, t3, t1);
+ tcg_gen_or_tl(input_overflow, input_overflow, t4);
+
+ tcg_gen_add_tl(t4, t2, t3);
+ tcg_gen_ext32s_tl(t4, t4);
+ tcg_gen_xor_tl(t2, t2, t3);
+ tcg_gen_xor_tl(t3, t4, t3);
+ tcg_gen_andc_tl(t2, t3, t2);
+ tcg_gen_setcondi_tl(TCG_COND_LT, t4, t2, 0);
+ tcg_gen_or_tl(t4, t4, input_overflow);
+ if (opc == OPC_BOVC) {
+ /* OPC_BOVC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_NE), t4, 0, fs);
+ } else {
+ /* OPC_BNVC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_EQ), t4, 0, fs);
+ }
+ tcg_temp_free(input_overflow);
+ tcg_temp_free(t4);
+ tcg_temp_free(t3);
+ tcg_temp_free(t2);
+ } else if (rs < rt && rs == 0) {
+ /* OPC_BEQZALC, OPC_BNEZALC */
+ if (opc == OPC_BEQZALC) {
+ /* OPC_BEQZALC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_EQ), t1, 0, fs);
+ } else {
+ /* OPC_BNEZALC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_NE), t1, 0, fs);
+ }
+ } else {
+ /* OPC_BEQC, OPC_BNEC */
+ if (opc == OPC_BEQC) {
+ /* OPC_BEQC */
+ tcg_gen_brcond_tl(tcg_invert_cond(TCG_COND_EQ), t0, t1, fs);
+ } else {
+ /* OPC_BNEC */
+ tcg_gen_brcond_tl(tcg_invert_cond(TCG_COND_NE), t0, t1, fs);
+ }
+ }
+ break;
+ case OPC_BEQZC:
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_EQ), t0, 0, fs);
+ break;
+ case OPC_BNEZC:
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_NE), t0, 0, fs);
+ break;
+ default:
+ MIPS_INVAL("Compact conditional branch/jump");
+ generate_exception(ctx, EXCP_RI);
+ goto out;
+ }
+
+ /* Generate the branch here, as compact branches have no delay slot */
+ gen_goto_tb(ctx, 1, ctx->btarget);
+ gen_set_label(fs);
+
+ ctx->hflags |= MIPS_HFLAG_FBNSLOT;
+ MIPS_DEBUG("Compact conditional branch");
+ }
+
+out:
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+
+static void decode_opc_special_r6(CPUMIPSState *env, DisasContext *ctx)
+{
+ int rs, rt, rd, sa;
+ uint32_t op1, op2;
+
+ rs = (ctx->opcode >> 21) & 0x1f;
+ rt = (ctx->opcode >> 16) & 0x1f;
+ rd = (ctx->opcode >> 11) & 0x1f;
+ sa = (ctx->opcode >> 6) & 0x1f;
+
+ op1 = MASK_SPECIAL(ctx->opcode);
+ switch (op1) {
+ case OPC_LSA:
+ if (rd != 0) {
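+ /* LSA: GPR[rd] = sign_extend.32((GPR[rs] << (imm2 + 1)) + GPR[rt]) */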
+ int imm2 = extract32(ctx->opcode, 6, 3);
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+ tcg_gen_shli_tl(t0, t0, imm2 + 1);
+ tcg_gen_add_tl(t0, t0, t1);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+ }
+ break;
+ case OPC_MULT ... OPC_DIVU:
+ op2 = MASK_R6_MULDIV(ctx->opcode);
+ switch (op2) {
+ case R6_OPC_MUL:
+ case R6_OPC_MUH:
+ case R6_OPC_MULU:
+ case R6_OPC_MUHU:
+ case R6_OPC_DIV:
+ case R6_OPC_MOD:
+ case R6_OPC_DIVU:
+ case R6_OPC_MODU:
+ gen_r6_muldiv(ctx, op2, rd, rs, rt);
+ break;
+ default:
+ MIPS_INVAL("special_r6 muldiv");
+ generate_exception(ctx, EXCP_RI);
+ break;
+ }
+ break;
+ case OPC_SELEQZ:
+ case OPC_SELNEZ:
+ gen_cond_move(ctx, op1, rd, rs, rt);
+ break;
+ case R6_OPC_CLO:
+ case R6_OPC_CLZ:
+ if (rt == 0 && sa == 1) {
+ /* The major opcode and function field are shared with pre-R6
+ MFHI/MTHI, so we additionally need to check the other fields. */
+ gen_cl(ctx, op1, rd, rs);
+ } else {
+ generate_exception(ctx, EXCP_RI);
+ }
+ break;
+ case R6_OPC_SDBBP:
+ if (ctx->hflags & MIPS_HFLAG_SBRI) {
+ generate_exception(ctx, EXCP_RI);
+ } else {
+ generate_exception(ctx, EXCP_DBp);
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DLSA:
+ check_mips_64(ctx);
+ if (rd != 0) {
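+ /* DLSA: GPR[rd] = (GPR[rs] << (imm2 + 1)) + GPR[rt] */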
+ int imm2 = extract32(ctx->opcode, 6, 3);
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+ tcg_gen_shli_tl(t0, t0, imm2 + 1);
+ tcg_gen_add_tl(cpu_gpr[rd], t0, t1);
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+ }
+ break;
+ case R6_OPC_DCLO:
+ case R6_OPC_DCLZ:
+ if (rt == 0 && sa == 1) {
+ /* The major opcode and function field are shared with pre-R6
+ MFHI/MTHI, so we additionally need to check the other fields. */
+ check_mips_64(ctx);
+ gen_cl(ctx, op1, rd, rs);
+ } else {
+ generate_exception(ctx, EXCP_RI);
+ }
+ break;
+ case OPC_DMULT ... OPC_DDIVU:
+ op2 = MASK_R6_MULDIV(ctx->opcode);
+ switch (op2) {
+ case R6_OPC_DMUL:
+ case R6_OPC_DMUH:
+ case R6_OPC_DMULU:
+ case R6_OPC_DMUHU:
+ case R6_OPC_DDIV:
+ case R6_OPC_DMOD:
+ case R6_OPC_DDIVU:
+ case R6_OPC_DMODU:
+ check_mips_64(ctx);
+ gen_r6_muldiv(ctx, op2, rd, rs, rt);
+ break;
+ default:
+ MIPS_INVAL("special_r6 muldiv");
+ generate_exception(ctx, EXCP_RI);
+ break;
+ }
+ break;
+#endif
+ default: /* Invalid */
+ MIPS_INVAL("special_r6");
+ generate_exception(ctx, EXCP_RI);
break;
}
}
gen_muldiv(ctx, op1, 0, rs, rt);
break;
#endif
+ case OPC_JR:
+ gen_compute_branch(ctx, op1, 4, rs, rd, sa, 4);
+ break;
+ case OPC_SPIM:
+#ifdef MIPS_STRICT_STANDARD
+ MIPS_INVAL("SPIM");
+ generate_exception(ctx, EXCP_RI);
+#else
+ /* Implemented as RI exception for now. */
+ MIPS_INVAL("spim (unofficial)");
+ generate_exception(ctx, EXCP_RI);
+#endif
+ break;
default: /* Invalid */
MIPS_INVAL("special_legacy");
generate_exception(ctx, EXCP_RI);
op1 = MASK_SPECIAL(ctx->opcode);
switch (op1) {
case OPC_SLL: /* Shift with immediate */
+ if (sa == 5 && rd == 0 &&
+ rs == 0 && rt == 0) { /* PAUSE */
+ if ((ctx->insn_flags & ISA_MIPS32R6) &&
+ (ctx->hflags & MIPS_HFLAG_BMASK)) {
+ MIPS_DEBUG("CTI in delay / forbidden slot");
+ generate_exception(ctx, EXCP_RI);
+ break;
+ }
+ }
+ /* Fallthrough */
case OPC_SRA:
gen_shift_imm(ctx, op1, rd, rt, sa);
break;
case OPC_XOR:
gen_logic(ctx, op1, rd, rs, rt);
break;
- case OPC_JR ... OPC_JALR:
- gen_compute_branch(ctx, op1, 4, rs, rd, sa);
+ case OPC_JALR:
+ gen_compute_branch(ctx, op1, 4, rs, rd, sa, 4);
break;
case OPC_TGE ... OPC_TEQ: /* Traps */
case OPC_TNE:
gen_trap(ctx, op1, rs, rt, -1);
break;
- case OPC_PMON: /* Pmon entry point, also R4010 selsl */
+ case OPC_LSA: /* OPC_PMON */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ decode_opc_special_r6(env, ctx);
+ } else {
+ /* Pmon entry point, also R4010 selsl */
#ifdef MIPS_STRICT_STANDARD
- MIPS_INVAL("PMON / selsl");
- generate_exception(ctx, EXCP_RI);
+ MIPS_INVAL("PMON / selsl");
+ generate_exception(ctx, EXCP_RI);
#else
- gen_helper_0e0i(pmon, sa);
+ gen_helper_0e0i(pmon, sa);
#endif
+ }
break;
case OPC_SYSCALL:
generate_exception(ctx, EXCP_SYSCALL);
case OPC_BREAK:
generate_exception(ctx, EXCP_BREAK);
break;
- case OPC_SPIM:
-#ifdef MIPS_STRICT_STANDARD
- MIPS_INVAL("SPIM");
- generate_exception(ctx, EXCP_RI);
-#else
- /* Implemented as RI exception for now. */
- MIPS_INVAL("spim (unofficial)");
- generate_exception(ctx, EXCP_RI);
-#endif
- break;
case OPC_SYNC:
/* Treat as NOP. */
break;
}
}
-static void decode_opc_special2_r6(CPUMIPSState *env, DisasContext *ctx)
-{
- uint32_t op1;
-
- op1 = MASK_SPECIAL2(ctx->opcode);
- switch (op1) {
- default: /* Invalid */
- MIPS_INVAL("special2_r6");
- generate_exception(ctx, EXCP_RI);
- break;
- }
-}
-
static void decode_opc_special2_legacy(CPUMIPSState *env, DisasContext *ctx)
{
int rs, rt, rd;
uint32_t op1;
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+
rs = (ctx->opcode >> 21) & 0x1f;
rt = (ctx->opcode >> 16) & 0x1f;
rd = (ctx->opcode >> 11) & 0x1f;
check_insn(ctx, INSN_LOONGSON2F);
gen_loongson_integer(ctx, op1, rd, rs, rt);
break;
-#if defined(TARGET_MIPS64)
- case OPC_DMULT_G_2F:
- case OPC_DMULTU_G_2F:
- case OPC_DDIV_G_2F:
- case OPC_DDIVU_G_2F:
- case OPC_DMOD_G_2F:
- case OPC_DMODU_G_2F:
- check_insn(ctx, INSN_LOONGSON2F);
- gen_loongson_integer(ctx, op1, rd, rs, rt);
- break;
-#endif
- default: /* Invalid */
- MIPS_INVAL("special2_legacy");
- generate_exception(ctx, EXCP_RI);
- break;
- }
-}
-
-static void decode_opc_special2(CPUMIPSState *env, DisasContext *ctx)
-{
- int rs, rd;
- uint32_t op1;
-
- rs = (ctx->opcode >> 21) & 0x1f;
- rd = (ctx->opcode >> 11) & 0x1f;
-
- op1 = MASK_SPECIAL2(ctx->opcode);
- switch (op1) {
case OPC_CLO:
case OPC_CLZ:
check_insn(ctx, ISA_MIPS32);
check_mips_64(ctx);
gen_cl(ctx, op1, rd, rs);
break;
+ case OPC_DMULT_G_2F:
+ case OPC_DMULTU_G_2F:
+ case OPC_DDIV_G_2F:
+ case OPC_DDIVU_G_2F:
+ case OPC_DMOD_G_2F:
+ case OPC_DMODU_G_2F:
+ check_insn(ctx, INSN_LOONGSON2F);
+ gen_loongson_integer(ctx, op1, rd, rs, rt);
+ break;
#endif
- default:
- if (ctx->insn_flags & ISA_MIPS32R6) {
- decode_opc_special2_r6(env, ctx);
- } else {
- decode_opc_special2_legacy(env, ctx);
- }
+ default: /* Invalid */
+ MIPS_INVAL("special2_legacy");
+ generate_exception(ctx, EXCP_RI);
+ break;
}
}
static void decode_opc_special3_r6(CPUMIPSState *env, DisasContext *ctx)
{
- int rs, rt;
- uint32_t op1;
+ int rs, rt, rd, sa;
+ uint32_t op1, op2;
int16_t imm;
rs = (ctx->opcode >> 21) & 0x1f;
rt = (ctx->opcode >> 16) & 0x1f;
+ rd = (ctx->opcode >> 11) & 0x1f;
+ sa = (ctx->opcode >> 6) & 0x1f;
imm = (int16_t)ctx->opcode >> 7;
op1 = MASK_SPECIAL3(ctx->opcode);
case R6_OPC_LL:
gen_ld(ctx, op1, rt, rs, imm);
break;
+ case OPC_BSHFL:
+ {
+ if (rd == 0) {
+ /* Treat as NOP. */
+ break;
+ }
+ TCGv t0 = tcg_temp_new();
+ gen_load_gpr(t0, rt);
+
+ op2 = MASK_BSHFL(ctx->opcode);
+ switch (op2) {
+ case OPC_ALIGN ... OPC_ALIGN_END:
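+ /* ALIGN: GPR[rd] = sign_extend.32((GPR[rt] << (8 * bp)) |
+ (GPR[rs] >> (8 * (4 - bp)))) with bp = sa & 3 */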
+ sa &= 3;
+ if (sa == 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], t0);
+ } else {
+ TCGv t1 = tcg_temp_new();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ gen_load_gpr(t1, rs);
+ tcg_gen_concat_tl_i64(t2, t1, t0);
+ tcg_gen_shri_i64(t2, t2, 8 * (4 - sa));
+#if defined(TARGET_MIPS64)
+ tcg_gen_ext32s_i64(cpu_gpr[rd], t2);
+#else
+ tcg_gen_trunc_i64_i32(cpu_gpr[rd], t2);
+#endif
+ tcg_temp_free_i64(t2);
+ tcg_temp_free(t1);
+ }
+ break;
+ case OPC_BITSWAP:
+ gen_helper_bitswap(cpu_gpr[rd], t0);
+ break;
+ }
+ tcg_temp_free(t0);
+ }
+ break;
#if defined(TARGET_MIPS64)
case R6_OPC_SCD:
gen_st_cond(ctx, op1, rt, rs, imm);
case R6_OPC_LLD:
gen_ld(ctx, op1, rt, rs, imm);
break;
+ case OPC_DBSHFL:
+ check_mips_64(ctx);
+ {
+ if (rd == 0) {
+ /* Treat as NOP. */
+ break;
+ }
+ TCGv t0 = tcg_temp_new();
+ gen_load_gpr(t0, rt);
+
+ op2 = MASK_DBSHFL(ctx->opcode);
+ switch (op2) {
+ case OPC_DALIGN ... OPC_DALIGN_END:
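+ /* DALIGN: GPR[rd] = (GPR[rt] << (8 * bp)) |
+ (GPR[rs] >> (8 * (8 - bp))) with bp = sa & 7 */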
+ sa &= 7;
+ if (sa == 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], t0);
+ } else {
+ TCGv t1 = tcg_temp_new();
+ gen_load_gpr(t1, rs);
+ tcg_gen_shli_tl(t0, t0, 8 * sa);
+ tcg_gen_shri_tl(t1, t1, 8 * (8 - sa));
+ tcg_gen_or_tl(cpu_gpr[rd], t1, t0);
+ tcg_temp_free(t1);
+ }
+ break;
+ case OPC_DBITSWAP:
+ gen_helper_dbitswap(cpu_gpr[rd], t0);
+ break;
+ }
+ tcg_temp_free(t0);
+ }
+ break;
#endif
default: /* Invalid */
MIPS_INVAL("special3_r6");
gen_bitops(ctx, op1, rt, rs, sa, rd);
break;
case OPC_BSHFL:
- check_insn(ctx, ISA_MIPS32R2);
op2 = MASK_BSHFL(ctx->opcode);
- gen_bshfl(ctx, op2, rt, rd);
+ switch (op2) {
+ case OPC_ALIGN ... OPC_ALIGN_END:
+ case OPC_BITSWAP:
+ check_insn(ctx, ISA_MIPS32R6);
+ decode_opc_special3_r6(env, ctx);
+ break;
+ default:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_bshfl(ctx, op2, rt, rd);
+ break;
+ }
break;
#if defined(TARGET_MIPS64)
case OPC_DEXTM ... OPC_DEXT:
gen_bitops(ctx, op1, rt, rs, sa, rd);
break;
case OPC_DBSHFL:
- check_insn(ctx, ISA_MIPS64R2);
- check_mips_64(ctx);
op2 = MASK_DBSHFL(ctx->opcode);
- gen_bshfl(ctx, op2, rt, rd);
+ switch (op2) {
+ case OPC_DALIGN ... OPC_DALIGN_END:
+ case OPC_DBITSWAP:
+ check_insn(ctx, ISA_MIPS32R6);
+ decode_opc_special3_r6(env, ctx);
+ break;
+ default:
+ check_insn(ctx, ISA_MIPS64R2);
+ check_mips_64(ctx);
+ op2 = MASK_DBSHFL(ctx->opcode);
+ gen_bshfl(ctx, op2, rt, rd);
+ break;
+ }
break;
#endif
case OPC_RDHWR:
/* make sure instructions are on a word boundary */
if (ctx->pc & 0x3) {
env->CP0_BadVAddr = ctx->pc;
- generate_exception(ctx, EXCP_AdEL);
+ generate_exception_err(ctx, EXCP_AdEL, EXCP_INST_NOTAVAIL);
return;
}
decode_opc_special(env, ctx);
break;
case OPC_SPECIAL2:
- decode_opc_special2(env, ctx);
+ decode_opc_special2_legacy(env, ctx);
break;
case OPC_SPECIAL3:
decode_opc_special3(env, ctx);
check_insn_opc_removed(ctx, ISA_MIPS32R6);
case OPC_BLTZ:
case OPC_BGEZ:
+ gen_compute_branch(ctx, op1, 4, rs, -1, imm << 2, 4);
+ break;
case OPC_BLTZAL:
case OPC_BGEZAL:
- gen_compute_branch(ctx, op1, 4, rs, -1, imm << 2);
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ if (rs == 0) {
+ /* OPC_NAL, OPC_BAL */
+ gen_compute_branch(ctx, op1, 4, 0, -1, imm << 2, 4);
+ } else {
+ generate_exception(ctx, EXCP_RI);
+ }
+ } else {
+ gen_compute_branch(ctx, op1, 4, rs, -1, imm << 2, 4);
+ }
break;
case OPC_TGEI ... OPC_TEQI: /* REGIMM traps */
case OPC_TNEI:
break;
case OPC_SYNCI:
check_insn(ctx, ISA_MIPS32R2);
- /* Treat as NOP. */
+ /* Break the TB so that copied instructions can be synced up
+ immediately */
+ ctx->bstate = BS_STOP;
break;
case OPC_BPOSGE32: /* MIPS DSP branch */
#if defined(TARGET_MIPS64)
case OPC_BPOSGE64:
#endif
check_dsp(ctx);
- gen_compute_branch(ctx, op1, 4, -1, -2, (int32_t)imm << 2);
+ gen_compute_branch(ctx, op1, 4, -1, -2, (int32_t)imm << 2, 4);
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DAHI:
+ check_insn(ctx, ISA_MIPS32R6);
+ check_mips_64(ctx);
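+ /* DAHI: GPR[rs] += sign_extend(imm) << 32 */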
+ if (rs != 0) {
+ tcg_gen_addi_tl(cpu_gpr[rs], cpu_gpr[rs], (int64_t)imm << 32);
+ }
+ MIPS_DEBUG("dahi %s, %04x", regnames[rs], imm);
break;
+ case OPC_DATI:
+ check_insn(ctx, ISA_MIPS32R6);
+ check_mips_64(ctx);
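+ /* DATI: GPR[rs] += sign_extend(imm) << 48 */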
+ if (rs != 0) {
+ tcg_gen_addi_tl(cpu_gpr[rs], cpu_gpr[rs], (int64_t)imm << 48);
+ }
+ MIPS_DEBUG("dati %s, %04x", regnames[rs], imm);
+ break;
+#endif
default: /* Invalid */
MIPS_INVAL("regimm");
generate_exception(ctx, EXCP_RI);
break;
}
break;
- case OPC_ADDI: /* Arithmetic with immediate opcode */
+ case OPC_BOVC: /* OPC_BEQZALC, OPC_BEQC, OPC_ADDI */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ /* OPC_BOVC, OPC_BEQZALC, OPC_BEQC */
+ gen_compute_compact_branch(ctx, op, rs, rt, imm << 2);
+ } else {
+ /* OPC_ADDI */
+ /* Arithmetic with immediate opcode */
+ gen_arith_imm(ctx, op, rt, rs, imm);
+ }
+ break;
case OPC_ADDIU:
gen_arith_imm(ctx, op, rt, rs, imm);
break;
gen_slt_imm(ctx, op, rt, rs, imm);
break;
case OPC_ANDI: /* Arithmetic with immediate opcode */
- case OPC_LUI:
+ case OPC_LUI: /* OPC_AUI */
case OPC_ORI:
case OPC_XORI:
gen_logic_imm(ctx, op, rt, rs, imm);
break;
case OPC_J ... OPC_JAL: /* Jump */
offset = (int32_t)(ctx->opcode & 0x3FFFFFF) << 2;
- gen_compute_branch(ctx, op, 4, rs, rt, offset);
+ gen_compute_branch(ctx, op, 4, rs, rt, offset, 4);
break;
- case OPC_BEQL ... OPC_BGTZL: /* Branch */
+ /* Branch */
+ case OPC_BLEZC: /* OPC_BGEZC, OPC_BGEC, OPC_BLEZL */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ if (rt == 0) {
+ generate_exception(ctx, EXCP_RI);
+ break;
+ }
+ /* OPC_BLEZC, OPC_BGEZC, OPC_BGEC */
+ gen_compute_compact_branch(ctx, op, rs, rt, imm << 2);
+ } else {
+ /* OPC_BLEZL */
+ gen_compute_branch(ctx, op, 4, rs, rt, imm << 2, 4);
+ }
+ break;
+ case OPC_BGTZC: /* OPC_BLTZC, OPC_BLTC, OPC_BGTZL */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ if (rt == 0) {
+ generate_exception(ctx, EXCP_RI);
+ break;
+ }
+ /* OPC_BGTZC, OPC_BLTZC, OPC_BLTC */
+ gen_compute_compact_branch(ctx, op, rs, rt, imm << 2);
+ } else {
+ /* OPC_BGTZL */
+ gen_compute_branch(ctx, op, 4, rs, rt, imm << 2, 4);
+ }
+ break;
+ case OPC_BLEZALC: /* OPC_BGEZALC, OPC_BGEUC, OPC_BLEZ */
+ if (rt == 0) {
+ /* OPC_BLEZ */
+ gen_compute_branch(ctx, op, 4, rs, rt, imm << 2, 4);
+ } else {
+ check_insn(ctx, ISA_MIPS32R6);
+ /* OPC_BLEZALC, OPC_BGEZALC, OPC_BGEUC */
+ gen_compute_compact_branch(ctx, op, rs, rt, imm << 2);
+ }
+ break;
+ case OPC_BGTZALC: /* OPC_BLTZALC, OPC_BLTUC, OPC_BGTZ */
+ if (rt == 0) {
+ /* OPC_BGTZ */
+ gen_compute_branch(ctx, op, 4, rs, rt, imm << 2, 4);
+ } else {
+ check_insn(ctx, ISA_MIPS32R6);
+ /* OPC_BGTZALC, OPC_BLTZALC, OPC_BLTUC */
+ gen_compute_compact_branch(ctx, op, rs, rt, imm << 2);
+ }
+ break;
+ case OPC_BEQL:
+ case OPC_BNEL:
check_insn_opc_removed(ctx, ISA_MIPS32R6);
- case OPC_BEQ ... OPC_BGTZ:
- gen_compute_branch(ctx, op, 4, rs, rt, imm << 2);
+ case OPC_BEQ:
+ case OPC_BNE:
+ gen_compute_branch(ctx, op, 4, rs, rt, imm << 2, 4);
break;
case OPC_LWL: /* Load and stores */
case OPC_LWR:
gen_cp1(ctx, op1, rt, rd);
break;
#endif
- case OPC_BC1ANY2:
+ case OPC_BC1EQZ: /* OPC_BC1ANY2 */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ /* OPC_BC1EQZ */
+ gen_compute_branch1_r6(ctx, MASK_CP1(ctx->opcode),
+ rt, imm << 2);
+ } else {
+ /* OPC_BC1ANY2 */
+ check_cop1x(ctx);
+ check_insn(ctx, ASE_MIPS3D);
+ gen_compute_branch1(ctx, MASK_BC1(ctx->opcode),
+ (rt >> 2) & 0x7, imm << 2);
+ }
+ break;
+ case OPC_BC1NEZ:
+ check_insn(ctx, ISA_MIPS32R6);
+ gen_compute_branch1_r6(ctx, MASK_CP1(ctx->opcode),
+ rt, imm << 2);
+ break;
case OPC_BC1ANY4:
check_insn_opc_removed(ctx, ISA_MIPS32R6);
check_cop1x(ctx);
check_insn_opc_removed(ctx, ISA_MIPS32R6);
case OPC_S_FMT:
case OPC_D_FMT:
- case OPC_W_FMT:
- case OPC_L_FMT:
gen_farith(ctx, ctx->opcode & FOP(0x3f, 0x1f), rt, rd, sa,
(imm >> 8) & 0x7);
break;
+ case OPC_W_FMT:
+ case OPC_L_FMT:
+ {
+ int r6_op = ctx->opcode & FOP(0x3f, 0x1f);
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ switch (r6_op) {
+ case R6_OPC_CMP_AF_S:
+ case R6_OPC_CMP_UN_S:
+ case R6_OPC_CMP_EQ_S:
+ case R6_OPC_CMP_UEQ_S:
+ case R6_OPC_CMP_LT_S:
+ case R6_OPC_CMP_ULT_S:
+ case R6_OPC_CMP_LE_S:
+ case R6_OPC_CMP_ULE_S:
+ case R6_OPC_CMP_SAF_S:
+ case R6_OPC_CMP_SUN_S:
+ case R6_OPC_CMP_SEQ_S:
+ case R6_OPC_CMP_SEUQ_S:
+ case R6_OPC_CMP_SLT_S:
+ case R6_OPC_CMP_SULT_S:
+ case R6_OPC_CMP_SLE_S:
+ case R6_OPC_CMP_SULE_S:
+ case R6_OPC_CMP_OR_S:
+ case R6_OPC_CMP_UNE_S:
+ case R6_OPC_CMP_NE_S:
+ case R6_OPC_CMP_SOR_S:
+ case R6_OPC_CMP_SUNE_S:
+ case R6_OPC_CMP_SNE_S:
+ gen_r6_cmp_s(ctx, ctx->opcode & 0x1f, rt, rd, sa);
+ break;
+ case R6_OPC_CMP_AF_D:
+ case R6_OPC_CMP_UN_D:
+ case R6_OPC_CMP_EQ_D:
+ case R6_OPC_CMP_UEQ_D:
+ case R6_OPC_CMP_LT_D:
+ case R6_OPC_CMP_ULT_D:
+ case R6_OPC_CMP_LE_D:
+ case R6_OPC_CMP_ULE_D:
+ case R6_OPC_CMP_SAF_D:
+ case R6_OPC_CMP_SUN_D:
+ case R6_OPC_CMP_SEQ_D:
+ case R6_OPC_CMP_SEUQ_D:
+ case R6_OPC_CMP_SLT_D:
+ case R6_OPC_CMP_SULT_D:
+ case R6_OPC_CMP_SLE_D:
+ case R6_OPC_CMP_SULE_D:
+ case R6_OPC_CMP_OR_D:
+ case R6_OPC_CMP_UNE_D:
+ case R6_OPC_CMP_NE_D:
+ case R6_OPC_CMP_SOR_D:
+ case R6_OPC_CMP_SUNE_D:
+ case R6_OPC_CMP_SNE_D:
+ gen_r6_cmp_d(ctx, ctx->opcode & 0x1f, rt, rd, sa);
+ break;
+ default:
+ gen_farith(ctx, ctx->opcode & FOP(0x3f, 0x1f), rt, rd, sa,
+ (imm >> 8) & 0x7);
+ break;
+ }
+ } else {
+ gen_farith(ctx, ctx->opcode & FOP(0x3f, 0x1f), rt, rd, sa,
+ (imm >> 8) & 0x7);
+ }
+ break;
+ }
default:
MIPS_INVAL("cp1");
generate_exception (ctx, EXCP_RI);
}
break;
- /* COP2. */
- case OPC_LWC2:
- case OPC_LDC2:
- case OPC_SWC2:
- case OPC_SDC2:
- /* COP2: Not implemented. */
- generate_exception_err(ctx, EXCP_CpU, 2);
+ /* Compact branches [R6] and COP2 [non-R6] */
+ case OPC_BC: /* OPC_LWC2 */
+ case OPC_BALC: /* OPC_SWC2 */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ /* OPC_BC, OPC_BALC */
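+ /* The 26-bit offset is shifted left 2 and sign-extended to 28 bits */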
+ gen_compute_compact_branch(ctx, op, 0, 0,
+ sextract32(ctx->opcode << 2, 0, 28));
+ } else {
+ /* OPC_LWC2, OPC_SWC2 */
+ /* COP2: Not implemented. */
+ generate_exception_err(ctx, EXCP_CpU, 2);
+ }
+ break;
+ case OPC_BEQZC: /* OPC_JIC, OPC_LDC2 */
+ case OPC_BNEZC: /* OPC_JIALC, OPC_SDC2 */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ if (rs != 0) {
+ /* OPC_BEQZC, OPC_BNEZC */
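+ /* The 21-bit offset is shifted left 2 and sign-extended to 23 bits */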
+ gen_compute_compact_branch(ctx, op, rs, 0,
+ sextract32(ctx->opcode << 2, 0, 23));
+ } else {
+ /* OPC_JIC, OPC_JIALC */
+ gen_compute_compact_branch(ctx, op, 0, rt, imm);
+ }
+ } else {
+ /* OPC_LWC2, OPC_SWC2 */
+ /* COP2: Not implemented. */
+ generate_exception_err(ctx, EXCP_CpU, 2);
+ }
break;
case OPC_CP2:
check_insn(ctx, INSN_LOONGSON2F);
check_mips_64(ctx);
gen_st_cond(ctx, op, rt, rs, imm);
break;
- case OPC_DADDI:
+ case OPC_BNVC: /* OPC_BNEZALC, OPC_BNEC, OPC_DADDI */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ /* OPC_BNVC, OPC_BNEZALC, OPC_BNEC */
+ gen_compute_compact_branch(ctx, op, rs, rt, imm << 2);
+ } else {
+ /* OPC_DADDI */
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_arith_imm(ctx, op, rt, rs, imm);
+ }
+ break;
case OPC_DADDIU:
check_insn(ctx, ISA_MIPS3);
check_mips_64(ctx);
gen_arith_imm(ctx, op, rt, rs, imm);
break;
+#else
+ case OPC_BNVC: /* OPC_BNEZALC, OPC_BNEC */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ gen_compute_compact_branch(ctx, op, rs, rt, imm << 2);
+ } else {
+ MIPS_INVAL("major opcode");
+ generate_exception(ctx, EXCP_RI);
+ }
+ break;
#endif
- case OPC_JALX:
- check_insn(ctx, ASE_MIPS16 | ASE_MICROMIPS);
- offset = (int32_t)(ctx->opcode & 0x3FFFFFF) << 2;
- gen_compute_branch(ctx, op, 4, rs, rt, offset);
+ case OPC_DAUI: /* OPC_JALX */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+#if defined(TARGET_MIPS64)
+ /* OPC_DAUI */
+ check_mips_64(ctx);
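+ /* DAUI: GPR[rt] = GPR[rs] + sign_extend(imm << 16) */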
+ if (rt != 0) {
+ TCGv t0 = tcg_temp_new();
+ gen_load_gpr(t0, rs);
+ tcg_gen_addi_tl(cpu_gpr[rt], t0, imm << 16);
+ tcg_temp_free(t0);
+ }
+ MIPS_DEBUG("daui %s, %s, %04x", regnames[rt], regnames[rs], imm);
+#else
+ MIPS_INVAL("major opcode");
+ generate_exception(ctx, EXCP_RI);
+#endif
+ } else {
+ /* OPC_JALX */
+ check_insn(ctx, ASE_MIPS16 | ASE_MICROMIPS);
+ offset = (int32_t)(ctx->opcode & 0x3FFFFFF) << 2;
+ gen_compute_branch(ctx, op, 4, rs, rt, offset, 4);
+ }
break;
case OPC_MDMX:
check_insn(ctx, ASE_MDMX);
/* MDMX: Not implemented. */
+ break;
+ case OPC_PCREL:
+ check_insn(ctx, ISA_MIPS32R6);
+ gen_pcrel(ctx, rs, imm);
+ break;
default: /* Invalid */
MIPS_INVAL("major opcode");
generate_exception(ctx, EXCP_RI);
int num_insns;
int max_insns;
int insn_bytes;
- int is_delay;
+ int is_slot;
if (search_pc)
qemu_log("search pc %d\n", search_pc);
ctx.CP0_Config1 = env->CP0_Config1;
ctx.tb = tb;
ctx.bstate = BS_NONE;
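+ /* Cache the CP0 Config3/Config4 fields consulted during translation */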
+ ctx.kscrexist = (env->CP0_Config4 >> CP0C4_KScrExist) & 0xff;
+ ctx.rxi = (env->CP0_Config3 >> CP0C3_RXI) & 1;
+ ctx.ie = (env->CP0_Config4 >> CP0C4_IE) & 3;
+ ctx.bi = (env->CP0_Config3 >> CP0C3_BI) & 1;
+ ctx.bp = (env->CP0_Config3 >> CP0C3_BP) & 1;
/* Restore delay slot state from the tb context. */
ctx.hflags = (uint32_t)tb->flags; /* FIXME: maybe use 64 bits here? */
ctx.ulri = env->CP0_Config3 & (1 << CP0C3_ULRI);
if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
gen_io_start();
- is_delay = ctx.hflags & MIPS_HFLAG_BMASK;
+ is_slot = ctx.hflags & MIPS_HFLAG_BMASK;
if (!(ctx.hflags & MIPS_HFLAG_M16)) {
ctx.opcode = cpu_ldl_code(env, ctx.pc);
insn_bytes = 4;
ctx.bstate = BS_STOP;
break;
}
- if (is_delay) {
- handle_delay_slot(&ctx, insn_bytes);
+
+ if (ctx.hflags & MIPS_HFLAG_BMASK) {
+ if (!(ctx.hflags & (MIPS_HFLAG_BDS16 | MIPS_HFLAG_BDS32 |
+ MIPS_HFLAG_FBNSLOT))) {
+ /* Force the branch to be generated, as there is neither a delay
+ slot nor a forbidden slot */
+ is_slot = 1;
+ }
+ }
+ if (is_slot) {
+ gen_branch(&ctx, insn_bytes);
}
ctx.pc += insn_bytes;
cpu_LO[i] = tcg_global_mem_new(TCG_AREG0,
offsetof(CPUMIPSState, active_tc.LO[i]),
regnames_LO[i]);
- cpu_ACX[i] = tcg_global_mem_new(TCG_AREG0,
- offsetof(CPUMIPSState, active_tc.ACX[i]),
- regnames_ACX[i]);
}
cpu_dspctrl = tcg_global_mem_new(TCG_AREG0,
offsetof(CPUMIPSState, active_tc.DSPControl),
env->CP0_SRSConf3 = env->cpu_model->CP0_SRSConf3;
env->CP0_SRSConf4_rw_bitmask = env->cpu_model->CP0_SRSConf4_rw_bitmask;
env->CP0_SRSConf4 = env->cpu_model->CP0_SRSConf4;
+ env->CP0_PageGrain_rw_bitmask = env->cpu_model->CP0_PageGrain_rw_bitmask;
+ env->CP0_PageGrain = env->cpu_model->CP0_PageGrain;
env->active_fpu.fcr0 = env->cpu_model->CP1_fcr0;
env->insn_flags = env->cpu_model->insn_flags;
}
}
#endif
+ if ((env->insn_flags & ISA_MIPS32R6) &&
+ (env->active_fpu.fcr0 & (1 << FCR0_F64))) {
+ /* Status.FR = 0 mode in 64-bit FPU not allowed in R6 */
+ env->CP0_Status |= (1 << CP0St_FR);
+ }
+
compute_hflags(env);
cs->exception_index = EXCP_NONE;
}