* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
+#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "exec/semihost.h"
#include "trace-tcg.h"
+#include "exec/log.h"
#define MIPS_DEBUG_DISAS 0
OPC_TLTIU = (0x0B << 16) | OPC_REGIMM,
OPC_TEQI = (0x0C << 16) | OPC_REGIMM,
OPC_TNEI = (0x0E << 16) | OPC_REGIMM,
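+ /* SIGRIE: Signal Reserved Instruction Exception (Release 6) */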
+ OPC_SIGRIE = (0x17 << 16) | OPC_REGIMM,
OPC_SYNCI = (0x1F << 16) | OPC_REGIMM,
OPC_DAHI = (0x06 << 16) | OPC_REGIMM,
OPC_EVPE = 0x01 | (1 << 5) | OPC_MFMC0,
OPC_DI = (0 << 5) | (0x0C << 11) | OPC_MFMC0,
OPC_EI = (1 << 5) | (0x0C << 11) | OPC_MFMC0,
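+ /* DVP/EVP: R6 disable/enable of virtual processors (Config5.VP) */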
+ OPC_DVP = 0x04 | (0 << 3) | (1 << 5) | (0 << 11) | OPC_MFMC0,
+ OPC_EVP = 0x04 | (0 << 3) | (0 << 5) | (0 << 11) | OPC_MFMC0,
};
/* Coprocessor 0 (with rs == C0) */
};
/* global register indices */
-static TCGv_ptr cpu_env;
+static TCGv_env cpu_env;
static TCGv cpu_gpr[32], cpu_PC;
static TCGv cpu_HI[MIPS_DSP_ACC], cpu_LO[MIPS_DSP_ACC];
static TCGv cpu_dspctrl, btarget, bcond;
bool mvh;
int CP0_LLAddr_shift;
bool ps;
+ bool vp;
} DisasContext;
enum {
t0 = tcg_temp_new();
gen_load_gpr(t0, rt);
if (bp == 0) {
- tcg_gen_mov_tl(cpu_gpr[rd], t0);
+ switch (opc) {
+ case OPC_ALIGN:
+ tcg_gen_ext32s_tl(cpu_gpr[rd], t0);
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DALIGN:
+ tcg_gen_mov_tl(cpu_gpr[rd], t0);
+ break;
+#endif
+ }
} else {
TCGv t1 = tcg_temp_new();
gen_load_gpr(t1, rs);
gen_helper_mfc0_mvpconf1(arg, cpu_env);
rn = "MVPConf1";
break;
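+ /* CP0 register 0, select 4: VPControl (R6, requires Config5.VP) */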
+ case 4:
+ CP0_CHECK(ctx->vp);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPControl));
+ rn = "VPControl";
+ break;
default:
goto cp0_unimplemented;
}
}
rn = "EntryLo1";
break;
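+ /* CP0 register 3, select 1: GlobalNumber (R6, requires Config5.VP) */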
+ case 1:
+ CP0_CHECK(ctx->vp);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_GlobalNumber));
+ rn = "GlobalNumber";
+ break;
default:
goto cp0_unimplemented;
}
/* ignored */
rn = "MVPConf1";
break;
+ case 4:
+ CP0_CHECK(ctx->vp);
+ /* ignored */
+ rn = "VPControl";
+ break;
default:
goto cp0_unimplemented;
}
gen_helper_mtc0_entrylo1(cpu_env, arg);
rn = "EntryLo1";
break;
+ case 1:
+ CP0_CHECK(ctx->vp);
+ /* ignored */
+ rn = "GlobalNumber";
+ break;
default:
goto cp0_unimplemented;
}
gen_helper_mfc0_mvpconf1(arg, cpu_env);
rn = "MVPConf1";
break;
+ case 4:
+ CP0_CHECK(ctx->vp);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPControl));
+ rn = "VPControl";
+ break;
default:
goto cp0_unimplemented;
}
tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EntryLo1));
rn = "EntryLo1";
break;
+ case 1:
+ CP0_CHECK(ctx->vp);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_GlobalNumber));
+ rn = "GlobalNumber";
+ break;
default:
goto cp0_unimplemented;
}
/* ignored */
rn = "MVPConf1";
break;
+ case 4:
+ CP0_CHECK(ctx->vp);
+ /* ignored */
+ rn = "VPControl";
+ break;
default:
goto cp0_unimplemented;
}
gen_helper_dmtc0_entrylo1(cpu_env, arg);
rn = "EntryLo1";
break;
+ case 1:
+ CP0_CHECK(ctx->vp);
+ /* ignored */
+ rn = "GlobalNumber";
+ break;
default:
goto cp0_unimplemented;
}
}
}
-static void gen_rdhwr(DisasContext *ctx, int rt, int rd)
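+/* The sel field selects among multiple instances of a hardware register,
+ * e.g. the R6 performance counters. */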
+static void gen_rdhwr(DisasContext *ctx, int rt, int rd, int sel)
{
TCGv t0;
gen_helper_rdhwr_ccres(t0, cpu_env);
gen_store_gpr(t0, rt);
break;
+ case 4:
+ check_insn(ctx, ISA_MIPS32R6);
+ if (sel != 0) {
+ /* Performance counter registers other than control register 0
+ * are not implemented.
+ */
+ generate_exception(ctx, EXCP_RI);
+ }
+ gen_helper_rdhwr_performance(t0, cpu_env);
+ gen_store_gpr(t0, rt);
+ break;
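+ /* Hardware register 5: XNP, read from Config5.XNP (extended LL/SC family not present) */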
+ case 5:
+ check_insn(ctx, ISA_MIPS32R6);
+ gen_helper_rdhwr_xnp(t0, cpu_env);
+ gen_store_gpr(t0, rt);
+ break;
case 29:
#if defined(CONFIG_USER_ONLY)
tcg_gen_ld_tl(t0, cpu_env,
ROTR = 0x3,
SELEQZ = 0x5,
SELNEZ = 0x6,
+ R6_RDHWR = 0x7,
SLLV = 0x0,
SRLV = 0x1,
MODU = 0x7,
/* The following can be distinguished by their lower 6 bits. */
+ BREAK32 = 0x07,
INS = 0x0c,
LSA = 0x0f,
ALIGN = 0x1f,
EXT = 0x2c,
- POOL32AXF = 0x3c
+ POOL32AXF = 0x3c,
+ SIGRIE = 0x3f
};
/* POOL32AXF encoding of minor opcode field extension */
gen_cl(ctx, mips32_op, rt, rs);
break;
case RDHWR:
- gen_rdhwr(ctx, rt, rs);
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ gen_rdhwr(ctx, rt, rs, 0);
break;
case WSBH:
gen_bshfl(ctx, OPC_WSBH, rs, rt);
check_insn(ctx, ISA_MIPS32R6);
gen_cond_move(ctx, OPC_SELNEZ, rd, rs, rt);
break;
+ case R6_RDHWR:
+ check_insn(ctx, ISA_MIPS32R6);
+ gen_rdhwr(ctx, rt, rs, extract32(ctx->opcode, 11, 3));
+ break;
default:
goto pool32a_invalid;
}
case POOL32AXF:
gen_pool32axf(env, ctx, rt, rs);
break;
- case 0x07:
+ case BREAK32:
generate_exception_end(ctx, EXCP_BREAK);
break;
+ case SIGRIE:
+ check_insn(ctx, ISA_MIPS32R6);
+ generate_exception_end(ctx, EXCP_RI);
+ break;
default:
pool32a_invalid:
MIPS_INVAL("pool32a");
break;
#endif
case OPC_RDHWR:
- gen_rdhwr(ctx, rt, rd);
+ gen_rdhwr(ctx, rt, rd, extract32(ctx->opcode, 6, 3));
break;
case OPC_FORK:
check_insn(ctx, ASE_MT);
check_insn_opc_removed(ctx, ISA_MIPS32R6);
gen_trap(ctx, op1, rs, -1, imm);
break;
+ case OPC_SIGRIE:
+ check_insn(ctx, ISA_MIPS32R6);
+ generate_exception_end(ctx, EXCP_RI);
+ break;
case OPC_SYNCI:
check_insn(ctx, ISA_MIPS32R2);
/* Break the TB to be able to sync copied instructions
gen_helper_evpe(t0, cpu_env);
gen_store_gpr(t0, rt);
break;
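+ /* R6 DVP/EVP (virtual processor control); only acted upon when Config5.VP is set */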
+ case OPC_DVP:
+ check_insn(ctx, ISA_MIPS32R6);
+ if (ctx->vp) {
+ gen_helper_dvp(t0, cpu_env);
+ gen_store_gpr(t0, rt);
+ }
+ break;
+ case OPC_EVP:
+ check_insn(ctx, ISA_MIPS32R6);
+ if (ctx->vp) {
+ gen_helper_evp(t0, cpu_env);
+ gen_store_gpr(t0, rt);
+ }
+ break;
case OPC_DI:
check_insn(ctx, ISA_MIPS32R2);
save_cpu_state(ctx, 1);
ctx.ulri = (env->CP0_Config3 >> CP0C3_ULRI) & 1;
ctx.ps = ((env->active_fpu.fcr0 >> FCR0_PS) & 1) ||
(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F));
+ ctx.vp = (env->CP0_Config5 >> CP0C5_VP) & 1;
restore_cpu_state(env, &ctx);
#ifdef CONFIG_USER_ONLY
ctx.mem_idx = MIPS_HFLAG_UM;
save_cpu_state(&ctx, 1);
ctx.bstate = BS_BRANCH;
gen_helper_raise_exception_debug(cpu_env);
- /* Include the breakpoint location or the tb won't
- * be flushed when it must be. */
+ /* The address covered by the breakpoint must be included in
+ [tb->pc, tb->pc + tb->size) in order for it to be
+ properly cleared -- thus we increment the PC here so that
+ the logic setting tb->size below does the right thing. */
ctx.pc += 4;
goto done_generating;
}
return;
cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+
TCGV_UNUSED(cpu_gpr[0]);
for (i = 1; i < 32; i++)
- cpu_gpr[i] = tcg_global_mem_new(TCG_AREG0,
+ cpu_gpr[i] = tcg_global_mem_new(cpu_env,
offsetof(CPUMIPSState, active_tc.gpr[i]),
regnames[i]);
for (i = 0; i < 32; i++) {
int off = offsetof(CPUMIPSState, active_fpu.fpr[i].wr.d[0]);
msa_wr_d[i * 2] =
- tcg_global_mem_new_i64(TCG_AREG0, off, msaregnames[i * 2]);
+ tcg_global_mem_new_i64(cpu_env, off, msaregnames[i * 2]);
/* The scalar floating-point unit (FPU) registers are mapped onto
* the MSA vector registers. */
fpu_f64[i] = msa_wr_d[i * 2];
off = offsetof(CPUMIPSState, active_fpu.fpr[i].wr.d[1]);
msa_wr_d[i * 2 + 1] =
- tcg_global_mem_new_i64(TCG_AREG0, off, msaregnames[i * 2 + 1]);
+ tcg_global_mem_new_i64(cpu_env, off, msaregnames[i * 2 + 1]);
}
- cpu_PC = tcg_global_mem_new(TCG_AREG0,
+ cpu_PC = tcg_global_mem_new(cpu_env,
offsetof(CPUMIPSState, active_tc.PC), "PC");
for (i = 0; i < MIPS_DSP_ACC; i++) {
- cpu_HI[i] = tcg_global_mem_new(TCG_AREG0,
+ cpu_HI[i] = tcg_global_mem_new(cpu_env,
offsetof(CPUMIPSState, active_tc.HI[i]),
regnames_HI[i]);
- cpu_LO[i] = tcg_global_mem_new(TCG_AREG0,
+ cpu_LO[i] = tcg_global_mem_new(cpu_env,
offsetof(CPUMIPSState, active_tc.LO[i]),
regnames_LO[i]);
}
- cpu_dspctrl = tcg_global_mem_new(TCG_AREG0,
+ cpu_dspctrl = tcg_global_mem_new(cpu_env,
offsetof(CPUMIPSState, active_tc.DSPControl),
"DSPControl");
- bcond = tcg_global_mem_new(TCG_AREG0,
+ bcond = tcg_global_mem_new(cpu_env,
offsetof(CPUMIPSState, bcond), "bcond");
- btarget = tcg_global_mem_new(TCG_AREG0,
+ btarget = tcg_global_mem_new(cpu_env,
offsetof(CPUMIPSState, btarget), "btarget");
- hflags = tcg_global_mem_new_i32(TCG_AREG0,
+ hflags = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUMIPSState, hflags), "hflags");
- fpu_fcr0 = tcg_global_mem_new_i32(TCG_AREG0,
+ fpu_fcr0 = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUMIPSState, active_fpu.fcr0),
"fcr0");
- fpu_fcr31 = tcg_global_mem_new_i32(TCG_AREG0,
+ fpu_fcr31 = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUMIPSState, active_fpu.fcr31),
"fcr31");
env->CP0_Random = env->tlb->nb_tlb - 1;
env->tlb->tlb_in_use = env->tlb->nb_tlb;
env->CP0_Wired = 0;
+ env->CP0_GlobalNumber = (cs->cpu_index & 0xFF) << CP0GN_VPId;
env->CP0_EBase = (cs->cpu_index & 0x3FF);
if (kvm_enabled()) {
env->CP0_EBase |= 0x40000000;