struct TranslationBlock *tb;
int singlestep_enabled;
int thumb;
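+ /* Nonzero if instructions should be byte-swapped as they are
+ * fetched (BE8 code); consumed by arm_ldl_code()/arm_lduw_code().
+ */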
+ int bswap_code;
#if !defined(CONFIG_USER_ONLY)
int user;
#endif
for (i = 0; i < 16; i++) {
cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
- offsetof(CPUState, regs[i]),
+ offsetof(CPUARMState, regs[i]),
regnames[i]);
}
cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
- offsetof(CPUState, exclusive_addr), "exclusive_addr");
+ offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
- offsetof(CPUState, exclusive_val), "exclusive_val");
+ offsetof(CPUARMState, exclusive_val), "exclusive_val");
cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
- offsetof(CPUState, exclusive_high), "exclusive_high");
+ offsetof(CPUARMState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
- offsetof(CPUState, exclusive_test), "exclusive_test");
+ offsetof(CPUARMState, exclusive_test), "exclusive_test");
cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
- offsetof(CPUState, exclusive_info), "exclusive_info");
+ offsetof(CPUARMState, exclusive_info), "exclusive_info");
#endif
#define GEN_HELPER 1
return tmp;
}
-#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
+#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
static inline void store_cpu_offset(TCGv var, int offset)
{
}
#define store_cpu_field(var, name) \
- store_cpu_offset(var, offsetof(CPUState, name))
+ store_cpu_offset(var, offsetof(CPUARMState, name))
/* Set a variable to the value of a CPU register. */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
tcg_temp_free_i32(t1);
}
-#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
+#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, CF))
/* Set CF to the top bit of var. */
static void gen_set_CF_bit31(TCGv var)
/* Set N and Z flags from var. */
static inline void gen_logic_CC(TCGv var)
{
- tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
- tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
+ tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, NF));
+ tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, ZF));
}
/* T0 += T1 + CF. */
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
case 1:
tmp = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
+ tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
PAS_OP(s)
tcg_temp_free_ptr(tmp);
break;
case 5:
tmp = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
+ tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
PAS_OP(u)
tcg_temp_free_ptr(tmp);
break;
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
case 0:
tmp = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
+ tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
PAS_OP(s)
tcg_temp_free_ptr(tmp);
break;
case 4:
tmp = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
+ tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
PAS_OP(u)
tcg_temp_free_ptr(tmp);
break;
if (s->thumb != (addr & 1)) {
tmp = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp, addr & 1);
- tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
+ tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
tcg_temp_free_i32(tmp);
}
tcg_gen_movi_i32(cpu_R[15], addr & ~1);
/* Variant of store_reg which uses branch&exchange logic when storing
to r15 in ARM architecture v7 and above. The source must be a temporary
and will be marked as dead. */
-static inline void store_reg_bx(CPUState *env, DisasContext *s,
+static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
int reg, TCGv var)
{
if (reg == 15 && ENABLE_ARCH_7) {
* to r15 in ARM architecture v5T and above. This is used for storing
* the results of a LDR/LDM/POP into r15, and corresponds to the cases
* in the ARM ARM which use the LoadWritePC() pseudocode function. */
-static inline void store_reg_from_load(CPUState *env, DisasContext *s,
+static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
int reg, TCGv var)
{
if (reg == 15 && ENABLE_ARCH_5) {
TCGv_ptr statusptr = tcg_temp_new_ptr();
int offset;
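+ /* Neon arithmetic always uses the ARM "standard FPSCR" behaviour
+ * (flush-to-zero, default NaN), hence the separate status word.
+ */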
if (neon) {
- offset = offsetof(CPUState, vfp.standard_fp_status);
+ offset = offsetof(CPUARMState, vfp.standard_fp_status);
} else {
- offset = offsetof(CPUState, vfp.fp_status);
+ offset = offsetof(CPUARMState, vfp.fp_status);
}
tcg_gen_addi_ptr(statusptr, cpu_env, offset);
return statusptr;
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
- tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
+ tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}
static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
- tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
+ tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}
static inline TCGv iwmmxt_load_creg(int reg)
{
TCGv var = tcg_temp_new_i32();
- tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
+ tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
return var;
}
static inline void iwmmxt_store_creg(int reg, TCGv var)
{
- tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
+ tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
tcg_temp_free_i32(var);
}
/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
(i.e. an undefined instruction). */
-static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
+static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
int rd, wrd;
int rdhi, rdlo, rd0, rd1, i;
/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
(i.e. an undefined instruction). */
-static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
+static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
int acc, rd0, rd1, rdhi, rdlo;
TCGv tmp, tmp2;
return 1;
}
-/* Disassemble system coprocessor instruction. Return nonzero if
- instruction is not defined. */
-static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
-{
- TCGv tmp, tmp2;
- uint32_t rd = (insn >> 12) & 0xf;
- uint32_t cp = (insn >> 8) & 0xf;
- if (IS_USER(s)) {
- return 1;
- }
-
- if (insn & ARM_CP_RW_BIT) {
- if (!env->cp[cp].cp_read)
- return 1;
- gen_set_pc_im(s->pc);
- tmp = tcg_temp_new_i32();
- tmp2 = tcg_const_i32(insn);
- gen_helper_get_cp(tmp, cpu_env, tmp2);
- tcg_temp_free(tmp2);
- store_reg(s, rd, tmp);
- } else {
- if (!env->cp[cp].cp_write)
- return 1;
- gen_set_pc_im(s->pc);
- tmp = load_reg(s, rd);
- tmp2 = tcg_const_i32(insn);
- gen_helper_set_cp(cpu_env, tmp2, tmp);
- tcg_temp_free(tmp2);
- tcg_temp_free_i32(tmp);
- }
- return 0;
-}
-
-static int cp15_user_ok(CPUState *env, uint32_t insn)
-{
- int cpn = (insn >> 16) & 0xf;
- int cpm = insn & 0xf;
- int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
-
- if (arm_feature(env, ARM_FEATURE_V7) && cpn == 9) {
- /* Performance monitor registers fall into three categories:
- * (a) always UNDEF in usermode
- * (b) UNDEF only if PMUSERENR.EN is 0
- * (c) always read OK and UNDEF on write (PMUSERENR only)
- */
- if ((cpm == 12 && (op < 6)) ||
- (cpm == 13 && (op < 3))) {
- return env->cp15.c9_pmuserenr;
- } else if (cpm == 14 && op == 0 && (insn & ARM_CP_RW_BIT)) {
- /* PMUSERENR, read only */
- return 1;
- }
- return 0;
- }
-
- if (cpn == 13 && cpm == 0) {
- /* TLS register. */
- if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
- return 1;
- }
- return 0;
-}
-
-static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
-{
- TCGv tmp;
- int cpn = (insn >> 16) & 0xf;
- int cpm = insn & 0xf;
- int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
-
- if (!arm_feature(env, ARM_FEATURE_V6K))
- return 0;
-
- if (!(cpn == 13 && cpm == 0))
- return 0;
-
- if (insn & ARM_CP_RW_BIT) {
- switch (op) {
- case 2:
- tmp = load_cpu_field(cp15.c13_tls1);
- break;
- case 3:
- tmp = load_cpu_field(cp15.c13_tls2);
- break;
- case 4:
- tmp = load_cpu_field(cp15.c13_tls3);
- break;
- default:
- return 0;
- }
- store_reg(s, rd, tmp);
-
- } else {
- tmp = load_reg(s, rd);
- switch (op) {
- case 2:
- store_cpu_field(tmp, cp15.c13_tls1);
- break;
- case 3:
- store_cpu_field(tmp, cp15.c13_tls2);
- break;
- case 4:
- store_cpu_field(tmp, cp15.c13_tls3);
- break;
- default:
- tcg_temp_free_i32(tmp);
- return 0;
- }
- }
- return 1;
-}
-
-/* Disassemble system coprocessor (cp15) instruction. Return nonzero if
- instruction is not defined. */
-static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
-{
- uint32_t rd;
- TCGv tmp, tmp2;
-
- /* M profile cores use memory mapped registers instead of cp15. */
- if (arm_feature(env, ARM_FEATURE_M))
- return 1;
-
- if ((insn & (1 << 25)) == 0) {
- if (insn & (1 << 20)) {
- /* mrrc */
- return 1;
- }
- /* mcrr. Used for block cache operations, so implement as no-op. */
- return 0;
- }
- if ((insn & (1 << 4)) == 0) {
- /* cdp */
- return 1;
- }
- /* We special case a number of cp15 instructions which were used
- * for things which are real instructions in ARMv7. This allows
- * them to work in linux-user mode which doesn't provide functional
- * get_cp15/set_cp15 helpers, and is more efficient anyway.
- */
- switch ((insn & 0x0fff0fff)) {
- case 0x0e070f90:
- /* 0,c7,c0,4: Standard v6 WFI (also used in some pre-v6 cores).
- * In v7, this must NOP.
- */
- if (IS_USER(s)) {
- return 1;
- }
- if (!arm_feature(env, ARM_FEATURE_V7)) {
- /* Wait for interrupt. */
- gen_set_pc_im(s->pc);
- s->is_jmp = DISAS_WFI;
- }
- return 0;
- case 0x0e070f58:
- /* 0,c7,c8,2: Not all pre-v6 cores implemented this WFI,
- * so this is slightly over-broad.
- */
- if (!IS_USER(s) && !arm_feature(env, ARM_FEATURE_V6)) {
- /* Wait for interrupt. */
- gen_set_pc_im(s->pc);
- s->is_jmp = DISAS_WFI;
- return 0;
- }
- /* Otherwise continue to handle via helper function.
- * In particular, on v7 and some v6 cores this is one of
- * the VA-PA registers.
- */
- break;
- case 0x0e070f3d:
- /* 0,c7,c13,1: prefetch-by-MVA in v6, NOP in v7 */
- if (arm_feature(env, ARM_FEATURE_V6)) {
- return IS_USER(s) ? 1 : 0;
- }
- break;
- case 0x0e070f95: /* 0,c7,c5,4 : ISB */
- case 0x0e070f9a: /* 0,c7,c10,4: DSB */
- case 0x0e070fba: /* 0,c7,c10,5: DMB */
- /* Barriers in both v6 and v7 */
- if (arm_feature(env, ARM_FEATURE_V6)) {
- return 0;
- }
- break;
- default:
- break;
- }
-
- if (IS_USER(s) && !cp15_user_ok(env, insn)) {
- return 1;
- }
-
- rd = (insn >> 12) & 0xf;
-
- if (cp15_tls_load_store(env, s, insn, rd))
- return 0;
-
- tmp2 = tcg_const_i32(insn);
- if (insn & ARM_CP_RW_BIT) {
- tmp = tcg_temp_new_i32();
- gen_helper_get_cp15(tmp, cpu_env, tmp2);
- /* If the destination register is r15 then sets condition codes. */
- if (rd != 15)
- store_reg(s, rd, tmp);
- else
- tcg_temp_free_i32(tmp);
- } else {
- tmp = load_reg(s, rd);
- gen_helper_set_cp15(cpu_env, tmp2, tmp);
- tcg_temp_free_i32(tmp);
- /* Normally we would always end the TB here, but Linux
- * arch/arm/mach-pxa/sleep.S expects two instructions following
- * an MMU enable to execute from cache. Imitate this behaviour. */
- if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
- (insn & 0x0fff0fff) != 0x0e010f10)
- gen_lookup_tb(s);
- }
- tcg_temp_free_i32(tmp2);
- return 0;
-}
-
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
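+ /* Worked example, assuming the usual single-precision Vd/D layout
+ * (Vd at [15:12], D at bit 22): VFP_SREG(insn, 12, 22) shifts Vd
+ * down by 11 into bits [4:1], masks with 0x1e, and ORs in the D
+ * bit as bit 0, giving Sd = (Vd << 1) | D. The negative-shift
+ * branch of VFP_REG_SHR handles small values of bigbit, where
+ * bigbit - 1 would otherwise be a negative (undefined) C shift count.
+ */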
/* Disassemble a VFP instruction. Returns nonzero if an error occurred
(i.e. an undefined instruction). */
-static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
+static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
{
uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
int dp, veclen;
case ARM_VFP_MVFR0:
case ARM_VFP_MVFR1:
if (IS_USER(s)
- || !arm_feature(env, ARM_FEATURE_VFP3))
+ || !arm_feature(env, ARM_FEATURE_MVFR))
return 1;
tmp = load_cpu_field(vfp.xregs[rn]);
break;
case 8: /* div: fn / fm */
gen_vfp_div(dp);
break;
+ case 10: /* VFNMA: fd = muladd(-fd, fn, fm) */
+ case 11: /* VFNMS: fd = muladd(-fd, -fn, fm) */
+ case 12: /* VFMA : fd = muladd( fd, fn, fm) */
+ case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
+ /* These are fused multiply-add operations, and must be done as one
+ * floating-point operation with no rounding between the
+ * multiplication and addition steps.
+ * NB that doing the negations here as separate steps is
+ * correct: an input NaN should come out with its sign bit
+ * flipped if it is a negated input.
+ */
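+ /* A sketch of the four resulting computations, each fused with a
+ * single rounding at the end:
+ * VFMA: fd = fd + fn*fm VFMS: fd = fd + (-fn)*fm
+ * VFNMA: fd = -fd + fn*fm VFNMS: fd = -fd + (-fn)*fm
+ */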
+ if (!arm_feature(env, ARM_FEATURE_VFP4)) {
+ return 1;
+ }
+ if (dp) {
+ TCGv_ptr fpst;
+ TCGv_i64 frd;
+ if (op & 1) {
+ /* VFNMS, VFMS */
+ gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
+ }
+ frd = tcg_temp_new_i64();
+ tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
+ if (op & 2) {
+ /* VFNMA, VFNMS */
+ gen_helper_vfp_negd(frd, frd);
+ }
+ fpst = get_fpstatus_ptr(0);
+ gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
+ cpu_F1d, frd, fpst);
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i64(frd);
+ } else {
+ TCGv_ptr fpst;
+ TCGv_i32 frd;
+ if (op & 1) {
+ /* VFNMS, VFMS */
+ gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
+ }
+ frd = tcg_temp_new_i32();
+ tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
+ if (op & 2) {
+ gen_helper_vfp_negs(frd, frd);
+ }
+ fpst = get_fpstatus_ptr(0);
+ gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
+ cpu_F1s, frd, fpst);
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i32(frd);
+ }
+ break;
case 14: /* fconst */
if (!arm_feature(env, ARM_FEATURE_VFP3))
return 1;
}
/* Return the mask of PSR bits set by an MSR instruction. */
-static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
+static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
uint32_t mask;
mask = 0;
/* Translate a NEON load/store element instruction. Return nonzero if the
instruction is invalid. */
-static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
+static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
{
int rd, rn, rm;
int op;
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
+#define NEON_3R_VFM 25 /* VFMA, VFMS: float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
[NEON_3R_VPMIN] = 0x7,
[NEON_3R_VQDMULH_VQRDMULH] = 0x6,
[NEON_3R_VPADD] = 0x7,
+ [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
[NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
[NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
[NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
We process data in a mixture of 32-bit and 64-bit chunks.
Mostly we use 32-bit chunks so we can use normal scalar instructions. */
-static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
+static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
{
int op;
int q;
return 1;
}
break;
+ case NEON_3R_VFM:
+ if (!arm_feature(env, ARM_FEATURE_VFP4) || u) {
+ return 1;
+ }
+ break;
default:
break;
}
else
gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
break;
+ case NEON_3R_VFM:
+ {
+ /* VFMA, VFMS: fused multiply-add */
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ TCGv_i32 tmp3 = neon_load_reg(rd, pass);
+ if (size) {
+ /* VFMS */
+ gen_helper_vfp_negs(tmp, tmp);
+ }
+ gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
+ tcg_temp_free_i32(tmp3);
+ tcg_temp_free_ptr(fpstatus);
+ break;
+ }
default:
abort();
}
return 0;
}
-static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
-{
- int crn = (insn >> 16) & 0xf;
- int crm = insn & 0xf;
- int op1 = (insn >> 21) & 7;
- int op2 = (insn >> 5) & 7;
- int rt = (insn >> 12) & 0xf;
- TCGv tmp;
-
- /* Minimal set of debug registers, since we don't support debug */
- if (op1 == 0 && crn == 0 && op2 == 0) {
- switch (crm) {
- case 0:
- /* DBGDIDR: just RAZ. In particular this means the
- * "debug architecture version" bits will read as
- * a reserved value, which should cause Linux to
- * not try to use the debug hardware.
- */
- tmp = tcg_const_i32(0);
- store_reg(s, rt, tmp);
- return 0;
- case 1:
- case 2:
- /* DBGDRAR and DBGDSAR: v7 only. Always RAZ since we
- * don't implement memory mapped debug components
- */
- if (ENABLE_ARCH_7) {
- tmp = tcg_const_i32(0);
- store_reg(s, rt, tmp);
- return 0;
- }
- break;
- default:
- break;
- }
- }
-
- if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
- if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
- /* TEECR */
- if (IS_USER(s))
- return 1;
- tmp = load_cpu_field(teecr);
- store_reg(s, rt, tmp);
- return 0;
- }
- if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
- /* TEEHBR */
- if (IS_USER(s) && (env->teecr & 1))
- return 1;
- tmp = load_cpu_field(teehbr);
- store_reg(s, rt, tmp);
- return 0;
- }
- }
- return 1;
-}
-
-static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
+static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
{
- int crn = (insn >> 16) & 0xf;
- int crm = insn & 0xf;
- int op1 = (insn >> 21) & 7;
- int op2 = (insn >> 5) & 7;
- int rt = (insn >> 12) & 0xf;
- TCGv tmp;
-
- if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
- if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
- /* TEECR */
- if (IS_USER(s))
- return 1;
- tmp = load_reg(s, rt);
- gen_helper_set_teecr(cpu_env, tmp);
- tcg_temp_free_i32(tmp);
- return 0;
- }
- if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
- /* TEEHBR */
- if (IS_USER(s) && (env->teecr & 1))
- return 1;
- tmp = load_reg(s, rt);
- store_cpu_field(tmp, teehbr);
- return 0;
- }
- }
- return 1;
-}
-
-static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
-{
- int cpnum;
+ int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
+ const ARMCPRegInfo *ri;
+ ARMCPU *cpu = arm_env_get_cpu(env);
cpnum = (insn >> 8) & 0xf;
if (arm_feature(env, ARM_FEATURE_XSCALE)
&& ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
return 1;
+ /* First check for coprocessor space used for actual instructions */
switch (cpnum) {
case 0:
case 1:
case 10:
case 11:
return disas_vfp_insn (env, s, insn);
- case 14:
- /* Coprocessors 7-15 are architecturally reserved by ARM.
- Unfortunately Intel decided to ignore this. */
- if (arm_feature(env, ARM_FEATURE_XSCALE))
- goto board;
- if (insn & (1 << 20))
- return disas_cp14_read(env, s, insn);
- else
- return disas_cp14_write(env, s, insn);
- case 15:
- return disas_cp15_insn (env, s, insn);
default:
- board:
- /* Unknown coprocessor. See if the board has hooked it. */
- return disas_cp_insn (env, s, insn);
+ break;
+ }
+
+ /* Otherwise treat as a generic register access */
+ is64 = (insn & (1 << 25)) == 0;
+ if (!is64 && ((insn & (1 << 4)) == 0)) {
+ /* cdp */
+ return 1;
+ }
+
+ crm = insn & 0xf;
+ if (is64) {
+ crn = 0;
+ opc1 = (insn >> 4) & 0xf;
+ opc2 = 0;
+ rt2 = (insn >> 16) & 0xf;
+ } else {
+ crn = (insn >> 16) & 0xf;
+ opc1 = (insn >> 21) & 7;
+ opc2 = (insn >> 5) & 7;
+ rt2 = 0;
}
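+ /* To recap the field layout: for MCRR/MRRC (is64) the second
+ * core register Rt2 sits at [19:16] and opc1 at [7:4], with no
+ * crn/opc2; for MCR/MRC, crn is at [19:16], opc1 at [23:21] and
+ * opc2 at [7:5]. In both forms crm is at [3:0], rt at [15:12]
+ * and the L (read) bit at bit 20.
+ */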
+ isread = (insn >> 20) & 1;
+ rt = (insn >> 12) & 0xf;
+
+ ri = get_arm_cp_reginfo(cpu,
+ ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
+ if (ri) {
+ /* Check access permissions */
+ if (!cp_access_ok(env, ri, isread)) {
+ return 1;
+ }
+
+ /* Handle special cases first */
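+ /* The switch value keeps ARM_CP_SPECIAL plus the low enumeration
+ * bits of ri->type and clears the remaining flag bits (assuming
+ * ARM_CP_FLAG_MASK covers all flag bits), so ordinary flagged
+ * registers fall through to the default case.
+ */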
+ switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
+ case ARM_CP_NOP:
+ return 0;
+ case ARM_CP_WFI:
+ if (isread) {
+ return 1;
+ }
+ gen_set_pc_im(s->pc);
+ s->is_jmp = DISAS_WFI;
+ break;
+ default:
+ break;
+ }
+
+ if (isread) {
+ /* Read */
+ if (is64) {
+ TCGv_i64 tmp64;
+ TCGv_i32 tmp;
+ if (ri->type & ARM_CP_CONST) {
+ tmp64 = tcg_const_i64(ri->resetvalue);
+ } else if (ri->readfn) {
+ TCGv_ptr tmpptr;
+ gen_set_pc_im(s->pc);
+ tmp64 = tcg_temp_new_i64();
+ tmpptr = tcg_const_ptr(ri);
+ gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
+ tcg_temp_free_ptr(tmpptr);
+ } else {
+ tmp64 = tcg_temp_new_i64();
+ tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
+ }
+ tmp = tcg_temp_new_i32();
+ tcg_gen_trunc_i64_i32(tmp, tmp64);
+ store_reg(s, rt, tmp);
+ /* store_reg() frees its input temp, so allocate a fresh one
+ * for the high half and release the i64 when done.
+ */
+ tmp = tcg_temp_new_i32();
+ tcg_gen_shri_i64(tmp64, tmp64, 32);
+ tcg_gen_trunc_i64_i32(tmp, tmp64);
+ tcg_temp_free_i64(tmp64);
+ store_reg(s, rt2, tmp);
+ } else {
+ TCGv tmp;
+ if (ri->type & ARM_CP_CONST) {
+ tmp = tcg_const_i32(ri->resetvalue);
+ } else if (ri->readfn) {
+ TCGv_ptr tmpptr;
+ gen_set_pc_im(s->pc);
+ tmp = tcg_temp_new_i32();
+ tmpptr = tcg_const_ptr(ri);
+ gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
+ tcg_temp_free_ptr(tmpptr);
+ } else {
+ tmp = load_cpu_offset(ri->fieldoffset);
+ }
+ if (rt == 15) {
+ /* A destination register of r15 for a 32-bit load sets
+ * the condition codes from the high 4 bits of the value.
+ */
+ gen_set_nzcv(tmp);
+ tcg_temp_free_i32(tmp);
+ } else {
+ store_reg(s, rt, tmp);
+ }
+ }
+ } else {
+ /* Write */
+ if (ri->type & ARM_CP_CONST) {
+ /* If not forbidden by access permissions, treat as WI */
+ return 0;
+ }
+
+ if (is64) {
+ TCGv tmplo, tmphi;
+ TCGv_i64 tmp64 = tcg_temp_new_i64();
+ tmplo = load_reg(s, rt);
+ tmphi = load_reg(s, rt2);
+ tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
+ tcg_temp_free_i32(tmplo);
+ tcg_temp_free_i32(tmphi);
+ if (ri->writefn) {
+ TCGv_ptr tmpptr = tcg_const_ptr(ri);
+ gen_set_pc_im(s->pc);
+ gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
+ tcg_temp_free_ptr(tmpptr);
+ } else {
+ tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
+ }
+ tcg_temp_free_i64(tmp64);
+ } else {
+ if (ri->writefn) {
+ TCGv tmp;
+ TCGv_ptr tmpptr;
+ gen_set_pc_im(s->pc);
+ tmp = load_reg(s, rt);
+ tmpptr = tcg_const_ptr(ri);
+ gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
+ tcg_temp_free_ptr(tmpptr);
+ tcg_temp_free_i32(tmp);
+ } else {
+ TCGv tmp = load_reg(s, rt);
+ store_cpu_offset(tmp, ri->fieldoffset);
+ }
+ }
+ /* We default to ending the TB on a coprocessor register write,
+ * but allow this to be suppressed by the register definition
+ * (usually only necessary to work around guest bugs).
+ */
+ if (!(ri->type & ARM_CP_SUPPRESS_TB_END)) {
+ gen_lookup_tb(s);
+ }
+ }
+ return 0;
+ }
+
+ return 1;
}
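+ /* For illustration only (hypothetical entry; field names as used
+ * above): a constant read-only register can be described without
+ * any helper, e.g.
+ * static const ARMCPRegInfo demo = {
+ * .type = ARM_CP_CONST, .resetvalue = 0x1234,
+ * };
+ * in which case the read path above emits a tcg_const_*() of
+ * resetvalue, whereas .readfn/.writefn hooks force a helper call
+ * with the PC synced first via gen_set_pc_im().
+ */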
}
#endif
-static void disas_arm_insn(CPUState * env, DisasContext *s)
+static void disas_arm_insn(CPUARMState * env, DisasContext *s)
{
unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
TCGv tmp;
TCGv addr;
TCGv_i64 tmp64;
- insn = ldl_code(s->pc);
+ insn = arm_ldl_code(s->pc, s->bswap_code);
s->pc += 4;
/* M variants do not implement ARM mode. */
if ((insn & 0x0ffffdff) == 0x01010000) {
ARCH(6);
/* setend */
- if (insn & (1 << 9)) {
- /* BE8 mode not implemented. */
+ if (((insn >> 9) & 1) != s->bswap_code) {
+ /* Dynamic endianness switching not implemented. */
goto illegal_op;
}
return;
tmp = load_reg(s, rn);
tmp2 = load_reg(s, rm);
tmp3 = tcg_temp_new_i32();
- tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
+ tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
tcg_temp_free_i32(tmp3);
tcg_temp_free_i32(tmp2);
}
break;
case 2: /* Multiplies (Type 3). */
- tmp = load_reg(s, rm);
- tmp2 = load_reg(s, rs);
- if (insn & (1 << 20)) {
+ switch ((insn >> 20) & 0x7) {
+ case 5:
+ if (((insn >> 6) ^ (insn >> 7)) & 1) {
+ /* op2 not 00x or 11x: UNDEF */
+ goto illegal_op;
+ }
/* Signed multiply most significant [accumulate].
(SMMUL, SMMLA, SMMLS) */
+ tmp = load_reg(s, rm);
+ tmp2 = load_reg(s, rs);
tmp64 = gen_muls_i64_i32(tmp, tmp2);
if (rd != 15) {
tcg_gen_trunc_i64_i32(tmp, tmp64);
tcg_temp_free_i64(tmp64);
store_reg(s, rn, tmp);
- } else {
+ break;
+ case 0:
+ case 4:
+ /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
+ if (insn & (1 << 7)) {
+ goto illegal_op;
+ }
+ tmp = load_reg(s, rm);
+ tmp2 = load_reg(s, rs);
if (insn & (1 << 5))
gen_swap_half(tmp2);
gen_smul_dual(tmp, tmp2);
}
store_reg(s, rn, tmp);
}
+ break;
+ case 1:
+ case 3:
+ /* SDIV, UDIV */
+ if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) {
+ goto illegal_op;
+ }
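+ /* Encoding checks: op2 at bits [7:5] must be 000 and the field
+ * at [15:12] (rd in this decoder) must be 0b1111, else UNDEF.
+ */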
+ if (((insn >> 5) & 7) || (rd != 15)) {
+ goto illegal_op;
+ }
+ tmp = load_reg(s, rm);
+ tmp2 = load_reg(s, rs);
+ if (insn & (1 << 21)) {
+ gen_helper_udiv(tmp, tmp, tmp2);
+ } else {
+ gen_helper_sdiv(tmp, tmp, tmp2);
+ }
+ tcg_temp_free_i32(tmp2);
+ store_reg(s, rn, tmp);
+ break;
+ default:
+ goto illegal_op;
}
break;
case 3:
/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
is not legal. */
-static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
+static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
{
uint32_t insn, imm, shift, offset;
uint32_t rd, rn, rm, rs;
/* Fall through to 32-bit decode. */
}
- insn = lduw_code(s->pc);
+ insn = arm_lduw_code(s->pc, s->bswap_code);
s->pc += 2;
insn |= (uint32_t)insn_hw1 << 16;
case 0x10: /* sel */
tmp2 = load_reg(s, rm);
tmp3 = tcg_temp_new_i32();
- tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
+ tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
tcg_temp_free_i32(tmp3);
tcg_temp_free_i32(tmp2);
tmp2 = load_reg(s, rm);
if ((op & 0x50) == 0x10) {
/* sdiv, udiv */
- if (!arm_feature(env, ARM_FEATURE_DIV))
+ if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
goto illegal_op;
+ }
if (op & 0x20)
gen_helper_udiv(tmp, tmp, tmp2);
else
goto illegal_op;
}
if (rn == 15) {
- /* UNPREDICTABLE or unallocated hint */
+ /* UNPREDICTABLE, unallocated hint or
+ * PLD/PLDW/PLI (literal)
+ */
return 0;
}
if (op1 & 1) {
- return 0; /* PLD* or unallocated hint */
+ return 0; /* PLD/PLDW/PLI or unallocated hint */
}
if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
- return 0; /* PLD* or unallocated hint */
+ return 0; /* PLD/PLDW/PLI or unallocated hint */
}
/* UNDEF space, or an UNPREDICTABLE */
return 1;
return 1;
}
-static void disas_thumb_insn(CPUState *env, DisasContext *s)
+static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
{
uint32_t val, insn, op, rm, rn, rd, shift, cond;
int32_t offset;
}
}
- insn = lduw_code(s->pc);
+ insn = arm_lduw_code(s->pc, s->bswap_code);
s->pc += 2;
switch (insn >> 12) {
store_reg(s, rd, tmp);
break;
- case 6: /* cps */
- ARCH(6);
- if (IS_USER(s))
+ case 6:
+ switch ((insn >> 5) & 7) {
+ case 2:
+ /* setend */
+ ARCH(6);
+ if (((insn >> 3) & 1) != s->bswap_code) {
+ /* Dynamic endianness switching not implemented. */
+ goto illegal_op;
+ }
break;
- if (IS_M(env)) {
- tmp = tcg_const_i32((insn & (1 << 4)) != 0);
- /* PRIMASK */
- if (insn & 1) {
- addr = tcg_const_i32(16);
- gen_helper_v7m_msr(cpu_env, addr, tmp);
- tcg_temp_free_i32(addr);
+ case 3:
+ /* cps */
+ ARCH(6);
+ if (IS_USER(s)) {
+ break;
}
- /* FAULTMASK */
- if (insn & 2) {
- addr = tcg_const_i32(17);
- gen_helper_v7m_msr(cpu_env, addr, tmp);
- tcg_temp_free_i32(addr);
+ if (IS_M(env)) {
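+ /* v7M special-purpose register numbers as used by v7m_msr:
+ * 16 is PRIMASK and 19 is FAULTMASK; in the CPS encoding
+ * bit 0 is the F flag and bit 1 the I flag.
+ */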
+ tmp = tcg_const_i32((insn & (1 << 4)) != 0);
+ /* FAULTMASK */
+ if (insn & 1) {
+ addr = tcg_const_i32(19);
+ gen_helper_v7m_msr(cpu_env, addr, tmp);
+ tcg_temp_free_i32(addr);
+ }
+ /* PRIMASK */
+ if (insn & 2) {
+ addr = tcg_const_i32(16);
+ gen_helper_v7m_msr(cpu_env, addr, tmp);
+ tcg_temp_free_i32(addr);
+ }
+ tcg_temp_free_i32(tmp);
+ gen_lookup_tb(s);
+ } else {
+ if (insn & (1 << 4)) {
+ shift = CPSR_A | CPSR_I | CPSR_F;
+ } else {
+ shift = 0;
+ }
+ gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
}
- tcg_temp_free_i32(tmp);
- gen_lookup_tb(s);
- } else {
- if (insn & (1 << 4))
- shift = CPSR_A | CPSR_I | CPSR_F;
- else
- shift = 0;
- gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
+ break;
+ default:
+ goto undef;
}
break;
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
basic block 'tb'. If search_pc is TRUE, also generate PC
information for each intermediate instruction. */
-static inline void gen_intermediate_code_internal(CPUState *env,
+static inline void gen_intermediate_code_internal(CPUARMState *env,
TranslationBlock *tb,
int search_pc)
{
dc->singlestep_enabled = env->singlestep_enabled;
dc->condjmp = 0;
dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
+ dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
#if !defined(CONFIG_USER_ONLY)
/* A note on handling of the condexec (IT) bits:
*
* We want to avoid the overhead of having to write the updated condexec
- * bits back to the CPUState for every instruction in an IT block. So:
+ * bits back to the CPUARMState for every instruction in an IT block. So:
* (1) if the condexec bits are not already zero then we write
- * zero back into the CPUState now. This avoids complications trying
+ * zero back into the CPUARMState now. This avoids complications trying
* to do it at the end of the block. (For example if we don't do this
* it's hard to identify whether we can safely skip writing condexec
* at the end of the TB, which we definitely want to do for the case
* where a TB doesn't do anything with the IT state at all.)
* (2) if we are going to leave the TB then we call gen_set_condexec()
- * which will write the correct value into CPUState if zero is wrong.
+ * which will write the correct value into CPUARMState if zero is wrong.
* This is done both for leaving the TB at the end, and for leaving
* it because of an exception we know will happen, which is done in
* gen_exception_insn(). The latter is necessary because we need to
* leave the TB with the PC/IT state just prior to execution of the
* instruction which caused the exception.
* (3) if we leave the TB unexpectedly (eg a data abort on a load)
- * then the CPUState will be wrong and we need to reset it.
+ * then the CPUARMState will be wrong and we need to reset it.
* This is handled in the same way as restoration of the
* PC in these situations: we will be called again with search_pc=1
* and generate a mapping of the condexec bits for each PC in
* gen_opc_condexec_bits[].
*
* Note that there are no instructions which can read the condexec
* bits, and none which can write non-static values to them, so
- * we don't need to care about whether CPUState is correct in the
+ * we don't need to care about whether CPUARMState is correct in the
* middle of a TB.
*/
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
qemu_log("----------------\n");
qemu_log("IN: %s\n", lookup_symbol(pc_start));
- log_target_disas(pc_start, dc->pc - pc_start, dc->thumb);
+ log_target_disas(pc_start, dc->pc - pc_start,
+ dc->thumb | (dc->bswap_code << 1));
qemu_log("\n");
}
#endif
}
}
-void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
+void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
{
gen_intermediate_code_internal(env, tb, 0);
}
-void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
+void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
{
gen_intermediate_code_internal(env, tb, 1);
}
"???", "???", "???", "und", "???", "???", "???", "sys"
};
-void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
+void cpu_dump_state(CPUARMState *env, FILE *f, fprintf_function cpu_fprintf,
int flags)
{
int i;
#endif
}
-void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
+void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
{
env->regs[15] = gen_opc_pc[pc_pos];
env->condexec_bits = gen_opc_condexec_bits[pc_pos];