#include <inttypes.h>
#include "cpu.h"
+#include "internals.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "qemu/log.h"
#include "qemu/bitops.h"
+#include "arm_ldst.h"
-#include "helper.h"
-#define GEN_HELPER 1
-#include "helper.h"
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
#define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
/* Set NZCV flags from the high 4 bits of var. */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
-static void gen_exception(int excp)
+static void gen_exception_internal(int excp)
{
- TCGv_i32 tmp = tcg_temp_new_i32();
- tcg_gen_movi_i32(tmp, excp);
- gen_helper_exception(cpu_env, tmp);
- tcg_temp_free_i32(tmp);
+ TCGv_i32 tcg_excp = tcg_const_i32(excp);
+
+ assert(excp_is_internal(excp));
+ gen_helper_exception_internal(cpu_env, tcg_excp);
+ tcg_temp_free_i32(tcg_excp);
+}
+
+static void gen_exception(int excp, uint32_t syndrome)
+{
+ TCGv_i32 tcg_excp = tcg_const_i32(excp);
+ TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
+
+ gen_helper_exception_with_syndrome(cpu_env, tcg_excp, tcg_syn);
+ tcg_temp_free_i32(tcg_syn);
+ tcg_temp_free_i32(tcg_excp);
}
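+/* gen_exception_internal() is reserved for QEMU-internal exceptions
+ * (e.g. EXCP_DEBUG, EXCP_KERNEL_TRAP), which carry no syndrome;
+ * architectural exceptions go via gen_exception() so that a syndrome
+ * value can be recorded for the guest.
+ */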
static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
tcg_gen_movi_i32(cpu_R[15], val);
}
+static inline void
+gen_set_condexec(DisasContext *s)
+{
+ if (s->condexec_mask) {
+ uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_movi_i32(tmp, val);
+ store_cpu_field(tmp, condexec_bits);
+ }
+}
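+/* A sketch of the packing above: for a Thumb IT block, condexec_cond
+ * holds the base condition and condexec_mask the remaining-instruction
+ * mask, so e.g. cond 0x0 (EQ) with mask 0x8 stores (0x0 << 4) | 0x4,
+ * i.e. the CPSR IT-bits layout the exception path expects.
+ */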
+
+static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
+{
+ gen_set_condexec(s);
+ gen_set_pc_im(s, s->pc - offset);
+ gen_exception_internal(excp);
+ s->is_jmp = DISAS_JUMP;
+}
+
+static void gen_exception_insn(DisasContext *s, int offset, int excp, int syn)
+{
+ gen_set_condexec(s);
+ gen_set_pc_im(s, s->pc - offset);
+ gen_exception(excp, syn);
+ s->is_jmp = DISAS_JUMP;
+}
+
/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
{
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
{
if (dp) {
- gen_aa32_ld64(cpu_F0d, addr, IS_USER(s));
+ gen_aa32_ld64(cpu_F0d, addr, get_mem_index(s));
} else {
- gen_aa32_ld32u(cpu_F0s, addr, IS_USER(s));
+ gen_aa32_ld32u(cpu_F0s, addr, get_mem_index(s));
}
}
static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
{
if (dp) {
- gen_aa32_st64(cpu_F0d, addr, IS_USER(s));
+ gen_aa32_st64(cpu_F0d, addr, get_mem_index(s));
} else {
- gen_aa32_st32(cpu_F0s, addr, IS_USER(s));
+ gen_aa32_st32(cpu_F0s, addr, get_mem_index(s));
}
}
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)
-IWMMXT_OP(msadb)
-
IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
if (insn & ARM_CP_RW_BIT) {
if ((insn >> 28) == 0xf) { /* WLDRW wCx */
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
iwmmxt_store_creg(wrd, tmp);
} else {
i = 1;
if (insn & (1 << 8)) {
if (insn & (1 << 22)) { /* WLDRD */
- gen_aa32_ld64(cpu_M0, addr, IS_USER(s));
+ gen_aa32_ld64(cpu_M0, addr, get_mem_index(s));
i = 0;
} else { /* WLDRW wRd */
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
}
} else {
tmp = tcg_temp_new_i32();
if (insn & (1 << 22)) { /* WLDRH */
- gen_aa32_ld16u(tmp, addr, IS_USER(s));
+ gen_aa32_ld16u(tmp, addr, get_mem_index(s));
} else { /* WLDRB */
- gen_aa32_ld8u(tmp, addr, IS_USER(s));
+ gen_aa32_ld8u(tmp, addr, get_mem_index(s));
}
}
if (i) {
} else {
if ((insn >> 28) == 0xf) { /* WSTRW wCx */
tmp = iwmmxt_load_creg(wrd);
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
} else {
gen_op_iwmmxt_movq_M0_wRn(wrd);
tmp = tcg_temp_new_i32();
if (insn & (1 << 8)) {
if (insn & (1 << 22)) { /* WSTRD */
- gen_aa32_st64(cpu_M0, addr, IS_USER(s));
+ gen_aa32_st64(cpu_M0, addr, get_mem_index(s));
} else { /* WSTRW wRd */
tcg_gen_trunc_i64_i32(tmp, cpu_M0);
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
}
} else {
if (insn & (1 << 22)) { /* WSTRH */
tcg_gen_trunc_i64_i32(tmp, cpu_M0);
- gen_aa32_st16(tmp, addr, IS_USER(s));
+ gen_aa32_st16(tmp, addr, get_mem_index(s));
} else { /* WSTRB */
tcg_gen_trunc_i64_i32(tmp, cpu_M0);
- gen_aa32_st8(tmp, addr, IS_USER(s));
+ gen_aa32_st8(tmp, addr, get_mem_index(s));
}
}
}
TCGv_i32 tmp = tcg_temp_new_i32();
switch (size) {
case 0:
- gen_aa32_ld8u(tmp, addr, IS_USER(s));
+ gen_aa32_ld8u(tmp, addr, get_mem_index(s));
gen_neon_dup_u8(tmp, 0);
break;
case 1:
- gen_aa32_ld16u(tmp, addr, IS_USER(s));
+ gen_aa32_ld16u(tmp, addr, get_mem_index(s));
gen_neon_dup_low16(tmp);
break;
case 2:
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
break;
default: /* Avoid compiler warnings. */
abort();
if (!arm_feature(env, ARM_FEATURE_VFP))
return 1;
+ /* FIXME: this access check should not take precedence over UNDEF
+ * for invalid encodings; we will generate incorrect syndrome information
+ * for attempts to execute invalid vfp/neon encodings with FP disabled.
+ */
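+ /* In syn_fp_access_trap(1, 0xe, s->thumb) below the arguments are
+ * assumed to be (cv, cond, is_thumb): condition-valid set, and a
+ * fixed COND of 0xE, which is permissible when traps are only taken
+ * for instructions that pass their condition check.
+ */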
+ if (!s->cpacr_fpen) {
+ gen_exception_insn(s, 4, EXCP_UDEF,
+ syn_fp_access_trap(1, 0xe, s->thumb));
+ return 0;
+ }
+
if (!s->vfp_enabled) {
/* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
if ((insn & 0x0fe00fff) != 0x0ee00a10)
return 1;
rn = (insn >> 16) & 0xf;
- if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
- && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
+ if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
+ && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
return 1;
+ }
}
if (extract32(insn, 28, 4) == 0xf) {
gen_helper_vfp_get_fpscr(tmp, cpu_env);
}
break;
+ case ARM_VFP_MVFR2:
+ if (!arm_feature(env, ARM_FEATURE_V8)) {
+ return 1;
+ }
+ /* fall through */
case ARM_VFP_MVFR0:
case ARM_VFP_MVFR1:
if (IS_USER(s)
s->is_jmp = DISAS_UPDATE;
}
-static inline void
-gen_set_condexec (DisasContext *s)
-{
- if (s->condexec_mask) {
- uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
- TCGv_i32 tmp = tcg_temp_new_i32();
- tcg_gen_movi_i32(tmp, val);
- store_cpu_field(tmp, condexec_bits);
- }
-}
-
-static void gen_exception_insn(DisasContext *s, int offset, int excp)
-{
- gen_set_condexec(s);
- gen_set_pc_im(s, s->pc - offset);
- gen_exception(excp);
- s->is_jmp = DISAS_JUMP;
-}
-
static void gen_nop_hint(DisasContext *s, int val)
{
switch (val) {
TCGv_i32 tmp2;
TCGv_i64 tmp64;
+ /* FIXME: this access check should not take precedence over UNDEF
+ * for invalid encodings; we will generate incorrect syndrome information
+ * for attempts to execute invalid vfp/neon encodings with FP disabled.
+ */
+ if (!s->cpacr_fpen) {
+ gen_exception_insn(s, 4, EXCP_UDEF,
+ syn_fp_access_trap(1, 0xe, s->thumb));
+ return 0;
+ }
+
if (!s->vfp_enabled)
return 1;
VFP_DREG_D(rd, insn);
if (size == 3) {
tmp64 = tcg_temp_new_i64();
if (load) {
- gen_aa32_ld64(tmp64, addr, IS_USER(s));
+ gen_aa32_ld64(tmp64, addr, get_mem_index(s));
neon_store_reg64(tmp64, rd);
} else {
neon_load_reg64(tmp64, rd);
- gen_aa32_st64(tmp64, addr, IS_USER(s));
+ gen_aa32_st64(tmp64, addr, get_mem_index(s));
}
tcg_temp_free_i64(tmp64);
tcg_gen_addi_i32(addr, addr, stride);
if (size == 2) {
if (load) {
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
neon_store_reg(rd, pass, tmp);
} else {
tmp = neon_load_reg(rd, pass);
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
}
tcg_gen_addi_i32(addr, addr, stride);
} else if (size == 1) {
if (load) {
tmp = tcg_temp_new_i32();
- gen_aa32_ld16u(tmp, addr, IS_USER(s));
+ gen_aa32_ld16u(tmp, addr, get_mem_index(s));
tcg_gen_addi_i32(addr, addr, stride);
tmp2 = tcg_temp_new_i32();
- gen_aa32_ld16u(tmp2, addr, IS_USER(s));
+ gen_aa32_ld16u(tmp2, addr, get_mem_index(s));
tcg_gen_addi_i32(addr, addr, stride);
tcg_gen_shli_i32(tmp2, tmp2, 16);
tcg_gen_or_i32(tmp, tmp, tmp2);
tmp = neon_load_reg(rd, pass);
tmp2 = tcg_temp_new_i32();
tcg_gen_shri_i32(tmp2, tmp, 16);
- gen_aa32_st16(tmp, addr, IS_USER(s));
+ gen_aa32_st16(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
tcg_gen_addi_i32(addr, addr, stride);
- gen_aa32_st16(tmp2, addr, IS_USER(s));
+ gen_aa32_st16(tmp2, addr, get_mem_index(s));
tcg_temp_free_i32(tmp2);
tcg_gen_addi_i32(addr, addr, stride);
}
TCGV_UNUSED_I32(tmp2);
for (n = 0; n < 4; n++) {
tmp = tcg_temp_new_i32();
- gen_aa32_ld8u(tmp, addr, IS_USER(s));
+ gen_aa32_ld8u(tmp, addr, get_mem_index(s));
tcg_gen_addi_i32(addr, addr, stride);
if (n == 0) {
tmp2 = tmp;
} else {
tcg_gen_shri_i32(tmp, tmp2, n * 8);
}
- gen_aa32_st8(tmp, addr, IS_USER(s));
+ gen_aa32_st8(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
tcg_gen_addi_i32(addr, addr, stride);
}
tmp = tcg_temp_new_i32();
switch (size) {
case 0:
- gen_aa32_ld8u(tmp, addr, IS_USER(s));
+ gen_aa32_ld8u(tmp, addr, get_mem_index(s));
break;
case 1:
- gen_aa32_ld16u(tmp, addr, IS_USER(s));
+ gen_aa32_ld16u(tmp, addr, get_mem_index(s));
break;
case 2:
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
break;
default: /* Avoid compiler warnings. */
abort();
tcg_gen_shri_i32(tmp, tmp, shift);
switch (size) {
case 0:
- gen_aa32_st8(tmp, addr, IS_USER(s));
+ gen_aa32_st8(tmp, addr, get_mem_index(s));
break;
case 1:
- gen_aa32_st16(tmp, addr, IS_USER(s));
+ gen_aa32_st16(tmp, addr, get_mem_index(s));
break;
case 2:
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
break;
}
tcg_temp_free_i32(tmp);
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
+#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
[NEON_3R_VPMIN] = 0x7,
[NEON_3R_VQDMULH_VQRDMULH] = 0x6,
[NEON_3R_VPADD] = 0x7,
+ [NEON_3R_SHA] = 0xf, /* size field encodes op type */
[NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
[NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
[NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
+#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
+#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
[NEON_2RM_VCEQ0] = 0x7,
[NEON_2RM_VCLE0] = 0x7,
[NEON_2RM_VCLT0] = 0x7,
+ [NEON_2RM_SHA1H] = 0x4,
[NEON_2RM_VABS] = 0x7,
[NEON_2RM_VNEG] = 0x7,
[NEON_2RM_VCGT0_F] = 0x4,
[NEON_2RM_VMOVN] = 0x7,
[NEON_2RM_VQMOVN] = 0x7,
[NEON_2RM_VSHLL] = 0x7,
+ [NEON_2RM_SHA1SU1] = 0x4,
[NEON_2RM_VRINTN] = 0x4,
[NEON_2RM_VRINTX] = 0x4,
[NEON_2RM_VRINTA] = 0x4,
TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
TCGv_i64 tmp64;
+ /* FIXME: this access check should not take precedence over UNDEF
+ * for invalid encodings; we will generate incorrect syndrome information
+ * for attempts to execute invalid vfp/neon encodings with FP disabled.
+ */
+ if (!s->cpacr_fpen) {
+ gen_exception_insn(s, 4, EXCP_UDEF,
+ syn_fp_access_trap(1, 0xe, s->thumb));
+ return 0;
+ }
+
if (!s->vfp_enabled)
return 1;
q = (insn & (1 << 6)) != 0;
if (q && ((rd | rn | rm) & 1)) {
return 1;
}
+ /*
+ * The SHA-1/SHA-256 3-register instructions require special treatment
+ * here, as their size field is overloaded as an op type selector, and
+ * they all consume their input in a single pass.
+ */
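+ /* Assumed op-type mapping (decoded in the crypto helpers): with u
+ * clear, size selects SHA1C/SHA1P/SHA1M/SHA1SU0; with u set, size
+ * selects SHA256H/SHA256H2/SHA256SU1 (size == 3 UNDEFs).
+ */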
+ if (op == NEON_3R_SHA) {
+ if (!q) {
+ return 1;
+ }
+ if (!u) { /* SHA-1 */
+ if (!arm_feature(env, ARM_FEATURE_V8_SHA1)) {
+ return 1;
+ }
+ tmp = tcg_const_i32(rd);
+ tmp2 = tcg_const_i32(rn);
+ tmp3 = tcg_const_i32(rm);
+ tmp4 = tcg_const_i32(size);
+ gen_helper_crypto_sha1_3reg(cpu_env, tmp, tmp2, tmp3, tmp4);
+ tcg_temp_free_i32(tmp4);
+ } else { /* SHA-256 */
+ if (!arm_feature(env, ARM_FEATURE_V8_SHA256) || size == 3) {
+ return 1;
+ }
+ tmp = tcg_const_i32(rd);
+ tmp2 = tcg_const_i32(rn);
+ tmp3 = tcg_const_i32(rm);
+ switch (size) {
+ case 0:
+ gen_helper_crypto_sha256h(cpu_env, tmp, tmp2, tmp3);
+ break;
+ case 1:
+ gen_helper_crypto_sha256h2(cpu_env, tmp, tmp2, tmp3);
+ break;
+ case 2:
+ gen_helper_crypto_sha256su1(cpu_env, tmp, tmp2, tmp3);
+ break;
+ }
+ }
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_i32(tmp3);
+ return 0;
+ }
if (size == 3 && op != NEON_3R_LOGIC) {
/* 64-bit element instructions. */
for (pass = 0; pass < (q ? 2 : 1); pass++) {
int src1_wide;
int src2_wide;
int prewiden;
- /* undefreq: bit 0 : UNDEF if size != 0
- * bit 1 : UNDEF if size == 0
- * bit 2 : UNDEF if U == 1
- * Note that [1:0] set implies 'always UNDEF'
+ /* undefreq: bit 0 : UNDEF if size == 0
+ * bit 1 : UNDEF if size == 1
+ * bit 2 : UNDEF if size == 2
+ * bit 3 : UNDEF if U == 1
+ * Note that [2:0] set implies 'always UNDEF'
*/
int undefreq;
/* prewiden, src1_wide, src2_wide, undefreq */
{0, 1, 1, 0}, /* VSUBHN */
{0, 0, 0, 0}, /* VABDL */
{0, 0, 0, 0}, /* VMLAL */
- {0, 0, 0, 6}, /* VQDMLAL */
+ {0, 0, 0, 9}, /* VQDMLAL */
{0, 0, 0, 0}, /* VMLSL */
- {0, 0, 0, 6}, /* VQDMLSL */
+ {0, 0, 0, 9}, /* VQDMLSL */
{0, 0, 0, 0}, /* Integer VMULL */
- {0, 0, 0, 2}, /* VQDMULL */
- {0, 0, 0, 5}, /* Polynomial VMULL */
- {0, 0, 0, 3}, /* Reserved: always UNDEF */
+ {0, 0, 0, 1}, /* VQDMULL */
+ {0, 0, 0, 0xa}, /* Polynomial VMULL */
+ {0, 0, 0, 7}, /* Reserved: always UNDEF */
};
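+ /* Worked example: VQDMULL has undefreq == 1 (bit 0 set), so it
+ * UNDEFs only for size == 0; the reserved encoding sets bits [2:0]
+ * and therefore UNDEFs for every size.
+ */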
prewiden = neon_3reg_wide[op][0];
src2_wide = neon_3reg_wide[op][2];
undefreq = neon_3reg_wide[op][3];
- if (((undefreq & 1) && (size != 0)) ||
- ((undefreq & 2) && (size == 0)) ||
- ((undefreq & 4) && u)) {
+ if ((undefreq & (1 << size)) ||
+ ((undefreq & 8) && u)) {
return 1;
}
if ((src1_wide && (rn & 1)) ||
return 1;
}
+ /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
+ * outside the loop below as it only performs a single pass.
+ */
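+ /* The 128-bit product is written back as two 64-bit halves: the
+ * low half to Dd and the high half to Dd + 1.
+ */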
+ if (op == 14 && size == 2) {
+ TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
+
+ if (!arm_feature(env, ARM_FEATURE_V8_PMULL)) {
+ return 1;
+ }
+ tcg_rn = tcg_temp_new_i64();
+ tcg_rm = tcg_temp_new_i64();
+ tcg_rd = tcg_temp_new_i64();
+ neon_load_reg64(tcg_rn, rn);
+ neon_load_reg64(tcg_rm, rm);
+ gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
+ neon_store_reg64(tcg_rd, rd);
+ gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
+ neon_store_reg64(tcg_rd, rd + 1);
+ tcg_temp_free_i64(tcg_rn);
+ tcg_temp_free_i64(tcg_rm);
+ tcg_temp_free_i64(tcg_rd);
+ return 0;
+ }
+
/* Avoid overlapping operands. Wide source operands are
always aligned so will never overlap with wide
destinations in problematic ways. */
tcg_temp_free_i32(tmp2);
tcg_temp_free_i32(tmp3);
break;
+ case NEON_2RM_SHA1H:
+ if (!arm_feature(env, ARM_FEATURE_V8_SHA1)
+ || ((rm | rd) & 1)) {
+ return 1;
+ }
+ tmp = tcg_const_i32(rd);
+ tmp2 = tcg_const_i32(rm);
+
+ gen_helper_crypto_sha1h(cpu_env, tmp, tmp2);
+
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(tmp2);
+ break;
+ case NEON_2RM_SHA1SU1:
+ if ((rm | rd) & 1) {
+ return 1;
+ }
+ /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
+ if (q) {
+ if (!arm_feature(env, ARM_FEATURE_V8_SHA256)) {
+ return 1;
+ }
+ } else if (!arm_feature(env, ARM_FEATURE_V8_SHA1)) {
+ return 1;
+ }
+ tmp = tcg_const_i32(rd);
+ tmp2 = tcg_const_i32(rm);
+ if (q) {
+ gen_helper_crypto_sha256su0(cpu_env, tmp, tmp2);
+ } else {
+ gen_helper_crypto_sha1su1(cpu_env, tmp, tmp2);
+ }
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(tmp2);
+ break;
default:
elementwise:
for (pass = 0; pass < (q ? 4 : 2); pass++) {
* runtime; this may result in an exception.
*/
TCGv_ptr tmpptr;
+ TCGv_i32 tcg_syn;
+ uint32_t syndrome;
+
+ /* Note that since we are an implementation which takes an
+ * exception on a trapped conditional instruction only if the
+ * instruction passes its condition code check, we can take
+ * advantage of the clause in the ARM ARM that allows us to set
+ * the COND field in the syndrome to 0xE in all cases.
+ * We could fish the actual condition out of the insn (ARM)
+ * or the condexec bits (Thumb) but it isn't necessary.
+ */
+ switch (cpnum) {
+ case 14:
+ if (is64) {
+ syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
+ isread, s->thumb);
+ } else {
+ syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
+ rt, isread, s->thumb);
+ }
+ break;
+ case 15:
+ if (is64) {
+ syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
+ isread, s->thumb);
+ } else {
+ syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
+ rt, isread, s->thumb);
+ }
+ break;
+ default:
+ /* ARMv8 defines that only coprocessors 14 and 15 exist,
+ * so this can only happen if this is an ARMv7 or earlier CPU,
+ * in which case the syndrome information won't actually be
+ * guest visible.
+ */
+ assert(!arm_feature(env, ARM_FEATURE_V8));
+ syndrome = syn_uncategorized();
+ break;
+ }
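+ /* For example, a trapped 32-bit MRC/MCR on cp15 is described by
+ * syn_cp15_rt_trap(); the value is handed to the access-check
+ * helper below and becomes the exception syndrome if the access
+ * does trap.
+ */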
+
gen_set_pc_im(s, s->pc);
tmpptr = tcg_const_ptr(ri);
- gen_helper_access_check_cp_reg(cpu_env, tmpptr);
+ tcg_syn = tcg_const_i32(syndrome);
+ gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn);
tcg_temp_free_ptr(tmpptr);
+ tcg_temp_free_i32(tcg_syn);
}
/* Handle special cases first */
switch (size) {
case 0:
- gen_aa32_ld8u(tmp, addr, IS_USER(s));
+ gen_aa32_ld8u(tmp, addr, get_mem_index(s));
break;
case 1:
- gen_aa32_ld16u(tmp, addr, IS_USER(s));
+ gen_aa32_ld16u(tmp, addr, get_mem_index(s));
break;
case 2:
case 3:
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
break;
default:
abort();
TCGv_i32 tmp3 = tcg_temp_new_i32();
tcg_gen_addi_i32(tmp2, addr, 4);
- gen_aa32_ld32u(tmp3, tmp2, IS_USER(s));
+ gen_aa32_ld32u(tmp3, tmp2, get_mem_index(s));
tcg_temp_free_i32(tmp2);
tcg_gen_concat_i32_i64(cpu_exclusive_val, tmp, tmp3);
store_reg(s, rt2, tmp3);
tcg_gen_extu_i32_i64(cpu_exclusive_test, addr);
tcg_gen_movi_i32(cpu_exclusive_info,
size | (rd << 4) | (rt << 8) | (rt2 << 12));
- gen_exception_insn(s, 4, EXCP_STREX);
+ gen_exception_internal_insn(s, 4, EXCP_STREX);
}
#else
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
tmp = tcg_temp_new_i32();
switch (size) {
case 0:
- gen_aa32_ld8u(tmp, addr, IS_USER(s));
+ gen_aa32_ld8u(tmp, addr, get_mem_index(s));
break;
case 1:
- gen_aa32_ld16u(tmp, addr, IS_USER(s));
+ gen_aa32_ld16u(tmp, addr, get_mem_index(s));
break;
case 2:
case 3:
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
break;
default:
abort();
TCGv_i32 tmp2 = tcg_temp_new_i32();
TCGv_i32 tmp3 = tcg_temp_new_i32();
tcg_gen_addi_i32(tmp2, addr, 4);
- gen_aa32_ld32u(tmp3, tmp2, IS_USER(s));
+ gen_aa32_ld32u(tmp3, tmp2, get_mem_index(s));
tcg_temp_free_i32(tmp2);
tcg_gen_concat_i32_i64(val64, tmp, tmp3);
tcg_temp_free_i32(tmp3);
tmp = load_reg(s, rt);
switch (size) {
case 0:
- gen_aa32_st8(tmp, addr, IS_USER(s));
+ gen_aa32_st8(tmp, addr, get_mem_index(s));
break;
case 1:
- gen_aa32_st16(tmp, addr, IS_USER(s));
+ gen_aa32_st16(tmp, addr, get_mem_index(s));
break;
case 2:
case 3:
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
break;
default:
abort();
if (size == 3) {
tcg_gen_addi_i32(addr, addr, 4);
tmp = load_reg(s, rt2);
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
}
tcg_gen_movi_i32(cpu_R[rd], 0);
}
tcg_gen_addi_i32(addr, addr, offset);
tmp = load_reg(s, 14);
- gen_aa32_st32(tmp, addr, 0);
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
tmp = load_cpu_field(spsr);
tcg_gen_addi_i32(addr, addr, 4);
- gen_aa32_st32(tmp, addr, 0);
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
if (writeback) {
switch (amode) {
tcg_gen_addi_i32(addr, addr, offset);
/* Load PC into tmp and CPSR into tmp2. */
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, 0);
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
tcg_gen_addi_i32(addr, addr, 4);
tmp2 = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp2, addr, 0);
+ gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
if (insn & (1 << 21)) {
/* Base writeback. */
switch (i) {
tmp = load_reg(s, rn);
tmp2 = load_reg(s, rm);
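+ /* CRC32B/CRC32H consume only the low 8/16 bits of Rm, so mask the
+ * value first; op1 is log2 of the access size, passed to the helper
+ * as (1 << op1) bytes.
+ */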
+ if (op1 == 0) {
+ tcg_gen_andi_i32(tmp2, tmp2, 0xff);
+ } else if (op1 == 1) {
+ tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
+ }
tmp3 = tcg_const_i32(1 << op1);
if (c & 0x2) {
gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
store_reg(s, rd, tmp);
break;
case 7:
+ {
+ int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
/* SMC instruction (op1 == 3)
and undefined instructions (op1 == 0 || op1 == 2)
will trap */
}
/* bkpt */
ARCH(5);
- gen_exception_insn(s, 4, EXCP_BKPT);
+ gen_exception_insn(s, 4, EXCP_BKPT, syn_aa32_bkpt(imm16, false));
break;
+ }
case 0x8: /* signed multiply */
case 0xa:
case 0xc:
tmp = tcg_temp_new_i32();
switch (op1) {
case 0: /* lda */
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
break;
case 2: /* ldab */
- gen_aa32_ld8u(tmp, addr, IS_USER(s));
+ gen_aa32_ld8u(tmp, addr, get_mem_index(s));
break;
case 3: /* ldah */
- gen_aa32_ld16u(tmp, addr, IS_USER(s));
+ gen_aa32_ld16u(tmp, addr, get_mem_index(s));
break;
default:
abort();
tmp = load_reg(s, rm);
switch (op1) {
case 0: /* stl */
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
break;
case 2: /* stlb */
- gen_aa32_st8(tmp, addr, IS_USER(s));
+ gen_aa32_st8(tmp, addr, get_mem_index(s));
break;
case 3: /* stlh */
- gen_aa32_st16(tmp, addr, IS_USER(s));
+ gen_aa32_st16(tmp, addr, get_mem_index(s));
break;
default:
abort();
tmp = load_reg(s, rm);
tmp2 = tcg_temp_new_i32();
if (insn & (1 << 22)) {
- gen_aa32_ld8u(tmp2, addr, IS_USER(s));
- gen_aa32_st8(tmp, addr, IS_USER(s));
+ gen_aa32_ld8u(tmp2, addr, get_mem_index(s));
+ gen_aa32_st8(tmp, addr, get_mem_index(s));
} else {
- gen_aa32_ld32u(tmp2, addr, IS_USER(s));
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
}
tcg_temp_free_i32(tmp);
tcg_temp_free_i32(addr);
tmp = tcg_temp_new_i32();
switch(sh) {
case 1:
- gen_aa32_ld16u(tmp, addr, IS_USER(s));
+ gen_aa32_ld16u(tmp, addr, get_mem_index(s));
break;
case 2:
- gen_aa32_ld8s(tmp, addr, IS_USER(s));
+ gen_aa32_ld8s(tmp, addr, get_mem_index(s));
break;
default:
case 3:
- gen_aa32_ld16s(tmp, addr, IS_USER(s));
+ gen_aa32_ld16s(tmp, addr, get_mem_index(s));
break;
}
load = 1;
if (sh & 1) {
/* store */
tmp = load_reg(s, rd);
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
tcg_gen_addi_i32(addr, addr, 4);
tmp = load_reg(s, rd + 1);
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
load = 0;
} else {
/* load */
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
store_reg(s, rd, tmp);
tcg_gen_addi_i32(addr, addr, 4);
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
rd++;
load = 1;
}
} else {
/* store */
tmp = load_reg(s, rd);
- gen_aa32_st16(tmp, addr, IS_USER(s));
+ gen_aa32_st16(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
load = 0;
}
if (insn & (1 << 5))
gen_swap_half(tmp2);
gen_smul_dual(tmp, tmp2);
- if (insn & (1 << 6)) {
- /* This subtraction cannot overflow. */
- tcg_gen_sub_i32(tmp, tmp, tmp2);
- } else {
- /* This addition cannot overflow 32 bits;
- * however it may overflow considered as a signed
- * operation, in which case we must set the Q flag.
- */
- gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
- }
- tcg_temp_free_i32(tmp2);
if (insn & (1 << 22)) {
/* smlald, smlsld */
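+ /* Widen both products to 64 bits before the add/subtract: their
+ * 32-bit sum or difference can overflow, and the accumulation is
+ * defined on the full 64-bit value.
+ */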
+ TCGv_i64 tmp64_2;
+
tmp64 = tcg_temp_new_i64();
+ tmp64_2 = tcg_temp_new_i64();
tcg_gen_ext_i32_i64(tmp64, tmp);
+ tcg_gen_ext_i32_i64(tmp64_2, tmp2);
tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(tmp2);
+ if (insn & (1 << 6)) {
+ tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
+ } else {
+ tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
+ }
+ tcg_temp_free_i64(tmp64_2);
gen_addq(s, tmp64, rd, rn);
gen_storeq_reg(s, rd, rn, tmp64);
tcg_temp_free_i64(tmp64);
} else {
/* smuad, smusd, smlad, smlsd */
+ if (insn & (1 << 6)) {
+ /* This subtraction cannot overflow. */
+ tcg_gen_sub_i32(tmp, tmp, tmp2);
+ } else {
+ /* This addition cannot overflow 32 bits;
+ * however it may overflow considered as a
+ * signed operation, in which case we must set
+ * the Q flag.
+ */
+ gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
+ }
+ tcg_temp_free_i32(tmp2);
if (rd != 15)
{
tmp2 = load_reg(s, rd);
rn = (insn >> 16) & 0xf;
rd = (insn >> 12) & 0xf;
tmp2 = load_reg(s, rn);
- i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
+ if ((insn & 0x01200000) == 0x00200000) {
+ /* ldrt/strt */
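+ /* These perform their access with user-mode privileges
+ * regardless of the current mode, so the MMU index is
+ * forced to MMU_USER_IDX. */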
+ i = MMU_USER_IDX;
+ } else {
+ i = get_mem_index(s);
+ }
if (insn & (1 << 24))
gen_add_data_offset(s, insn, tmp2);
if (insn & (1 << 20)) {
if (insn & (1 << 20)) {
/* load */
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
if (user) {
tmp2 = tcg_const_i32(i);
gen_helper_set_user_reg(cpu_env, tmp2, tmp);
} else {
tmp = load_reg(s, i);
}
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
}
j++;
case 0xf:
/* swi */
gen_set_pc_im(s, s->pc);
+ s->svc_imm = extract32(insn, 0, 24);
s->is_jmp = DISAS_SWI;
break;
default:
illegal_op:
- gen_exception_insn(s, 4, EXCP_UDEF);
+ gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized());
break;
}
}
if (insn & (1 << 20)) {
/* ldrd */
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
store_reg(s, rs, tmp);
tcg_gen_addi_i32(addr, addr, 4);
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
store_reg(s, rd, tmp);
} else {
/* strd */
tmp = load_reg(s, rs);
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
tcg_gen_addi_i32(addr, addr, 4);
tmp = load_reg(s, rd);
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
}
if (insn & (1 << 21)) {
tcg_gen_add_i32(addr, addr, tmp);
tcg_temp_free_i32(tmp);
tmp = tcg_temp_new_i32();
- gen_aa32_ld16u(tmp, addr, IS_USER(s));
+ gen_aa32_ld16u(tmp, addr, get_mem_index(s));
} else { /* tbb */
tcg_temp_free_i32(tmp);
tmp = tcg_temp_new_i32();
- gen_aa32_ld8u(tmp, addr, IS_USER(s));
+ gen_aa32_ld8u(tmp, addr, get_mem_index(s));
}
tcg_temp_free_i32(addr);
tcg_gen_shli_i32(tmp, tmp, 1);
tmp = tcg_temp_new_i32();
switch (op) {
case 0: /* ldab */
- gen_aa32_ld8u(tmp, addr, IS_USER(s));
+ gen_aa32_ld8u(tmp, addr, get_mem_index(s));
break;
case 1: /* ldah */
- gen_aa32_ld16u(tmp, addr, IS_USER(s));
+ gen_aa32_ld16u(tmp, addr, get_mem_index(s));
break;
case 2: /* lda */
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
break;
default:
abort();
tmp = load_reg(s, rs);
switch (op) {
case 0: /* stlb */
- gen_aa32_st8(tmp, addr, IS_USER(s));
+ gen_aa32_st8(tmp, addr, get_mem_index(s));
break;
case 1: /* stlh */
- gen_aa32_st16(tmp, addr, IS_USER(s));
+ gen_aa32_st16(tmp, addr, get_mem_index(s));
break;
case 2: /* stl */
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
break;
default:
abort();
tcg_gen_addi_i32(addr, addr, -8);
/* Load PC into tmp and CPSR into tmp2. */
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, 0);
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
tcg_gen_addi_i32(addr, addr, 4);
tmp2 = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp2, addr, 0);
+ gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
if (insn & (1 << 21)) {
/* Base writeback. */
if (insn & (1 << 24)) {
if (insn & (1 << 20)) {
/* Load. */
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
if (i == 15) {
gen_bx(s, tmp);
} else if (i == rn) {
} else {
/* Store. */
tmp = load_reg(s, i);
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
}
tcg_gen_addi_i32(addr, addr, 4);
}
tmp2 = load_reg(s, rm);
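+ /* As in the ARM encoding above: CRC32B/H read only the low 8/16
+ * bits of Rm, masked here before the helper call. */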
+ if (sz == 0) {
+ tcg_gen_andi_i32(tmp2, tmp2, 0xff);
+ } else if (sz == 1) {
+ tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
+ }
tmp3 = tcg_const_i32(1 << sz);
if (c) {
gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
{
int postinc = 0;
int writeback = 0;
- int user;
+ int memidx;
if ((insn & 0x01100000) == 0x01000000) {
if (disas_neon_ls_insn(env, s, insn))
goto illegal_op;
return 1;
}
}
- user = IS_USER(s);
+ memidx = get_mem_index(s);
if (rn == 15) {
addr = tcg_temp_new_i32();
/* PC relative. */
break;
case 0xe: /* User privilege. */
tcg_gen_addi_i32(addr, addr, imm);
- user = 1;
+ memidx = MMU_USER_IDX;
break;
case 0x9: /* Post-decrement. */
imm = -imm;
tmp = tcg_temp_new_i32();
switch (op) {
case 0:
- gen_aa32_ld8u(tmp, addr, user);
+ gen_aa32_ld8u(tmp, addr, memidx);
break;
case 4:
- gen_aa32_ld8s(tmp, addr, user);
+ gen_aa32_ld8s(tmp, addr, memidx);
break;
case 1:
- gen_aa32_ld16u(tmp, addr, user);
+ gen_aa32_ld16u(tmp, addr, memidx);
break;
case 5:
- gen_aa32_ld16s(tmp, addr, user);
+ gen_aa32_ld16s(tmp, addr, memidx);
break;
case 2:
- gen_aa32_ld32u(tmp, addr, user);
+ gen_aa32_ld32u(tmp, addr, memidx);
break;
default:
tcg_temp_free_i32(tmp);
tmp = load_reg(s, rs);
switch (op) {
case 0:
- gen_aa32_st8(tmp, addr, user);
+ gen_aa32_st8(tmp, addr, memidx);
break;
case 1:
- gen_aa32_st16(tmp, addr, user);
+ gen_aa32_st16(tmp, addr, memidx);
break;
case 2:
- gen_aa32_st32(tmp, addr, user);
+ gen_aa32_st32(tmp, addr, memidx);
break;
default:
tcg_temp_free_i32(tmp);
addr = tcg_temp_new_i32();
tcg_gen_movi_i32(addr, val);
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(addr);
store_reg(s, rd, tmp);
break;
switch (op) {
case 0: /* str */
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
break;
case 1: /* strh */
- gen_aa32_st16(tmp, addr, IS_USER(s));
+ gen_aa32_st16(tmp, addr, get_mem_index(s));
break;
case 2: /* strb */
- gen_aa32_st8(tmp, addr, IS_USER(s));
+ gen_aa32_st8(tmp, addr, get_mem_index(s));
break;
case 3: /* ldrsb */
- gen_aa32_ld8s(tmp, addr, IS_USER(s));
+ gen_aa32_ld8s(tmp, addr, get_mem_index(s));
break;
case 4: /* ldr */
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
break;
case 5: /* ldrh */
- gen_aa32_ld16u(tmp, addr, IS_USER(s));
+ gen_aa32_ld16u(tmp, addr, get_mem_index(s));
break;
case 6: /* ldrb */
- gen_aa32_ld8u(tmp, addr, IS_USER(s));
+ gen_aa32_ld8u(tmp, addr, get_mem_index(s));
break;
case 7: /* ldrsh */
- gen_aa32_ld16s(tmp, addr, IS_USER(s));
+ gen_aa32_ld16s(tmp, addr, get_mem_index(s));
break;
}
if (op >= 3) { /* load */
if (insn & (1 << 11)) {
/* load */
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
store_reg(s, rd, tmp);
} else {
/* store */
tmp = load_reg(s, rd);
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
}
tcg_temp_free_i32(addr);
if (insn & (1 << 11)) {
/* load */
tmp = tcg_temp_new_i32();
- gen_aa32_ld8u(tmp, addr, IS_USER(s));
+ gen_aa32_ld8u(tmp, addr, get_mem_index(s));
store_reg(s, rd, tmp);
} else {
/* store */
tmp = load_reg(s, rd);
- gen_aa32_st8(tmp, addr, IS_USER(s));
+ gen_aa32_st8(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
}
tcg_temp_free_i32(addr);
if (insn & (1 << 11)) {
/* load */
tmp = tcg_temp_new_i32();
- gen_aa32_ld16u(tmp, addr, IS_USER(s));
+ gen_aa32_ld16u(tmp, addr, get_mem_index(s));
store_reg(s, rd, tmp);
} else {
/* store */
tmp = load_reg(s, rd);
- gen_aa32_st16(tmp, addr, IS_USER(s));
+ gen_aa32_st16(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
}
tcg_temp_free_i32(addr);
if (insn & (1 << 11)) {
/* load */
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
store_reg(s, rd, tmp);
} else {
/* store */
tmp = load_reg(s, rd);
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
}
tcg_temp_free_i32(addr);
if (insn & (1 << 11)) {
/* pop */
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
store_reg(s, i, tmp);
} else {
/* push */
tmp = load_reg(s, i);
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
}
/* advance to the next address. */
if (insn & (1 << 11)) {
/* pop pc */
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
/* don't set the pc until the rest of the instruction
has completed */
} else {
/* push lr */
tmp = load_reg(s, 14);
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
}
tcg_gen_addi_i32(addr, addr, 4);
break;
case 0xe: /* bkpt */
+ {
+ int imm8 = extract32(insn, 0, 8);
ARCH(5);
- gen_exception_insn(s, 2, EXCP_BKPT);
+ gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true));
break;
+ }
case 0xa: /* rev */
ARCH(6);
if (insn & (1 << 11)) {
/* load */
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
if (i == rn) {
loaded_var = tmp;
} else {
} else {
/* store */
tmp = load_reg(s, i);
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
}
/* advance to the next address */
if (cond == 0xf) {
/* swi */
gen_set_pc_im(s, s->pc);
+ s->svc_imm = extract32(insn, 0, 8);
s->is_jmp = DISAS_SWI;
break;
}
}
return;
undef32:
- gen_exception_insn(s, 4, EXCP_UDEF);
+ gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized());
return;
illegal_op:
undef:
- gen_exception_insn(s, 2, EXCP_UDEF);
+ gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized());
}
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
#if !defined(CONFIG_USER_ONLY)
dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
#endif
+ dc->cpacr_fpen = ARM_TBFLAG_CPACR_FPEN(tb->flags);
dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
if (dc->pc >= 0xffff0000) {
/* We always get here via a jump, so know we are not in a
conditional execution block. */
- gen_exception(EXCP_KERNEL_TRAP);
+ gen_exception_internal(EXCP_KERNEL_TRAP);
dc->is_jmp = DISAS_UPDATE;
break;
}
if (dc->pc >= 0xfffffff0 && IS_M(env)) {
/* We always get here via a jump, so know we are not in a
conditional execution block. */
- gen_exception(EXCP_EXCEPTION_EXIT);
+ gen_exception_internal(EXCP_EXCEPTION_EXIT);
dc->is_jmp = DISAS_UPDATE;
break;
}
if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == dc->pc) {
- gen_exception_insn(dc, 0, EXCP_DEBUG);
+ gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
/* Advance PC so that clearing the breakpoint will
invalidate this TB. */
dc->pc += 2;
if (dc->condjmp) {
gen_set_condexec(dc);
if (dc->is_jmp == DISAS_SWI) {
- gen_exception(EXCP_SWI);
+ gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
} else {
- gen_exception(EXCP_DEBUG);
+ gen_exception_internal(EXCP_DEBUG);
}
gen_set_label(dc->condlabel);
}
}
gen_set_condexec(dc);
if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
- gen_exception(EXCP_SWI);
+ gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
} else {
/* FIXME: Single stepping a WFI insn will not halt
the CPU. */
- gen_exception(EXCP_DEBUG);
+ gen_exception_internal(EXCP_DEBUG);
}
} else {
/* While branches must always occur at the end of an IT block,
gen_helper_wfe(cpu_env);
break;
case DISAS_SWI:
- gen_exception(EXCP_SWI);
+ gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
break;
}
if (dc->condjmp) {
}
static const char *cpu_mode_names[16] = {
- "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
- "???", "???", "???", "und", "???", "???", "???", "sys"
+ "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
+ "???", "???", "hyp", "und", "???", "???", "???", "sys"
};
void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
int i;
uint32_t psr;
+ if (is_a64(env)) {
+ aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
+ return;
+ }
+
for(i=0;i<16;i++) {
cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
if ((i % 4) == 3)