#include "trace-tcg.h"
-#define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
-#define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
+#define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
+#define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
-#define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
+#define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J 0
-#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
-#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
-#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
-#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
-#define ENABLE_ARCH_8 arm_feature(env, ARM_FEATURE_V8)
+#define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
+#define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
+#define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
+#define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
+#define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
a64_translate_init();
}
+static inline ARMMMUIdx get_a32_user_mem_index(DisasContext *s)
+{
+ /* Return the mmu_idx to use for A32/T32 "unprivileged load/store"
+ * insns:
+ * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
+ * otherwise, access as if at PL0.
+ */
+ switch (s->mmu_idx) {
+ case ARMMMUIdx_S1E2: /* this one is UNPREDICTABLE */
+ case ARMMMUIdx_S12NSE0:
+ case ARMMMUIdx_S12NSE1:
+ return ARMMMUIdx_S12NSE0;
+ case ARMMMUIdx_S1E3:
+ case ARMMMUIdx_S1SE0:
+ case ARMMMUIdx_S1SE1:
+ return ARMMMUIdx_S1SE0;
+ case ARMMMUIdx_S2NS:
+ default:
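+        /* Stage 2 (or anything else) is never the translation regime
+         * that A32/T32 code executes in, so getting here is a bug.
+         */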
+ g_assert_not_reached();
+ }
+}
+
static inline TCGv_i32 load_cpu_offset(int offset)
{
TCGv_i32 tmp = tcg_temp_new_i32();
tcg_temp_free_i32(tcg_excp);
}
-static void gen_exception(int excp, uint32_t syndrome)
+static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
{
TCGv_i32 tcg_excp = tcg_const_i32(excp);
TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
+ TCGv_i32 tcg_el = tcg_const_i32(target_el);
- gen_helper_exception_with_syndrome(cpu_env, tcg_excp, tcg_syn);
+ gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
+ tcg_syn, tcg_el);
+
+ tcg_temp_free_i32(tcg_el);
tcg_temp_free_i32(tcg_syn);
tcg_temp_free_i32(tcg_excp);
}
+static void gen_ss_advance(DisasContext *s)
+{
+ /* If the singlestep state is Active-not-pending, advance to
+ * Active-pending.
+ */
+ if (s->ss_active) {
+ s->pstate_ss = 0;
+ gen_helper_clear_pstate_ss(cpu_env);
+ }
+}
+
+static void gen_step_complete_exception(DisasContext *s)
+{
+    /* We have just completed a single step of an insn. Move from
+     * Active-not-pending to Active-pending, and then also take the
+     * swstep exception.
+ * This corresponds to making the (IMPDEF) choice to prioritize
+ * swstep exceptions over asynchronous exceptions taken to an exception
+ * level where debug is disabled. This choice has the advantage that
+ * we do not need to maintain internal state corresponding to the
+ * ISV/EX syndrome bits between completion of the step and generation
+ * of the exception, and our syndrome information is always correct.
+ */
+ gen_ss_advance(s);
+ gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
+ default_exception_el(s));
+ s->is_jmp = DISAS_EXC;
+}
+
static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
TCGv_i32 tmp1 = tcg_temp_new_i32();
* generate a conditional branch based on ARM condition code cc.
* This is common between ARM and Aarch64 targets.
*/
-void arm_gen_test_cc(int cc, int label)
+void arm_gen_test_cc(int cc, TCGLabel *label)
{
TCGv_i32 tmp;
- int inv;
+ TCGLabel *inv;
switch (cc) {
case 0: /* eq: Z */
/* Variant of store_reg which uses branch&exchange logic when storing
to r15 in ARM architecture v7 and above. The source must be a temporary
and will be marked as dead. */
-static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
- int reg, TCGv_i32 var)
+static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
{
if (reg == 15 && ENABLE_ARCH_7) {
gen_bx(s, var);
* to r15 in ARM architecture v5T and above. This is used for storing
* the results of a LDR/LDM/POP into r15, and corresponds to the cases
* in the ARM ARM which use the LoadWritePC() pseudocode function. */
-static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
- int reg, TCGv_i32 var)
+static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
{
if (reg == 15 && ENABLE_ARCH_5) {
gen_bx(s, var);
tcg_gen_movi_i32(cpu_R[15], val);
}
+static inline void gen_hvc(DisasContext *s, int imm16)
+{
+    /* The pre-HVC helper handles the cases where HVC is trapped
+     * as an undefined insn by runtime configuration (i.e. before
+     * the insn really executes).
+ */
+ gen_set_pc_im(s, s->pc - 4);
+ gen_helper_pre_hvc(cpu_env);
+ /* Otherwise we will treat this as a real exception which
+ * happens after execution of the insn. (The distinction matters
+ * for the PC value reported to the exception handler and also
+ * for single stepping.)
+ */
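+    /* Stash the HVC immediate in svc_imm so that the exception path
+     * can use it to build the HVC syndrome value.
+     */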
+ s->svc_imm = imm16;
+ gen_set_pc_im(s, s->pc);
+ s->is_jmp = DISAS_HVC;
+}
+
+static inline void gen_smc(DisasContext *s)
+{
+ /* As with HVC, we may take an exception either before or after
+ * the insn executes.
+ */
+ TCGv_i32 tmp;
+
+ gen_set_pc_im(s, s->pc - 4);
+ tmp = tcg_const_i32(syn_aa32_smc());
+ gen_helper_pre_smc(cpu_env, tmp);
+ tcg_temp_free_i32(tmp);
+ gen_set_pc_im(s, s->pc);
+ s->is_jmp = DISAS_SMC;
+}
+
static inline void
gen_set_condexec (DisasContext *s)
{
s->is_jmp = DISAS_JUMP;
}
-static void gen_exception_insn(DisasContext *s, int offset, int excp, int syn)
+static void gen_exception_insn(DisasContext *s, int offset, int excp,
+ int syn, uint32_t target_el)
{
gen_set_condexec(s);
gen_set_pc_im(s, s->pc - offset);
- gen_exception(excp, syn);
+ gen_exception(excp, syn, target_el);
s->is_jmp = DISAS_JUMP;
}
/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
(ie. an undefined instruction). */
-static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
+static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
{
int rd, wrd;
int rdhi, rdlo, rd0, rd1, i;
/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
(ie. an undefined instruction). */
-static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
+static int disas_dsp_insn(DisasContext *s, uint32_t insn)
{
int acc, rd0, rd1, rdhi, rdlo;
TCGv_i32 tmp, tmp2;
#define VFP_SREG(insn, bigbit, smallbit) \
((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
- if (arm_feature(env, ARM_FEATURE_VFP3)) { \
+ if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
reg = (((insn) >> (bigbit)) & 0x0f) \
| (((insn) >> ((smallbit) - 4)) & 0x10); \
} else { \
FPROUNDING_NEGINF,
};
-static int disas_vfp_v8_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
+static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
{
uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
- if (!arm_feature(env, ARM_FEATURE_V8)) {
+ if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
return 1;
}
/* Disassemble a VFP instruction. Returns nonzero if an error occurred
(ie. an undefined instruction). */
-static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
+static int disas_vfp_insn(DisasContext *s, uint32_t insn)
{
uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
int dp, veclen;
TCGv_i32 tmp;
TCGv_i32 tmp2;
- if (!arm_feature(env, ARM_FEATURE_VFP))
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
return 1;
+ }
/* FIXME: this access check should not take precedence over UNDEF
* for invalid encodings; we will generate incorrect syndrome information
* for attempts to execute invalid vfp/neon encodings with FP disabled.
*/
- if (!s->cpacr_fpen) {
+ if (s->fp_excp_el) {
gen_exception_insn(s, 4, EXCP_UDEF,
- syn_fp_access_trap(1, 0xe, s->thumb));
+ syn_fp_access_trap(1, 0xe, s->thumb), s->fp_excp_el);
return 0;
}
/* Encodings with T=1 (Thumb) or unconditional (ARM):
* only used in v8 and above.
*/
- return disas_vfp_v8_insn(env, s, insn);
+ return disas_vfp_v8_insn(s, insn);
}
dp = ((insn & 0xf00) == 0xb00);
if (insn & 0xf)
return 1;
if (insn & 0x00c00060
- && !arm_feature(env, ARM_FEATURE_NEON))
+ && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
return 1;
+ }
pass = (insn >> 21) & 1;
if (insn & (1 << 22)) {
VFP3 restricts all id registers to privileged
accesses. */
if (IS_USER(s)
- && arm_feature(env, ARM_FEATURE_VFP3))
+ && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
return 1;
+ }
tmp = load_cpu_field(vfp.xregs[rn]);
break;
case ARM_VFP_FPEXC:
case ARM_VFP_FPINST2:
/* Not present in VFP3. */
if (IS_USER(s)
- || arm_feature(env, ARM_FEATURE_VFP3))
+ || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
return 1;
+ }
tmp = load_cpu_field(vfp.xregs[rn]);
break;
case ARM_VFP_FPSCR:
}
break;
case ARM_VFP_MVFR2:
- if (!arm_feature(env, ARM_FEATURE_V8)) {
+ if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
return 1;
}
/* fall through */
case ARM_VFP_MVFR0:
case ARM_VFP_MVFR1:
if (IS_USER(s)
- || !arm_feature(env, ARM_FEATURE_MVFR))
+ || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
return 1;
+ }
tmp = load_cpu_field(vfp.xregs[rn]);
break;
default:
break;
case ARM_VFP_FPINST:
case ARM_VFP_FPINST2:
+ if (IS_USER(s)) {
+ return 1;
+ }
tmp = load_reg(s, rd);
store_cpu_field(tmp, vfp.xregs[rn]);
break;
* UNPREDICTABLE if bit 8 is set prior to ARMv8
* (we choose to UNDEF)
*/
- if ((dp && !arm_feature(env, ARM_FEATURE_V8)) ||
- !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
+ if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
+ !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
return 1;
}
if (!extract32(rn, 1, 1)) {
* correct : an input NaN should come out with its sign bit
* flipped if it is a negated-input.
*/
- if (!arm_feature(env, ARM_FEATURE_VFP4)) {
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
return 1;
}
if (dp) {
}
break;
case 14: /* fconst */
- if (!arm_feature(env, ARM_FEATURE_VFP3))
- return 1;
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
+ return 1;
+ }
n = (insn << 12) & 0x80000000;
i = ((insn >> 12) & 0x70) | (insn & 0xf);
gen_vfp_sito(dp, 0);
break;
case 20: /* fshto */
- if (!arm_feature(env, ARM_FEATURE_VFP3))
- return 1;
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
+ return 1;
+ }
gen_vfp_shto(dp, 16 - rm, 0);
break;
case 21: /* fslto */
- if (!arm_feature(env, ARM_FEATURE_VFP3))
- return 1;
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
+ return 1;
+ }
gen_vfp_slto(dp, 32 - rm, 0);
break;
case 22: /* fuhto */
- if (!arm_feature(env, ARM_FEATURE_VFP3))
- return 1;
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
+ return 1;
+ }
gen_vfp_uhto(dp, 16 - rm, 0);
break;
case 23: /* fulto */
- if (!arm_feature(env, ARM_FEATURE_VFP3))
- return 1;
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
+ return 1;
+ }
gen_vfp_ulto(dp, 32 - rm, 0);
break;
case 24: /* ftoui */
gen_vfp_tosiz(dp, 0);
break;
case 28: /* ftosh */
- if (!arm_feature(env, ARM_FEATURE_VFP3))
- return 1;
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
+ return 1;
+ }
gen_vfp_tosh(dp, 16 - rm, 0);
break;
case 29: /* ftosl */
- if (!arm_feature(env, ARM_FEATURE_VFP3))
- return 1;
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
+ return 1;
+ }
gen_vfp_tosl(dp, 32 - rm, 0);
break;
case 30: /* ftouh */
- if (!arm_feature(env, ARM_FEATURE_VFP3))
- return 1;
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
+ return 1;
+ }
gen_vfp_touh(dp, 16 - rm, 0);
break;
case 31: /* ftoul */
- if (!arm_feature(env, ARM_FEATURE_VFP3))
- return 1;
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
+ return 1;
+ }
gen_vfp_toul(dp, 32 - rm, 0);
break;
default: /* undefined */
static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
- if (unlikely(s->singlestep_enabled)) {
+ if (unlikely(s->singlestep_enabled || s->ss_active)) {
/* An indirect jump so that we still trigger the debug exception. */
if (s->thumb)
dest |= 1;
}
/* Return the mask of PSR bits set by a MSR instruction. */
-static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
+static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
+{
uint32_t mask;
mask = 0;
/* Mask out undefined bits. */
mask &= ~CPSR_RESERVED;
- if (!arm_feature(env, ARM_FEATURE_V4T))
+ if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
mask &= ~CPSR_T;
- if (!arm_feature(env, ARM_FEATURE_V5))
+ }
+ if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
mask &= ~CPSR_Q; /* V5TE in reality*/
- if (!arm_feature(env, ARM_FEATURE_V6))
+ }
+ if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
mask &= ~(CPSR_E | CPSR_GE);
- if (!arm_feature(env, ARM_FEATURE_THUMB2))
+ }
+ if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
mask &= ~CPSR_IT;
+ }
/* Mask out execution state and reserved bits. */
if (!spsr) {
mask &= ~(CPSR_EXEC | CPSR_RESERVED);
/* Translate a NEON load/store element instruction. Return nonzero if the
instruction is invalid. */
-static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
+static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
{
int rd, rn, rm;
int op;
* for invalid encodings; we will generate incorrect syndrome information
* for attempts to execute invalid vfp/neon encodings with FP disabled.
*/
- if (!s->cpacr_fpen) {
+ if (s->fp_excp_el) {
gen_exception_insn(s, 4, EXCP_UDEF,
- syn_fp_access_trap(1, 0xe, s->thumb));
+ syn_fp_access_trap(1, 0xe, s->thumb), s->fp_excp_el);
return 0;
}
We process data in a mixture of 32-bit and 64-bit chunks.
Mostly we use 32-bit chunks so we can use normal scalar instructions. */
-static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
+static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
{
int op;
int q;
* for invalid encodings; we will generate incorrect syndrome information
* for attempts to execute invalid vfp/neon encodings with FP disabled.
*/
- if (!s->cpacr_fpen) {
+ if (s->fp_excp_el) {
gen_exception_insn(s, 4, EXCP_UDEF,
- syn_fp_access_trap(1, 0xe, s->thumb));
+ syn_fp_access_trap(1, 0xe, s->thumb), s->fp_excp_el);
return 0;
}
return 1;
}
if (!u) { /* SHA-1 */
- if (!arm_feature(env, ARM_FEATURE_V8_SHA1)) {
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
return 1;
}
tmp = tcg_const_i32(rd);
gen_helper_crypto_sha1_3reg(cpu_env, tmp, tmp2, tmp3, tmp4);
tcg_temp_free_i32(tmp4);
} else { /* SHA-256 */
- if (!arm_feature(env, ARM_FEATURE_V8_SHA256) || size == 3) {
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
return 1;
}
tmp = tcg_const_i32(rd);
break;
case NEON_3R_FLOAT_MISC:
/* VMAXNM/VMINNM in ARMv8 */
- if (u && !arm_feature(env, ARM_FEATURE_V8)) {
+ if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
return 1;
}
break;
}
break;
case NEON_3R_VFM:
- if (!arm_feature(env, ARM_FEATURE_VFP4) || u) {
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP4) || u) {
return 1;
}
break;
if (op == 14 && size == 2) {
TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
- if (!arm_feature(env, ARM_FEATURE_V8_PMULL)) {
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
return 1;
}
tcg_rn = tcg_temp_new_i64();
}
break;
case NEON_2RM_VCVT_F16_F32:
- if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
q || (rm & 1)) {
return 1;
}
tcg_temp_free_i32(tmp);
break;
case NEON_2RM_VCVT_F32_F16:
- if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
q || (rd & 1)) {
return 1;
}
tcg_temp_free_i32(tmp3);
break;
case NEON_2RM_AESE: case NEON_2RM_AESMC:
- if (!arm_feature(env, ARM_FEATURE_V8_AES)
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
|| ((rm | rd) & 1)) {
return 1;
}
tcg_temp_free_i32(tmp3);
break;
case NEON_2RM_SHA1H:
- if (!arm_feature(env, ARM_FEATURE_V8_SHA1)
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
|| ((rm | rd) & 1)) {
return 1;
}
}
/* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
if (q) {
- if (!arm_feature(env, ARM_FEATURE_V8_SHA256)) {
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
return 1;
}
- } else if (!arm_feature(env, ARM_FEATURE_V8_SHA1)) {
+ } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
return 1;
}
tmp = tcg_const_i32(rd);
return 0;
}
-static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
+static int disas_coproc_insn(DisasContext *s, uint32_t insn)
{
int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
const ARMCPRegInfo *ri;
cpnum = (insn >> 8) & 0xf;
- if (arm_feature(env, ARM_FEATURE_XSCALE)
- && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
- return 1;
-
- /* First check for coprocessor space used for actual instructions */
- switch (cpnum) {
- case 0:
- case 1:
- if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
- return disas_iwmmxt_insn(env, s, insn);
- } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
- return disas_dsp_insn(env, s, insn);
- }
- return 1;
- default:
- break;
+
+ /* First check for coprocessor space used for XScale/iwMMXt insns */
+ if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
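+        /* If the CPAR bit for this coprocessor is clear, accesses UNDEF */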
+ if (extract32(s->c15_cpar, cpnum, 1) == 0) {
+ return 1;
+ }
+ if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
+ return disas_iwmmxt_insn(s, insn);
+ } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
+ return disas_dsp_insn(s, insn);
+ }
+ return 1;
}
/* Otherwise treat as a generic register access */
rt = (insn >> 12) & 0xf;
ri = get_arm_cp_reginfo(s->cp_regs,
- ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
+ ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
if (ri) {
/* Check access permissions */
- if (!cp_access_ok(s->current_pl, ri, isread)) {
+ if (!cp_access_ok(s->current_el, ri, isread)) {
return 1;
}
- if (ri->accessfn) {
+ if (ri->accessfn ||
+ (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
/* Emit code to perform further access permissions checks at
* runtime; this may result in an exception.
+             * Note that on XScale all cp0..cp13 registers do an access check
+ * call in order to handle c15_cpar.
*/
TCGv_ptr tmpptr;
TCGv_i32 tcg_syn;
* in which case the syndrome information won't actually be
* guest visible.
*/
- assert(!arm_feature(env, ARM_FEATURE_V8));
+ assert(!arm_dc_feature(s, ARM_FEATURE_V8));
syndrome = syn_uncategorized();
break;
}
break;
}
- if (use_icount && (ri->type & ARM_CP_IO)) {
+ if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
gen_io_start();
}
}
}
- if (use_icount && (ri->type & ARM_CP_IO)) {
+ if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
/* I/O operations must end the TB here (whether read or write) */
gen_io_end();
gen_lookup_tb(s);
*/
if (is64) {
qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
- "64 bit system register cp:%d opc1: %d crm:%d\n",
- isread ? "read" : "write", cpnum, opc1, crm);
+ "64 bit system register cp:%d opc1: %d crm:%d "
+ "(%s)\n",
+ isread ? "read" : "write", cpnum, opc1, crm,
+ s->ns ? "non-secure" : "secure");
} else {
qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
- "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d\n",
- isread ? "read" : "write", cpnum, opc1, crn, crm, opc2);
+ "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
+ "(%s)\n",
+ isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
+ s->ns ? "non-secure" : "secure");
}
return 1;
{
TCGv_i32 tmp = tcg_temp_new_i32();
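+    /* Note that we are in a load-exclusive: the software single-step
+     * syndrome uses this to set its EX bit.
+     */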
+ s->is_ldex = true;
+
switch (size) {
case 0:
gen_aa32_ld8u(tmp, addr, get_mem_index(s));
{
TCGv_i32 tmp;
TCGv_i64 val64, extaddr;
- int done_label;
- int fail_label;
+ TCGLabel *done_label;
+ TCGLabel *fail_label;
/* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
[addr] = {Rt};
tcg_temp_free_i32(addr);
}
-static void disas_arm_insn(CPUARMState * env, DisasContext *s)
+static void disas_arm_insn(DisasContext *s, unsigned int insn)
{
- unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
+ unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
TCGv_i32 tmp;
TCGv_i32 tmp2;
TCGv_i32 tmp3;
TCGv_i32 addr;
TCGv_i64 tmp64;
- insn = arm_ldl_code(env, s->pc, s->bswap_code);
- s->pc += 4;
-
/* M variants do not implement ARM mode. */
- if (IS_M(env))
+ if (arm_dc_feature(s, ARM_FEATURE_M)) {
goto illegal_op;
+ }
cond = insn >> 28;
if (cond == 0xf){
/* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
/* Unconditional instructions. */
if (((insn >> 25) & 7) == 1) {
/* NEON Data processing. */
- if (!arm_feature(env, ARM_FEATURE_NEON))
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
goto illegal_op;
+ }
- if (disas_neon_data_insn(env, s, insn))
+ if (disas_neon_data_insn(s, insn)) {
goto illegal_op;
+ }
return;
}
if ((insn & 0x0f100000) == 0x04000000) {
/* NEON load/store. */
- if (!arm_feature(env, ARM_FEATURE_NEON))
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
goto illegal_op;
+ }
- if (disas_neon_ls_insn(env, s, insn))
+ if (disas_neon_ls_insn(s, insn)) {
goto illegal_op;
+ }
return;
}
if ((insn & 0x0f000e10) == 0x0e000a00) {
/* VFP. */
- if (disas_vfp_insn(env, s, insn)) {
+ if (disas_vfp_insn(s, insn)) {
goto illegal_op;
}
return;
((insn & 0x0f30f010) == 0x0710f000)) {
if ((insn & (1 << 22)) == 0) {
/* PLDW; v7MP */
- if (!arm_feature(env, ARM_FEATURE_V7MP)) {
+ if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
goto illegal_op;
}
}
}
if (((insn & 0x0f700000) == 0x04100000) ||
((insn & 0x0f700010) == 0x06100000)) {
- if (!arm_feature(env, ARM_FEATURE_V7MP)) {
+ if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
goto illegal_op;
}
return; /* v7MP: Unallocated memory hint: must NOP */
gen_bx_im(s, val);
return;
} else if ((insn & 0x0e000f00) == 0x0c000100) {
- if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
+ if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
/* iWMMXt register transfer. */
- if (env->cp15.c15_cpar & (1 << 1))
- if (!disas_iwmmxt_insn(env, s, insn))
+ if (extract32(s->c15_cpar, 1, 1)) {
+ if (!disas_iwmmxt_insn(s, insn)) {
return;
+ }
+ }
}
} else if ((insn & 0x0fe00000) == 0x0c400000) {
/* Coprocessor double register transfer. */
if (shift)
val = (val >> shift) | (val << (32 - shift));
i = ((insn & (1 << 22)) != 0);
- if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
+ if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
+ i, val)) {
goto illegal_op;
+ }
}
}
} else if ((insn & 0x0f900000) == 0x01000000
/* PSR = reg */
tmp = load_reg(s, rm);
i = ((op1 & 2) != 0);
- if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
+ if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
goto illegal_op;
} else {
/* reg = PSR */
* op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
* Bits 8, 10 and 11 should be zero.
*/
- if (!arm_feature(env, ARM_FEATURE_CRC) || op1 == 0x3 ||
+ if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
(c & 0xd) != 0) {
goto illegal_op;
}
case 7:
{
int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
- /* SMC instruction (op1 == 3)
- and undefined instructions (op1 == 0 || op1 == 2)
- will trap */
- if (op1 != 1) {
+ switch (op1) {
+ case 1:
+ /* bkpt */
+ ARCH(5);
+ gen_exception_insn(s, 4, EXCP_BKPT,
+ syn_aa32_bkpt(imm16, false),
+ default_exception_el(s));
+ break;
+ case 2:
+ /* Hypervisor call (v7) */
+ ARCH(7);
+ if (IS_USER(s)) {
+ goto illegal_op;
+ }
+ gen_hvc(s, imm16);
+ break;
+ case 3:
+ /* Secure monitor call (v6+) */
+ ARCH(6K);
+ if (IS_USER(s)) {
+ goto illegal_op;
+ }
+ gen_smc(s);
+ break;
+ default:
goto illegal_op;
}
- /* bkpt */
- ARCH(5);
- gen_exception_insn(s, 4, EXCP_BKPT, syn_aa32_bkpt(imm16, false));
break;
}
case 0x8: /* signed multiply */
if (logic_cc) {
gen_logic_CC(tmp);
}
- store_reg_bx(env, s, rd, tmp);
+ store_reg_bx(s, rd, tmp);
break;
case 0x01:
tcg_gen_xor_i32(tmp, tmp, tmp2);
if (logic_cc) {
gen_logic_CC(tmp);
}
- store_reg_bx(env, s, rd, tmp);
+ store_reg_bx(s, rd, tmp);
break;
case 0x02:
if (set_cc && rd == 15) {
} else {
tcg_gen_sub_i32(tmp, tmp, tmp2);
}
- store_reg_bx(env, s, rd, tmp);
+ store_reg_bx(s, rd, tmp);
}
break;
case 0x03:
} else {
tcg_gen_sub_i32(tmp, tmp2, tmp);
}
- store_reg_bx(env, s, rd, tmp);
+ store_reg_bx(s, rd, tmp);
break;
case 0x04:
if (set_cc) {
} else {
tcg_gen_add_i32(tmp, tmp, tmp2);
}
- store_reg_bx(env, s, rd, tmp);
+ store_reg_bx(s, rd, tmp);
break;
case 0x05:
if (set_cc) {
} else {
gen_add_carry(tmp, tmp, tmp2);
}
- store_reg_bx(env, s, rd, tmp);
+ store_reg_bx(s, rd, tmp);
break;
case 0x06:
if (set_cc) {
} else {
gen_sub_carry(tmp, tmp, tmp2);
}
- store_reg_bx(env, s, rd, tmp);
+ store_reg_bx(s, rd, tmp);
break;
case 0x07:
if (set_cc) {
} else {
gen_sub_carry(tmp, tmp2, tmp);
}
- store_reg_bx(env, s, rd, tmp);
+ store_reg_bx(s, rd, tmp);
break;
case 0x08:
if (set_cc) {
if (logic_cc) {
gen_logic_CC(tmp);
}
- store_reg_bx(env, s, rd, tmp);
+ store_reg_bx(s, rd, tmp);
break;
case 0x0d:
if (logic_cc && rd == 15) {
if (logic_cc) {
gen_logic_CC(tmp2);
}
- store_reg_bx(env, s, rd, tmp2);
+ store_reg_bx(s, rd, tmp2);
}
break;
case 0x0e:
if (logic_cc) {
gen_logic_CC(tmp);
}
- store_reg_bx(env, s, rd, tmp);
+ store_reg_bx(s, rd, tmp);
break;
default:
case 0x0f:
if (logic_cc) {
gen_logic_CC(tmp2);
}
- store_reg_bx(env, s, rd, tmp2);
+ store_reg_bx(s, rd, tmp2);
break;
}
if (op1 != 0x0f && op1 != 0x0d) {
case 1:
case 3:
/* SDIV, UDIV */
- if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) {
+ if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
goto illegal_op;
}
if (((insn >> 5) & 7) || (rd != 15)) {
ARCH(6T2);
shift = (insn >> 7) & 0x1f;
i = (insn >> 16) & 0x1f;
+ if (i < shift) {
+ /* UNPREDICTABLE; we choose to UNDEF */
+ goto illegal_op;
+ }
i = i + 1 - shift;
if (rm == 15) {
tmp = tcg_temp_new_i32();
tmp2 = load_reg(s, rn);
if ((insn & 0x01200000) == 0x00200000) {
/* ldrt/strt */
- i = MMU_USER_IDX;
+ i = get_a32_user_mem_index(s);
} else {
i = get_mem_index(s);
}
}
if (insn & (1 << 20)) {
/* Complete the load. */
- store_reg_from_load(env, s, rd, tmp);
+ store_reg_from_load(s, rd, tmp);
}
break;
case 0x08:
case 0x09:
{
- int j, n, user, loaded_base;
+ int j, n, loaded_base;
+ bool exc_return = false;
+ bool is_load = extract32(insn, 20, 1);
+ bool user = false;
TCGv_i32 loaded_var;
/* load/store multiple words */
/* XXX: store correct base if write back */
- user = 0;
if (insn & (1 << 22)) {
+ /* LDM (user), LDM (exception return) and STM (user) */
if (IS_USER(s))
goto illegal_op; /* only usable in supervisor mode */
- if ((insn & (1 << 15)) == 0)
- user = 1;
+ if (is_load && extract32(insn, 15, 1)) {
+ exc_return = true;
+ } else {
+ user = true;
+ }
}
rn = (insn >> 16) & 0xf;
addr = load_reg(s, rn);
j = 0;
for(i=0;i<16;i++) {
if (insn & (1 << i)) {
- if (insn & (1 << 20)) {
+ if (is_load) {
/* load */
tmp = tcg_temp_new_i32();
gen_aa32_ld32u(tmp, addr, get_mem_index(s));
loaded_var = tmp;
loaded_base = 1;
} else {
- store_reg_from_load(env, s, i, tmp);
+ store_reg_from_load(s, i, tmp);
}
} else {
/* store */
if (loaded_base) {
store_reg(s, rn, loaded_var);
}
- if ((insn & (1 << 22)) && !user) {
+ if (exc_return) {
/* Restore CPSR from SPSR. */
tmp = load_cpu_field(spsr);
gen_set_cpsr(tmp, CPSR_ERET_MASK);
case 0xe:
if (((insn >> 8) & 0xe) == 10) {
/* VFP. */
- if (disas_vfp_insn(env, s, insn)) {
+ if (disas_vfp_insn(s, insn)) {
goto illegal_op;
}
- } else if (disas_coproc_insn(env, s, insn)) {
+ } else if (disas_coproc_insn(s, insn)) {
/* Coprocessor. */
goto illegal_op;
}
break;
default:
illegal_op:
- gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized());
+ gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
+ default_exception_el(s));
break;
}
}
int conds;
int logic_cc;
- if (!(arm_feature(env, ARM_FEATURE_THUMB2)
- || arm_feature (env, ARM_FEATURE_M))) {
+ if (!(arm_dc_feature(s, ARM_FEATURE_THUMB2)
+ || arm_dc_feature(s, ARM_FEATURE_M))) {
/* Thumb-1 cores may need to treat bl and blx as a pair of
16-bit instructions to get correct prefetch abort behavior. */
insn = insn_hw1;
/* Load/store multiple, RFE, SRS. */
if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
/* RFE, SRS: not available in user mode or on M profile */
- if (IS_USER(s) || IS_M(env)) {
+ if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
goto illegal_op;
}
if (insn & (1 << 20)) {
gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
if (logic_cc)
gen_logic_CC(tmp);
- store_reg_bx(env, s, rd, tmp);
+ store_reg_bx(s, rd, tmp);
break;
case 1: /* Sign/zero extend. */
tmp = load_reg(s, rm);
uint32_t sz = op & 0x3;
uint32_t c = op & 0x8;
- if (!arm_feature(env, ARM_FEATURE_CRC)) {
+ if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
goto illegal_op;
}
tmp2 = load_reg(s, rm);
if ((op & 0x50) == 0x10) {
/* sdiv, udiv */
- if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
+ if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
goto illegal_op;
}
if (op & 0x20)
if (((insn >> 24) & 3) == 3) {
/* Translate into the equivalent ARM encoding. */
insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
- if (disas_neon_data_insn(env, s, insn))
+ if (disas_neon_data_insn(s, insn)) {
goto illegal_op;
+ }
} else if (((insn >> 8) & 0xe) == 10) {
- if (disas_vfp_insn(env, s, insn)) {
+ if (disas_vfp_insn(s, insn)) {
goto illegal_op;
}
} else {
if (insn & (1 << 28))
goto illegal_op;
- if (disas_coproc_insn (env, s, insn))
+ if (disas_coproc_insn(s, insn)) {
goto illegal_op;
+ }
}
break;
case 8: case 9: case 10: case 11:
goto illegal_op;
if (insn & (1 << 26)) {
- /* Secure monitor call (v6Z) */
- qemu_log_mask(LOG_UNIMP,
- "arm: unimplemented secure monitor call\n");
- goto illegal_op; /* not implemented. */
+ if (!(insn & (1 << 20))) {
+ /* Hypervisor call (v7) */
+ int imm16 = extract32(insn, 16, 4) << 12
+ | extract32(insn, 0, 12);
+ ARCH(7);
+ if (IS_USER(s)) {
+ goto illegal_op;
+ }
+ gen_hvc(s, imm16);
+ } else {
+ /* Secure monitor call (v6+) */
+ ARCH(6K);
+ if (IS_USER(s)) {
+ goto illegal_op;
+ }
+ gen_smc(s);
+ }
} else {
op = (insn >> 20) & 7;
switch (op) {
case 0: /* msr cpsr. */
- if (IS_M(env)) {
+ if (arm_dc_feature(s, ARM_FEATURE_M)) {
tmp = load_reg(s, rn);
addr = tcg_const_i32(insn & 0xff);
gen_helper_v7m_msr(cpu_env, addr, tmp);
}
/* fall through */
case 1: /* msr spsr. */
- if (IS_M(env))
+ if (arm_dc_feature(s, ARM_FEATURE_M)) {
goto illegal_op;
+ }
tmp = load_reg(s, rn);
if (gen_set_psr(s,
- msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
+ msr_mask(s, (insn >> 8) & 0xf, op == 1),
op == 1, tmp))
goto illegal_op;
break;
break;
case 6: /* mrs cpsr. */
tmp = tcg_temp_new_i32();
- if (IS_M(env)) {
+ if (arm_dc_feature(s, ARM_FEATURE_M)) {
addr = tcg_const_i32(insn & 0xff);
gen_helper_v7m_mrs(tmp, cpu_env, addr);
tcg_temp_free_i32(addr);
break;
case 7: /* mrs spsr. */
/* Not accessible in user mode. */
- if (IS_USER(s) || IS_M(env))
+ if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
goto illegal_op;
+ }
tmp = load_cpu_field(spsr);
store_reg(s, rd, tmp);
break;
int writeback = 0;
int memidx;
if ((insn & 0x01100000) == 0x01000000) {
- if (disas_neon_ls_insn(env, s, insn))
+ if (disas_neon_ls_insn(s, insn)) {
goto illegal_op;
+ }
break;
}
op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
break;
case 0xe: /* User privilege. */
tcg_gen_addi_i32(addr, addr, imm);
- memidx = MMU_USER_IDX;
+ memidx = get_a32_user_mem_index(s);
break;
case 0x9: /* Post-decrement. */
imm = -imm;
store_reg(s, 13, addr);
/* set the new PC value */
if ((insn & 0x0900) == 0x0900) {
- store_reg_from_load(env, s, 15, tmp);
+ store_reg_from_load(s, 15, tmp);
}
break;
{
int imm8 = extract32(insn, 0, 8);
ARCH(5);
- gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true));
+ gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true),
+ default_exception_el(s));
break;
}
if (IS_USER(s)) {
break;
}
- if (IS_M(env)) {
+ if (arm_dc_feature(s, ARM_FEATURE_M)) {
tmp = tcg_const_i32((insn & (1 << 4)) != 0);
/* FAULTMASK */
if (insn & 1) {
}
return;
undef32:
- gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized());
+ gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
+ default_exception_el(s));
return;
illegal_op:
undef:
- gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized());
+ gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
+ default_exception_el(s));
}
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
CPUARMState *env = &cpu->env;
DisasContext dc1, *dc = &dc1;
CPUBreakpoint *bp;
- uint16_t *gen_opc_end;
int j, lj;
target_ulong pc_start;
target_ulong next_page_start;
dc->tb = tb;
- gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
-
dc->is_jmp = DISAS_NEXT;
dc->pc = pc_start;
dc->singlestep_enabled = cs->singlestep_enabled;
dc->condjmp = 0;
dc->aarch64 = 0;
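+    /* Whether EL3 is AArch64; needed when computing the default target
+     * EL for exceptions.
+     */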
+ dc->el3_is_aa64 = arm_el_is_aa64(env, 3);
dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
+ dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
+ dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
- dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
+ dc->user = (dc->current_el == 0);
#endif
- dc->cpacr_fpen = ARM_TBFLAG_CPACR_FPEN(tb->flags);
+ dc->ns = ARM_TBFLAG_NS(tb->flags);
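+    /* fp_excp_el is non-zero if FP/Neon accesses must trap; it holds the
+     * EL to which the trap exception is taken.
+     */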
+ dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(tb->flags);
dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
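+    /* Cached copy of the XScale CPAR, so coprocessor enable bits can be
+     * checked at translate time.
+     */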
+ dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
dc->cp_regs = cpu->cp_regs;
- dc->current_pl = arm_current_pl(env);
dc->features = env->features;
+ /* Single step state. The code-generation logic here is:
+ * SS_ACTIVE == 0:
+ * generate code with no special handling for single-stepping (except
+ * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
+ * this happens anyway because those changes are all system register or
+ * PSTATE writes).
+ * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
+ * emit code for one insn
+ * emit code to clear PSTATE.SS
+ * emit code to generate software step exception for completed step
+ * end TB (as usual for having generated an exception)
+ * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
+ * emit code to generate a software step exception
+ * end the TB
+ */
+ dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags);
+ dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags);
+ dc->is_ldex = false;
+ dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
+
cpu_F0s = tcg_temp_new_i32();
cpu_F1s = tcg_temp_new_i32();
cpu_F0d = tcg_temp_new_i64();
if (max_insns == 0)
max_insns = CF_COUNT_MASK;
- gen_tb_start();
+ gen_tb_start(tb);
tcg_clear_temp_count();
break;
}
#else
- if (dc->pc >= 0xfffffff0 && IS_M(env)) {
+ if (dc->pc >= 0xfffffff0 && arm_dc_feature(dc, ARM_FEATURE_M)) {
/* We always get here via a jump, so know we are not in a
conditional execution block. */
gen_exception_internal(EXCP_EXCEPTION_EXIT);
}
}
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
if (lj < j) {
lj++;
while (lj < j)
tcg_gen_debug_insn_start(dc->pc);
}
+ if (dc->ss_active && !dc->pstate_ss) {
+ /* Singlestep state is Active-pending.
+ * If we're in this state at the start of a TB then either
+ * a) we just took an exception to an EL which is being debugged
+ * and this is the first insn in the exception handler
+ * b) debug exceptions were masked and we just unmasked them
+ * without changing EL (eg by clearing PSTATE.D)
+ * In either case we're going to take a swstep exception in the
+ * "did not step an insn" case, and so the syndrome ISV and EX
+ * bits should be zero.
+ */
+ assert(num_insns == 0);
+ gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
+ default_exception_el(dc));
+ goto done_generating;
+ }
+
if (dc->thumb) {
disas_thumb_insn(env, dc);
if (dc->condexec_mask) {
}
}
} else {
- disas_arm_insn(env, dc);
+ unsigned int insn = arm_ldl_code(env, dc->pc, dc->bswap_code);
+ dc->pc += 4;
+ disas_arm_insn(dc, insn);
}
if (dc->condjmp && !dc->is_jmp) {
* Also stop translation when a page boundary is reached. This
* ensures prefetch aborts occur at the right place. */
num_insns ++;
- } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
+ } while (!dc->is_jmp && !tcg_op_buf_full() &&
!cs->singlestep_enabled &&
!singlestep &&
+ !dc->ss_active &&
dc->pc < next_page_start &&
num_insns < max_insns);
/* At this stage dc->condjmp will only be set when the skipped
instruction was a conditional branch or trap, and the PC has
already been written. */
- if (unlikely(cs->singlestep_enabled)) {
+ if (unlikely(cs->singlestep_enabled || dc->ss_active)) {
/* Make sure the pc is updated, and raise a debug exception. */
if (dc->condjmp) {
gen_set_condexec(dc);
if (dc->is_jmp == DISAS_SWI) {
- gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
+ gen_ss_advance(dc);
+ gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
+ default_exception_el(dc));
+ } else if (dc->is_jmp == DISAS_HVC) {
+ gen_ss_advance(dc);
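+        /* HVC exceptions are always taken to EL2 */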
+ gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
+ } else if (dc->is_jmp == DISAS_SMC) {
+ gen_ss_advance(dc);
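+        /* SMC exceptions are always taken to EL3 */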
+ gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
+ } else if (dc->ss_active) {
+ gen_step_complete_exception(dc);
} else {
gen_exception_internal(EXCP_DEBUG);
}
}
gen_set_condexec(dc);
if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
- gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
+ gen_ss_advance(dc);
+ gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
+ default_exception_el(dc));
+ } else if (dc->is_jmp == DISAS_HVC && !dc->condjmp) {
+ gen_ss_advance(dc);
+ gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
+ } else if (dc->is_jmp == DISAS_SMC && !dc->condjmp) {
+ gen_ss_advance(dc);
+ gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
+ } else if (dc->ss_active) {
+ gen_step_complete_exception(dc);
} else {
/* FIXME: Single stepping a WFI insn will not halt
the CPU. */
gen_helper_wfe(cpu_env);
break;
case DISAS_SWI:
- gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
+ gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
+ default_exception_el(dc));
+ break;
+ case DISAS_HVC:
+ gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
+ break;
+ case DISAS_SMC:
+ gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
break;
}
if (dc->condjmp) {
done_generating:
gen_tb_end(tb, num_insns);
- *tcg_ctx.gen_opc_ptr = INDEX_op_end;
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
}
#endif
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
lj++;
while (lj <= j)
tcg_ctx.gen_opc_instr_start[lj++] = 0;