#include "tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/bitops.h"
-#include "qemu/qemu-print.h"
#include "arm_ldst.h"
#include "hw/semihosting/semihost.h"
TCGv_i64 cpu_exclusive_addr;
TCGv_i64 cpu_exclusive_val;
-/* FIXME: These should be removed. */
-static TCGv_i32 cpu_F0s, cpu_F1s;
-static TCGv_i64 cpu_F0d, cpu_F1d;
-
#include "exec/gen-icount.h"
static const char * const regnames[] =
/* Function prototypes for gen_ functions calling Neon helpers. */
typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
TCGv_i32, TCGv_i32);
+/* Function prototypes for gen_ functions for fixed-point conversions */
+typedef void VFPGenFixPointFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
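+/* e.g. gen_helper_vfp_sltos(dest, src, shift, fpst) matches this
+ * prototype; the trailing TCGv_ptr is the float_status pointer. */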
/* initialize TCG globals. */
void arm_translate_init(void)
#define store_cpu_field(var, name) \
store_cpu_offset(var, offsetof(CPUARMState, name))
+/* The architectural value of PC. */
+static uint32_t read_pc(DisasContext *s)
+{
+ return s->pc_curr + (s->thumb ? 4 : 8);
+}
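+/*
+ * Illustrative example: for "mov r0, pc" at address 0x1000, the
+ * architectural value read is 0x1008 in ARM state and 0x1004 in
+ * Thumb state; read_pc() returns exactly that.
+ */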
+
/* Set a variable to the value of a CPU register. */
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
if (reg == 15) {
- uint32_t addr;
- /* normally, since we updated PC, we need only to add one insn */
- if (s->thumb)
- addr = (long)s->pc + 2;
- else
- addr = (long)s->pc + 4;
- tcg_gen_movi_i32(var, addr);
+ tcg_gen_movi_i32(var, read_pc(s));
} else {
tcg_gen_mov_i32(var, cpu_R[reg]);
}
return tmp;
}
+/*
+ * Create a new temp, REG + OFS, except PC is ALIGN(PC, 4).
+ * This is used for load/store insns for which use of PC implies
+ * (literal) addressing, or for an ADD that implies ADR.
+ */
+static TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ if (reg == 15) {
+ tcg_gen_movi_i32(tmp, (read_pc(s) & ~3) + ofs);
+ } else {
+ tcg_gen_addi_i32(tmp, cpu_R[reg], ofs);
+ }
+ return tmp;
+}
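+/*
+ * Worked example (illustrative): a Thumb "ldr r0, [pc, #8]" at 0x1002
+ * has read_pc() == 0x1006, so add_reg_for_lit(s, 15, 8) computes
+ * ALIGN(0x1006, 4) + 8 == 0x100c as the literal's address.
+ */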
+
/* Set a CPU register. The source must be a temporary and will be
marked as dead. */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
tcg_temp_free_i32(tcg_excp);
}
-static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
-{
- TCGv_i32 tcg_excp = tcg_const_i32(excp);
- TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
- TCGv_i32 tcg_el = tcg_const_i32(target_el);
-
- gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
- tcg_syn, tcg_el);
-
- tcg_temp_free_i32(tcg_el);
- tcg_temp_free_i32(tcg_syn);
- tcg_temp_free_i32(tcg_excp);
-}
-
static void gen_step_complete_exception(DisasContext *s)
{
/* We just completed a step of an insn. Move from Active-not-pending
* of the exception, and our syndrome information is always correct.
*/
gen_ss_advance(s);
- gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
- default_exception_el(s));
+ gen_swstep_exception(s, 1, s->is_ldex);
s->base.is_jmp = DISAS_NORETURN;
}
static void shifter_out_im(TCGv_i32 var, int shift)
{
- if (shift == 0) {
- tcg_gen_andi_i32(cpu_CF, var, 1);
- } else {
- tcg_gen_shri_i32(cpu_CF, var, shift);
- if (shift != 31) {
- tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
- }
- }
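+ /* CF = bit SHIFT of VAR, i.e. (var >> shift) & 1; extract handles
+ * the shift == 0 and shift == 31 cases uniformly. */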
+ tcg_gen_extract_i32(cpu_CF, var, shift, 1);
}
/* Shift by immediate. Includes special handling for shift == 0. */
* We do however need to set the PC, because the blxns helper reads it.
* The blxns helper may throw an exception.
*/
- gen_set_pc_im(s, s->pc);
+ gen_set_pc_im(s, s->base.pc_next);
gen_helper_v7m_blxns(cpu_env, var);
tcg_temp_free_i32(var);
s->base.is_jmp = DISAS_EXIT;
* as an undefined insn by runtime configuration (ie before
* the insn really executes).
*/
- gen_set_pc_im(s, s->pc - 4);
+ gen_set_pc_im(s, s->pc_curr);
gen_helper_pre_hvc(cpu_env);
/* Otherwise we will treat this as a real exception which
* happens after execution of the insn. (The distinction matters
* for single stepping.)
*/
s->svc_imm = imm16;
- gen_set_pc_im(s, s->pc);
+ gen_set_pc_im(s, s->base.pc_next);
s->base.is_jmp = DISAS_HVC;
}
*/
TCGv_i32 tmp;
- gen_set_pc_im(s, s->pc - 4);
+ gen_set_pc_im(s, s->pc_curr);
tmp = tcg_const_i32(syn_aa32_smc());
gen_helper_pre_smc(cpu_env, tmp);
tcg_temp_free_i32(tmp);
- gen_set_pc_im(s, s->pc);
+ gen_set_pc_im(s, s->base.pc_next);
s->base.is_jmp = DISAS_SMC;
}
-static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
+static void gen_exception_internal_insn(DisasContext *s, uint32_t pc, int excp)
{
gen_set_condexec(s);
- gen_set_pc_im(s, s->pc - offset);
+ gen_set_pc_im(s, pc);
gen_exception_internal(excp);
s->base.is_jmp = DISAS_NORETURN;
}
-static void gen_exception_insn(DisasContext *s, int offset, int excp,
+static void gen_exception_insn(DisasContext *s, uint32_t pc, int excp,
int syn, uint32_t target_el)
{
gen_set_condexec(s);
- gen_set_pc_im(s, s->pc - offset);
+ gen_set_pc_im(s, pc);
gen_exception(excp, syn, target_el);
s->base.is_jmp = DISAS_NORETURN;
}
-static void gen_exception_bkpt_insn(DisasContext *s, int offset, uint32_t syn)
+static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syn)
{
TCGv_i32 tcg_syn;
gen_set_condexec(s);
- gen_set_pc_im(s, s->pc - offset);
+ gen_set_pc_im(s, s->pc_curr);
tcg_syn = tcg_const_i32(syn);
gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
tcg_temp_free_i32(tcg_syn);
s->base.is_jmp = DISAS_NORETURN;
}
+void unallocated_encoding(DisasContext *s)
+{
+ /* Unallocated and reserved encodings are uncategorized */
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
+ default_exception_el(s));
+}
+
/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
{
- tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
+ tcg_gen_movi_i32(cpu_R[15], s->base.pc_next);
s->base.is_jmp = DISAS_EXIT;
}
s->current_el != 0 &&
#endif
(imm == (s->thumb ? 0x3c : 0xf000))) {
- gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
+ gen_exception_internal_insn(s, s->base.pc_next, EXCP_SEMIHOST);
return;
}
- gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
- default_exception_el(s));
+ unallocated_encoding(s);
}
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
return statusptr;
}
-static inline void gen_vfp_abs(int dp)
-{
- if (dp)
- gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
- else
- gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
-}
-
-static inline void gen_vfp_neg(int dp)
-{
- if (dp)
- gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
- else
- gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
-}
-
-static inline void gen_vfp_cmp(int dp)
-{
- if (dp)
- gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
- else
- gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
-}
-
-static inline void gen_vfp_cmpe(int dp)
-{
- if (dp)
- gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
- else
- gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
-}
-
-static inline void gen_vfp_F1_ld0(int dp)
-{
- if (dp)
- tcg_gen_movi_i64(cpu_F1d, 0);
- else
- tcg_gen_movi_i32(cpu_F1s, 0);
-}
-
-#define VFP_GEN_ITOF(name) \
-static inline void gen_vfp_##name(int dp, int neon) \
-{ \
- TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
- if (dp) { \
- gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
- } else { \
- gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
- } \
- tcg_temp_free_ptr(statusptr); \
-}
-
-VFP_GEN_ITOF(uito)
-VFP_GEN_ITOF(sito)
-#undef VFP_GEN_ITOF
-
-#define VFP_GEN_FTOI(name) \
-static inline void gen_vfp_##name(int dp, int neon) \
-{ \
- TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
- if (dp) { \
- gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
- } else { \
- gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
- } \
- tcg_temp_free_ptr(statusptr); \
-}
-
-VFP_GEN_FTOI(toui)
-VFP_GEN_FTOI(touiz)
-VFP_GEN_FTOI(tosi)
-VFP_GEN_FTOI(tosiz)
-#undef VFP_GEN_FTOI
-
-#define VFP_GEN_FIX(name, round) \
-static inline void gen_vfp_##name(int dp, int shift, int neon) \
-{ \
- TCGv_i32 tmp_shift = tcg_const_i32(shift); \
- TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
- if (dp) { \
- gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
- statusptr); \
- } else { \
- gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
- statusptr); \
- } \
- tcg_temp_free_i32(tmp_shift); \
- tcg_temp_free_ptr(statusptr); \
-}
-VFP_GEN_FIX(tosh, _round_to_zero)
-VFP_GEN_FIX(tosl, _round_to_zero)
-VFP_GEN_FIX(touh, _round_to_zero)
-VFP_GEN_FIX(toul, _round_to_zero)
-VFP_GEN_FIX(shto, )
-VFP_GEN_FIX(slto, )
-VFP_GEN_FIX(uhto, )
-VFP_GEN_FIX(ulto, )
-#undef VFP_GEN_FIX
-
static inline long vfp_reg_offset(bool dp, unsigned reg)
{
if (dp) {
return ret;
}
-#define tcg_gen_ld_f32 tcg_gen_ld_i32
-#define tcg_gen_ld_f64 tcg_gen_ld_i64
-#define tcg_gen_st_f32 tcg_gen_st_i32
-#define tcg_gen_st_f64 tcg_gen_st_i64
-
-static inline void gen_mov_F0_vreg(int dp, int reg)
-{
- if (dp)
- tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
- else
- tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
-}
-
-static inline void gen_mov_F1_vreg(int dp, int reg)
-{
- if (dp)
- tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
- else
- tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
-}
-
-static inline void gen_mov_vreg_F0(int dp, int reg)
-{
- if (dp)
- tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
- else
- tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
-}
-
#define ARM_CP_RW_BIT (1 << 20)
/* Include the VFP decoder */
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
-/* Move between integer and VFP cores. */
-static TCGv_i32 gen_vfp_mrs(void)
-{
- TCGv_i32 tmp = tcg_temp_new_i32();
- tcg_gen_mov_i32(tmp, cpu_F0s);
- return tmp;
-}
-
-static void gen_vfp_msr(TCGv_i32 tmp)
-{
- tcg_gen_mov_i32(cpu_F0s, tmp);
- tcg_temp_free_i32(tmp);
-}
-
static void gen_neon_dup_low16(TCGv_i32 var)
{
TCGv_i32 tmp = tcg_temp_new_i32();
*/
static int disas_vfp_insn(DisasContext *s, uint32_t insn)
{
- uint32_t rd, rn, rm, op, delta_d, delta_m, bank_mask;
- int dp, veclen;
- TCGv_i32 tmp;
- TCGv_i32 tmp2;
-
if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
return 1;
}
return 0;
}
}
-
- if (extract32(insn, 28, 4) == 0xf) {
- /*
- * Encodings with T=1 (Thumb) or unconditional (ARM): these
- * were all handled by the decodetree decoder, so any insn
- * patterns which get here must be UNDEF.
- */
- return 1;
- }
-
- /*
- * FIXME: this access check should not take precedence over UNDEF
- * for invalid encodings; we will generate incorrect syndrome information
- * for attempts to execute invalid vfp/neon encodings with FP disabled.
- */
- if (!vfp_access_check(s)) {
- return 0;
- }
-
- dp = ((insn & 0xf00) == 0xb00);
- switch ((insn >> 24) & 0xf) {
- case 0xe:
- if (insn & (1 << 4)) {
- /* already handled by decodetree */
- return 1;
- } else {
- /* data processing */
- bool rd_is_dp = dp;
- bool rm_is_dp = dp;
- bool no_output = false;
-
- /* The opcode is in bits 23, 21, 20 and 6. */
- op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
- rn = VFP_SREG_N(insn);
-
- switch (op) {
- case 0 ... 14:
- /* Already handled by decodetree */
- return 1;
- case 15:
- switch (rn) {
- case 0 ... 3:
- /* Already handled by decodetree */
- return 1;
- default:
- break;
- }
- default:
- break;
- }
-
- if (op == 15) {
- /* rn is opcode, encoded as per VFP_SREG_N. */
- switch (rn) {
- case 0x04: /* vcvtb.f64.f16, vcvtb.f32.f16 */
- case 0x05: /* vcvtt.f64.f16, vcvtt.f32.f16 */
- /*
- * VCVTB, VCVTT: only present with the halfprec extension
- * UNPREDICTABLE if bit 8 is set prior to ARMv8
- * (we choose to UNDEF)
- */
- if (dp) {
- if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
- return 1;
- }
- } else {
- if (!dc_isar_feature(aa32_fp16_spconv, s)) {
- return 1;
- }
- }
- rm_is_dp = false;
- break;
- case 0x06: /* vcvtb.f16.f32, vcvtb.f16.f64 */
- case 0x07: /* vcvtt.f16.f32, vcvtt.f16.f64 */
- if (dp) {
- if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
- return 1;
- }
- } else {
- if (!dc_isar_feature(aa32_fp16_spconv, s)) {
- return 1;
- }
- }
- rd_is_dp = false;
- break;
-
- case 0x08: case 0x0a: /* vcmp, vcmpz */
- case 0x09: case 0x0b: /* vcmpe, vcmpez */
- no_output = true;
- break;
-
- case 0x0c: /* vrintr */
- case 0x0d: /* vrintz */
- case 0x0e: /* vrintx */
- break;
-
- case 0x0f: /* vcvt double<->single */
- rd_is_dp = !dp;
- break;
-
- case 0x10: /* vcvt.fxx.u32 */
- case 0x11: /* vcvt.fxx.s32 */
- rm_is_dp = false;
- break;
- case 0x18: /* vcvtr.u32.fxx */
- case 0x19: /* vcvtz.u32.fxx */
- case 0x1a: /* vcvtr.s32.fxx */
- case 0x1b: /* vcvtz.s32.fxx */
- rd_is_dp = false;
- break;
-
- case 0x14: /* vcvt fp <-> fixed */
- case 0x15:
- case 0x16:
- case 0x17:
- case 0x1c:
- case 0x1d:
- case 0x1e:
- case 0x1f:
- if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
- return 1;
- }
- /* Immediate frac_bits has same format as SREG_M. */
- rm_is_dp = false;
- break;
-
- case 0x13: /* vjcvt */
- if (!dp || !dc_isar_feature(aa32_jscvt, s)) {
- return 1;
- }
- rd_is_dp = false;
- break;
-
- default:
- return 1;
- }
- } else if (dp) {
- /* rn is register number */
- VFP_DREG_N(rn, insn);
- }
-
- if (rd_is_dp) {
- VFP_DREG_D(rd, insn);
- } else {
- rd = VFP_SREG_D(insn);
- }
- if (rm_is_dp) {
- VFP_DREG_M(rm, insn);
- } else {
- rm = VFP_SREG_M(insn);
- }
-
- veclen = s->vec_len;
- if (op == 15 && rn > 3) {
- veclen = 0;
- }
-
- /* Shut up compiler warnings. */
- delta_m = 0;
- delta_d = 0;
- bank_mask = 0;
-
- if (veclen > 0) {
- if (dp)
- bank_mask = 0xc;
- else
- bank_mask = 0x18;
-
- /* Figure out what type of vector operation this is. */
- if ((rd & bank_mask) == 0) {
- /* scalar */
- veclen = 0;
- } else {
- if (dp)
- delta_d = (s->vec_stride >> 1) + 1;
- else
- delta_d = s->vec_stride + 1;
-
- if ((rm & bank_mask) == 0) {
- /* mixed scalar/vector */
- delta_m = 0;
- } else {
- /* vector */
- delta_m = delta_d;
- }
- }
- }
-
- /* Load the initial operands. */
- if (op == 15) {
- switch (rn) {
- case 0x08: case 0x09: /* Compare */
- gen_mov_F0_vreg(dp, rd);
- gen_mov_F1_vreg(dp, rm);
- break;
- case 0x0a: case 0x0b: /* Compare with zero */
- gen_mov_F0_vreg(dp, rd);
- gen_vfp_F1_ld0(dp);
- break;
- case 0x14: /* vcvt fp <-> fixed */
- case 0x15:
- case 0x16:
- case 0x17:
- case 0x1c:
- case 0x1d:
- case 0x1e:
- case 0x1f:
- /* Source and destination the same. */
- gen_mov_F0_vreg(dp, rd);
- break;
- default:
- /* One source operand. */
- gen_mov_F0_vreg(rm_is_dp, rm);
- break;
- }
- } else {
- /* Two source operands. */
- gen_mov_F0_vreg(dp, rn);
- gen_mov_F1_vreg(dp, rm);
- }
-
- for (;;) {
- /* Perform the calculation. */
- switch (op) {
- case 15: /* extension space */
- switch (rn) {
- case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
- {
- TCGv_ptr fpst = get_fpstatus_ptr(false);
- TCGv_i32 ahp_mode = get_ahp_flag();
- tmp = gen_vfp_mrs();
- tcg_gen_ext16u_i32(tmp, tmp);
- if (dp) {
- gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
- fpst, ahp_mode);
- } else {
- gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
- fpst, ahp_mode);
- }
- tcg_temp_free_i32(ahp_mode);
- tcg_temp_free_ptr(fpst);
- tcg_temp_free_i32(tmp);
- break;
- }
- case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
- {
- TCGv_ptr fpst = get_fpstatus_ptr(false);
- TCGv_i32 ahp = get_ahp_flag();
- tmp = gen_vfp_mrs();
- tcg_gen_shri_i32(tmp, tmp, 16);
- if (dp) {
- gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
- fpst, ahp);
- } else {
- gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
- fpst, ahp);
- }
- tcg_temp_free_i32(tmp);
- tcg_temp_free_i32(ahp);
- tcg_temp_free_ptr(fpst);
- break;
- }
- case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
- {
- TCGv_ptr fpst = get_fpstatus_ptr(false);
- TCGv_i32 ahp = get_ahp_flag();
- tmp = tcg_temp_new_i32();
-
- if (dp) {
- gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
- fpst, ahp);
- } else {
- gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
- fpst, ahp);
- }
- tcg_temp_free_i32(ahp);
- tcg_temp_free_ptr(fpst);
- gen_mov_F0_vreg(0, rd);
- tmp2 = gen_vfp_mrs();
- tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
- tcg_gen_or_i32(tmp, tmp, tmp2);
- tcg_temp_free_i32(tmp2);
- gen_vfp_msr(tmp);
- break;
- }
- case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
- {
- TCGv_ptr fpst = get_fpstatus_ptr(false);
- TCGv_i32 ahp = get_ahp_flag();
- tmp = tcg_temp_new_i32();
- if (dp) {
- gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
- fpst, ahp);
- } else {
- gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
- fpst, ahp);
- }
- tcg_temp_free_i32(ahp);
- tcg_temp_free_ptr(fpst);
- tcg_gen_shli_i32(tmp, tmp, 16);
- gen_mov_F0_vreg(0, rd);
- tmp2 = gen_vfp_mrs();
- tcg_gen_ext16u_i32(tmp2, tmp2);
- tcg_gen_or_i32(tmp, tmp, tmp2);
- tcg_temp_free_i32(tmp2);
- gen_vfp_msr(tmp);
- break;
- }
- case 8: /* cmp */
- gen_vfp_cmp(dp);
- break;
- case 9: /* cmpe */
- gen_vfp_cmpe(dp);
- break;
- case 10: /* cmpz */
- gen_vfp_cmp(dp);
- break;
- case 11: /* cmpez */
- gen_vfp_F1_ld0(dp);
- gen_vfp_cmpe(dp);
- break;
- case 12: /* vrintr */
- {
- TCGv_ptr fpst = get_fpstatus_ptr(0);
- if (dp) {
- gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
- } else {
- gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
- }
- tcg_temp_free_ptr(fpst);
- break;
- }
- case 13: /* vrintz */
- {
- TCGv_ptr fpst = get_fpstatus_ptr(0);
- TCGv_i32 tcg_rmode;
- tcg_rmode = tcg_const_i32(float_round_to_zero);
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
- if (dp) {
- gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
- } else {
- gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
- }
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
- tcg_temp_free_i32(tcg_rmode);
- tcg_temp_free_ptr(fpst);
- break;
- }
- case 14: /* vrintx */
- {
- TCGv_ptr fpst = get_fpstatus_ptr(0);
- if (dp) {
- gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
- } else {
- gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
- }
- tcg_temp_free_ptr(fpst);
- break;
- }
- case 15: /* single<->double conversion */
- if (dp) {
- gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
- } else {
- gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
- }
- break;
- case 16: /* fuito */
- gen_vfp_uito(dp, 0);
- break;
- case 17: /* fsito */
- gen_vfp_sito(dp, 0);
- break;
- case 19: /* vjcvt */
- gen_helper_vjcvt(cpu_F0s, cpu_F0d, cpu_env);
- break;
- case 20: /* fshto */
- gen_vfp_shto(dp, 16 - rm, 0);
- break;
- case 21: /* fslto */
- gen_vfp_slto(dp, 32 - rm, 0);
- break;
- case 22: /* fuhto */
- gen_vfp_uhto(dp, 16 - rm, 0);
- break;
- case 23: /* fulto */
- gen_vfp_ulto(dp, 32 - rm, 0);
- break;
- case 24: /* ftoui */
- gen_vfp_toui(dp, 0);
- break;
- case 25: /* ftouiz */
- gen_vfp_touiz(dp, 0);
- break;
- case 26: /* ftosi */
- gen_vfp_tosi(dp, 0);
- break;
- case 27: /* ftosiz */
- gen_vfp_tosiz(dp, 0);
- break;
- case 28: /* ftosh */
- gen_vfp_tosh(dp, 16 - rm, 0);
- break;
- case 29: /* ftosl */
- gen_vfp_tosl(dp, 32 - rm, 0);
- break;
- case 30: /* ftouh */
- gen_vfp_touh(dp, 16 - rm, 0);
- break;
- case 31: /* ftoul */
- gen_vfp_toul(dp, 32 - rm, 0);
- break;
- default: /* undefined */
- g_assert_not_reached();
- }
- break;
- default: /* undefined */
- return 1;
- }
-
- /* Write back the result, if any. */
- if (!no_output) {
- gen_mov_vreg_F0(rd_is_dp, rd);
- }
-
- /* break out of the loop if we have finished */
- if (veclen == 0) {
- break;
- }
-
- if (op == 15 && delta_m == 0) {
- /* single source one-many */
- while (veclen--) {
- rd = ((rd + delta_d) & (bank_mask - 1))
- | (rd & bank_mask);
- gen_mov_vreg_F0(dp, rd);
- }
- break;
- }
- /* Setup the next operands. */
- veclen--;
- rd = ((rd + delta_d) & (bank_mask - 1))
- | (rd & bank_mask);
-
- if (op == 15) {
- /* One source operand. */
- rm = ((rm + delta_m) & (bank_mask - 1))
- | (rm & bank_mask);
- gen_mov_F0_vreg(dp, rm);
- } else {
- /* Two source operands. */
- rn = ((rn + delta_d) & (bank_mask - 1))
- | (rn & bank_mask);
- gen_mov_F0_vreg(dp, rn);
- if (delta_m) {
- rm = ((rm + delta_m) & (bank_mask - 1))
- | (rm & bank_mask);
- gen_mov_F1_vreg(dp, rm);
- }
- }
- }
- }
- break;
- case 0xc:
- case 0xd:
- /* Already handled by decodetree */
- return 1;
- default:
- /* Should never happen. */
- return 1;
- }
- return 0;
+ /* If the decodetree decoder didn't handle this insn, it must be UNDEF */
+ return 1;
}
static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
- ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
+ ((s->base.pc_next - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
return true;
#endif
undef:
/* If we get here then some access check did not pass */
- gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+ syn_uncategorized(), exc_target);
return false;
}
/* Sync state because msr_banked() can raise exceptions */
gen_set_condexec(s);
- gen_set_pc_im(s, s->pc - 4);
+ gen_set_pc_im(s, s->pc_curr);
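+ /* An exception from the helper must report this insn's own address. */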
tcg_reg = load_reg(s, rn);
tcg_tgtmode = tcg_const_i32(tgtmode);
tcg_regno = tcg_const_i32(regno);
/* Sync state because mrs_banked() can raise exceptions */
gen_set_condexec(s);
- gen_set_pc_im(s, s->pc - 4);
+ gen_set_pc_im(s, s->pc_curr);
tcg_reg = tcg_temp_new_i32();
tcg_tgtmode = tcg_const_i32(tgtmode);
tcg_regno = tcg_const_i32(regno);
*/
case 1: /* yield */
if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
- gen_set_pc_im(s, s->pc);
+ gen_set_pc_im(s, s->base.pc_next);
s->base.is_jmp = DISAS_YIELD;
}
break;
case 3: /* wfi */
- gen_set_pc_im(s, s->pc);
+ gen_set_pc_im(s, s->base.pc_next);
s->base.is_jmp = DISAS_WFI;
break;
case 2: /* wfe */
if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
- gen_set_pc_im(s, s->pc);
+ gen_set_pc_im(s, s->base.pc_next);
s->base.is_jmp = DISAS_WFE;
}
break;
* for attempts to execute invalid vfp/neon encodings with FP disabled.
*/
if (s->fp_excp_el) {
- gen_exception_insn(s, 4, EXCP_UDEF,
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
return 0;
}
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
-static int neon_2rm_is_float_op(int op)
-{
- /* Return true if this neon 2reg-misc op is float-to-float */
- return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
- (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
- op == NEON_2RM_VRINTM ||
- (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
- op >= NEON_2RM_VRECPE_F);
-}
-
static bool neon_2rm_is_v8_op(int op)
{
/* Return true if this neon 2reg-misc op is ARMv8 and up */
* for attempts to execute invalid vfp/neon encodings with FP disabled.
*/
if (s->fp_excp_el) {
- gen_exception_insn(s, 4, EXCP_UDEF,
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
return 0;
}
}
} else if (op >= 14) {
/* VCVT fixed-point. */
+ TCGv_ptr fpst;
+ TCGv_i32 shiftv;
+ VFPGenFixPointFn *fn;
+
if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
return 1;
}
+
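+ /*
+ * Bit 0 of op selects the direction: clear is fixed-to-float,
+ * set is float-to-fixed with round-to-zero; u selects the
+ * unsigned variant.
+ */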
+ if (!(op & 1)) {
+ if (u) {
+ fn = gen_helper_vfp_ultos;
+ } else {
+ fn = gen_helper_vfp_sltos;
+ }
+ } else {
+ if (u) {
+ fn = gen_helper_vfp_touls_round_to_zero;
+ } else {
+ fn = gen_helper_vfp_tosls_round_to_zero;
+ }
+ }
+
/* We have already masked out the must-be-1 top bit of imm6,
* hence this 32-shift where the ARM ARM has 64-imm6.
*/
shift = 32 - shift;
+ fpst = get_fpstatus_ptr(1);
+ shiftv = tcg_const_i32(shift);
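+ /* The status pointer and shift count are loop-invariant. */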
for (pass = 0; pass < (q ? 4 : 2); pass++) {
- tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
- if (!(op & 1)) {
- if (u)
- gen_vfp_ulto(0, shift, 1);
- else
- gen_vfp_slto(0, shift, 1);
- } else {
- if (u)
- gen_vfp_toul(0, shift, 1);
- else
- gen_vfp_tosl(0, shift, 1);
- }
- tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
+ TCGv_i32 tmpf = neon_load_reg(rm, pass);
+ fn(tmpf, tmpf, shiftv, fpst);
+ neon_store_reg(rd, pass, tmpf);
}
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i32(shiftv);
} else {
return 1;
}
q || (rm & 1)) {
return 1;
}
- tmp = tcg_temp_new_i32();
- tmp2 = tcg_temp_new_i32();
fpst = get_fpstatus_ptr(true);
ahp = get_ahp_flag();
- tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
- gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
- tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
- gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
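+ /*
+ * Narrow the four f32 elements of the source to f16 and pack
+ * them pairwise into the two words of the destination.
+ */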
+ tmp = neon_load_reg(rm, 0);
+ gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
+ tmp2 = neon_load_reg(rm, 1);
+ gen_helper_vfp_fcvt_f32_to_f16(tmp2, tmp2, fpst, ahp);
tcg_gen_shli_i32(tmp2, tmp2, 16);
tcg_gen_or_i32(tmp2, tmp2, tmp);
- tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
- gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
- tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
+ tcg_temp_free_i32(tmp);
+ tmp = neon_load_reg(rm, 2);
+ gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
+ tmp3 = neon_load_reg(rm, 3);
neon_store_reg(rd, 0, tmp2);
- tmp2 = tcg_temp_new_i32();
- gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
- tcg_gen_shli_i32(tmp2, tmp2, 16);
- tcg_gen_or_i32(tmp2, tmp2, tmp);
- neon_store_reg(rd, 1, tmp2);
+ gen_helper_vfp_fcvt_f32_to_f16(tmp3, tmp3, fpst, ahp);
+ tcg_gen_shli_i32(tmp3, tmp3, 16);
+ tcg_gen_or_i32(tmp3, tmp3, tmp);
+ neon_store_reg(rd, 1, tmp3);
tcg_temp_free_i32(tmp);
tcg_temp_free_i32(ahp);
tcg_temp_free_ptr(fpst);
tmp = neon_load_reg(rm, 0);
tmp2 = neon_load_reg(rm, 1);
tcg_gen_ext16u_i32(tmp3, tmp);
- gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
- tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
- tcg_gen_shri_i32(tmp3, tmp, 16);
- gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
- tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
- tcg_temp_free_i32(tmp);
+ gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
+ neon_store_reg(rd, 0, tmp3);
+ tcg_gen_shri_i32(tmp, tmp, 16);
+ gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp);
+ neon_store_reg(rd, 1, tmp);
+ tmp3 = tcg_temp_new_i32();
tcg_gen_ext16u_i32(tmp3, tmp2);
- gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
- tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
- tcg_gen_shri_i32(tmp3, tmp2, 16);
- gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
- tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
- tcg_temp_free_i32(tmp2);
- tcg_temp_free_i32(tmp3);
+ gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
+ neon_store_reg(rd, 2, tmp3);
+ tcg_gen_shri_i32(tmp2, tmp2, 16);
+ gen_helper_vfp_fcvt_f16_to_f32(tmp2, tmp2, fpst, ahp);
+ neon_store_reg(rd, 3, tmp2);
tcg_temp_free_i32(ahp);
tcg_temp_free_ptr(fpst);
break;
default:
elementwise:
for (pass = 0; pass < (q ? 4 : 2); pass++) {
- if (neon_2rm_is_float_op(op)) {
- tcg_gen_ld_f32(cpu_F0s, cpu_env,
- neon_reg_offset(rm, pass));
- tmp = NULL;
- } else {
- tmp = neon_load_reg(rm, pass);
- }
+ tmp = neon_load_reg(rm, pass);
switch (op) {
case NEON_2RM_VREV32:
switch (size) {
break;
}
case NEON_2RM_VABS_F:
- gen_vfp_abs(0);
+ gen_helper_vfp_abss(tmp, tmp);
break;
case NEON_2RM_VNEG_F:
- gen_vfp_neg(0);
+ gen_helper_vfp_negs(tmp, tmp);
break;
case NEON_2RM_VSWP:
tmp2 = neon_load_reg(rd, pass);
tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
cpu_env);
- gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
+ gen_helper_rints(tmp, tmp, fpstatus);
gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
cpu_env);
tcg_temp_free_ptr(fpstatus);
case NEON_2RM_VRINTX:
{
TCGv_ptr fpstatus = get_fpstatus_ptr(1);
- gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
+ gen_helper_rints_exact(tmp, tmp, fpstatus);
tcg_temp_free_ptr(fpstatus);
break;
}
cpu_env);
if (is_signed) {
- gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
+ gen_helper_vfp_tosls(tmp, tmp,
tcg_shift, fpst);
} else {
- gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
+ gen_helper_vfp_touls(tmp, tmp,
tcg_shift, fpst);
}
case NEON_2RM_VRECPE_F:
{
TCGv_ptr fpstatus = get_fpstatus_ptr(1);
- gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
+ gen_helper_recpe_f32(tmp, tmp, fpstatus);
tcg_temp_free_ptr(fpstatus);
break;
}
case NEON_2RM_VRSQRTE_F:
{
TCGv_ptr fpstatus = get_fpstatus_ptr(1);
- gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
+ gen_helper_rsqrte_f32(tmp, tmp, fpstatus);
tcg_temp_free_ptr(fpstatus);
break;
}
case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
- gen_vfp_sito(0, 1);
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ gen_helper_vfp_sitos(tmp, tmp, fpstatus);
+ tcg_temp_free_ptr(fpstatus);
break;
+ }
case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
- gen_vfp_uito(0, 1);
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ gen_helper_vfp_uitos(tmp, tmp, fpstatus);
+ tcg_temp_free_ptr(fpstatus);
break;
+ }
case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
- gen_vfp_tosiz(0, 1);
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ gen_helper_vfp_tosizs(tmp, tmp, fpstatus);
+ tcg_temp_free_ptr(fpstatus);
break;
+ }
case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
- gen_vfp_touiz(0, 1);
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ gen_helper_vfp_touizs(tmp, tmp, fpstatus);
+ tcg_temp_free_ptr(fpstatus);
break;
+ }
default:
/* Reserved op values were caught by the
* neon_2rm_sizes[] check earlier.
*/
abort();
}
- if (neon_2rm_is_float_op(op)) {
- tcg_gen_st_f32(cpu_F0s, cpu_env,
- neon_reg_offset(rd, pass));
- } else {
- neon_store_reg(rd, pass, tmp);
- }
+ neon_store_reg(rd, pass, tmp);
}
break;
}
}
if (s->fp_excp_el) {
- gen_exception_insn(s, 4, EXCP_UDEF,
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
return 0;
}
off_rm = vfp_reg_offset(0, rm);
}
if (s->fp_excp_el) {
- gen_exception_insn(s, 4, EXCP_UDEF,
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
return 0;
}
}
gen_set_condexec(s);
- gen_set_pc_im(s, s->pc - 4);
+ gen_set_pc_im(s, s->pc_curr);
tmpptr = tcg_const_ptr(ri);
tcg_syn = tcg_const_i32(syndrome);
tcg_isread = tcg_const_i32(isread);
if (isread) {
return 1;
}
- gen_set_pc_im(s, s->pc);
+ gen_set_pc_im(s, s->base.pc_next);
s->base.is_jmp = DISAS_WFI;
return 0;
default:
* For the UNPREDICTABLE cases we choose to UNDEF.
*/
if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
- gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(), 3);
return;
}
}
if (undef) {
- gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
- default_exception_el(s));
+ unallocated_encoding(s);
return;
}
tmp = tcg_const_i32(mode);
/* get_r13_banked() will raise an exception if called from System mode */
gen_set_condexec(s);
- gen_set_pc_im(s, s->pc - 4);
+ gen_set_pc_im(s, s->pc_curr);
gen_helper_get_r13_banked(addr, cpu_env, tmp);
tcg_temp_free_i32(tmp);
switch (amode) {
* UsageFault exception.
*/
if (arm_dc_feature(s, ARM_FEATURE_M)) {
- gen_exception_insn(s, 4, EXCP_INVSTATE, syn_uncategorized(),
+ gen_exception_insn(s, s->pc_curr, EXCP_INVSTATE, syn_uncategorized(),
default_exception_el(s));
return;
}
* self-modifying code correctly and also to take
* any pending interrupts immediately.
*/
- gen_goto_tb(s, 0, s->pc & ~1);
+ gen_goto_tb(s, 0, s->base.pc_next);
return;
case 7: /* sb */
if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
* for TCG; MB and end the TB instead.
*/
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
- gen_goto_tb(s, 0, s->pc & ~1);
+ gen_goto_tb(s, 0, s->base.pc_next);
return;
default:
goto illegal_op;
/* branch link and change to thumb (blx <offset>) */
int32_t offset;
- val = (uint32_t)s->pc;
tmp = tcg_temp_new_i32();
- tcg_gen_movi_i32(tmp, val);
+ tcg_gen_movi_i32(tmp, s->base.pc_next);
store_reg(s, 14, tmp);
/* Sign-extend the 24-bit offset */
offset = (((int32_t)insn) << 8) >> 8;
+ val = read_pc(s);
/* offset * 4 + bit24 * 2 + (thumb bit) */
val += (offset << 2) | ((insn >> 23) & 2) | 1;
- /* pipeline offset */
- val += 4;
/* protected by ARCH(5); above, near the start of uncond block */
gen_bx_im(s, val);
return;
/* branch link/exchange thumb (blx) */
tmp = load_reg(s, rm);
tmp2 = tcg_temp_new_i32();
- tcg_gen_movi_i32(tmp2, s->pc);
+ tcg_gen_movi_i32(tmp2, s->base.pc_next);
store_reg(s, 14, tmp2);
gen_bx(s, tmp);
break;
tmp = load_reg(s, rm);
tmp2 = load_reg(s, rn);
if (op1 & 2)
- gen_helper_double_saturate(tmp2, cpu_env, tmp2);
+ gen_helper_add_saturate(tmp2, cpu_env, tmp2, tmp2);
if (op1 & 1)
gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
else
case 1:
/* bkpt */
ARCH(5);
- gen_exception_bkpt_insn(s, 4, syn_aa32_bkpt(imm16, false));
+ gen_exception_bkpt_insn(s, syn_aa32_bkpt(imm16, false));
break;
case 2:
/* Hypervisor call (v7) */
shift = (insn >> 7) & 0x1f;
if (insn & (1 << 6)) {
/* pkhtb */
- if (shift == 0)
+ if (shift == 0) {
shift = 31;
+ }
tcg_gen_sari_i32(tmp2, tmp2, shift);
- tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
- tcg_gen_ext16u_i32(tmp2, tmp2);
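+ /* Result: Rn's top half, with the shifted Rm in bits [15:0]. */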
+ tcg_gen_deposit_i32(tmp, tmp, tmp2, 0, 16);
} else {
/* pkhbt */
- if (shift)
- tcg_gen_shli_i32(tmp2, tmp2, shift);
- tcg_gen_ext16u_i32(tmp, tmp);
- tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
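+ /* Result: the shifted Rm's top half, with Rn in bits [15:0]. */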
+ tcg_gen_shli_i32(tmp2, tmp2, shift);
+ tcg_gen_deposit_i32(tmp, tmp2, tmp, 0, 16);
}
- tcg_gen_or_i32(tmp, tmp, tmp2);
tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
} else if ((insn & 0x00200020) == 0x00200000) {
loaded_base = 0;
loaded_var = NULL;
n = 0;
- for(i=0;i<16;i++) {
+ for (i = 0; i < 16; i++) {
if (insn & (1 << i))
n++;
}
}
}
j = 0;
- for(i=0;i<16;i++) {
+ for (i = 0; i < 16; i++) {
if (insn & (1 << i)) {
if (is_load) {
/* load */
} else {
/* store */
if (i == 15) {
- /* special case: r15 = PC + 8 */
- val = (long)s->pc + 4;
tmp = tcg_temp_new_i32();
- tcg_gen_movi_i32(tmp, val);
+ tcg_gen_movi_i32(tmp, read_pc(s));
} else if (user) {
tmp = tcg_temp_new_i32();
tmp2 = tcg_const_i32(i);
int32_t offset;
/* branch (and link) */
- val = (int32_t)s->pc;
if (insn & (1 << 24)) {
tmp = tcg_temp_new_i32();
- tcg_gen_movi_i32(tmp, val);
+ tcg_gen_movi_i32(tmp, s->base.pc_next);
store_reg(s, 14, tmp);
}
offset = sextract32(insn << 2, 0, 26);
- val += offset + 4;
- gen_jmp(s, val);
+ gen_jmp(s, read_pc(s) + offset);
}
break;
case 0xc:
break;
case 0xf:
/* swi */
- gen_set_pc_im(s, s->pc);
+ gen_set_pc_im(s, s->base.pc_next);
s->svc_imm = extract32(insn, 0, 24);
s->base.is_jmp = DISAS_SWI;
break;
default:
illegal_op:
- gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
- default_exception_el(s));
+ unallocated_encoding(s);
break;
}
}
}
-static bool thumb_insn_is_16bit(DisasContext *s, uint32_t insn)
+static bool thumb_insn_is_16bit(DisasContext *s, uint32_t pc, uint32_t insn)
{
- /* Return true if this is a 16 bit instruction. We must be precise
- * about this (matching the decode). We assume that s->pc still
- * points to the first 16 bits of the insn.
+ /*
+ * Return true if this is a 16 bit instruction. We must be precise
+ * about this (matching the decode).
*/
if ((insn >> 11) < 0x1d) {
/* Definitely a 16-bit instruction */
return true;
}
- if ((insn >> 11) == 0x1e && s->pc - s->page_start < TARGET_PAGE_SIZE - 3) {
+ if ((insn >> 11) == 0x1e && pc - s->page_start < TARGET_PAGE_SIZE - 3) {
/* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
* is not on the next page; we merge this into a 32-bit
* insn.
*/
bool wback = extract32(insn, 21, 1);
- if (rn == 15) {
- if (insn & (1 << 21)) {
- /* UNPREDICTABLE */
- goto illegal_op;
- }
- addr = tcg_temp_new_i32();
- tcg_gen_movi_i32(addr, s->pc & ~3);
- } else {
- addr = load_reg(s, rn);
+ if (rn == 15 && (insn & (1 << 21))) {
+ /* UNPREDICTABLE */
+ goto illegal_op;
}
+
+ addr = add_reg_for_lit(s, rn, 0);
offset = (insn & 0xff) * 4;
if ((insn & (1 << 23)) == 0) {
offset = -offset;
tcg_temp_free_i32(addr);
} else if ((insn & (7 << 5)) == 0) {
/* Table Branch. */
- if (rn == 15) {
- addr = tcg_temp_new_i32();
- tcg_gen_movi_i32(addr, s->pc);
- } else {
- addr = load_reg(s, rn);
- }
+ addr = load_reg(s, rn);
tmp = load_reg(s, rm);
tcg_gen_add_i32(addr, addr, tmp);
if (insn & (1 << 4)) {
}
tcg_temp_free_i32(addr);
tcg_gen_shli_i32(tmp, tmp, 1);
- tcg_gen_addi_i32(tmp, tmp, s->pc);
+ tcg_gen_addi_i32(tmp, tmp, read_pc(s));
store_reg(s, 15, tmp);
} else {
bool is_lasr = false;
shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
if (insn & (1 << 5)) {
/* pkhtb */
- if (shift == 0)
+ if (shift == 0) {
shift = 31;
+ }
tcg_gen_sari_i32(tmp2, tmp2, shift);
- tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
- tcg_gen_ext16u_i32(tmp2, tmp2);
+ tcg_gen_deposit_i32(tmp, tmp, tmp2, 0, 16);
} else {
/* pkhbt */
- if (shift)
- tcg_gen_shli_i32(tmp2, tmp2, shift);
- tcg_gen_ext16u_i32(tmp, tmp);
- tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
+ tcg_gen_shli_i32(tmp2, tmp2, shift);
+ tcg_gen_deposit_i32(tmp, tmp2, tmp, 0, 16);
}
- tcg_gen_or_i32(tmp, tmp, tmp2);
tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
} else {
tmp = load_reg(s, rn);
tmp2 = load_reg(s, rm);
if (op & 1)
- gen_helper_double_saturate(tmp, cpu_env, tmp);
+ gen_helper_add_saturate(tmp, cpu_env, tmp, tmp);
if (op & 2)
gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
else
}
/* All other insns: NOCP */
- gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
+ gen_exception_insn(s, s->pc_curr, EXCP_NOCP, syn_uncategorized(),
default_exception_el(s));
break;
}
if (insn & (1 << 14)) {
/* Branch and link. */
- tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
+ tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | 1);
}
- offset += s->pc;
+ offset += read_pc(s);
if (insn & (1 << 12)) {
/* b/bl */
gen_jmp(s, offset);
* and also to take any pending interrupts
* immediately.
*/
- gen_goto_tb(s, 0, s->pc & ~1);
+ gen_goto_tb(s, 0, s->base.pc_next);
break;
case 7: /* sb */
if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
* for TCG; MB and end the TB instead.
*/
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
- gen_goto_tb(s, 0, s->pc & ~1);
+ gen_goto_tb(s, 0, s->base.pc_next);
break;
default:
goto illegal_op;
offset |= (insn & (1 << 11)) << 8;
/* jump to the offset */
- gen_jmp(s, s->pc + offset);
+ gen_jmp(s, read_pc(s) + offset);
}
} else {
/*
store_reg(s, rd, tmp);
} else {
/* Add/sub 12-bit immediate. */
- if (rn == 15) {
- offset = s->pc & ~(uint32_t)3;
- if (insn & (1 << 23))
- offset -= imm;
- else
- offset += imm;
- tmp = tcg_temp_new_i32();
- tcg_gen_movi_i32(tmp, offset);
- store_reg(s, rd, tmp);
+ if (insn & (1 << 23)) {
+ imm = -imm;
+ }
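+ /* add_reg_for_lit() also covers rn == 15, i.e. the ADR case. */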
+ tmp = add_reg_for_lit(s, rn, imm);
+ if (rn == 13 && rd == 13) {
+ /* ADD SP, SP, imm or SUB SP, SP, imm */
+ store_sp_checked(s, tmp);
} else {
- tmp = load_reg(s, rn);
- if (insn & (1 << 23))
- tcg_gen_subi_i32(tmp, tmp, imm);
- else
- tcg_gen_addi_i32(tmp, tmp, imm);
- if (rn == 13 && rd == 13) {
- /* ADD SP, SP, imm or SUB SP, SP, imm */
- store_sp_checked(s, tmp);
- } else {
- store_reg(s, rd, tmp);
- }
+ store_reg(s, rd, tmp);
}
}
}
}
}
memidx = get_mem_index(s);
- if (rn == 15) {
- addr = tcg_temp_new_i32();
- /* PC relative. */
- /* s->pc has already been incremented by 4. */
- imm = s->pc & 0xfffffffc;
- if (insn & (1 << 23))
- imm += insn & 0xfff;
- else
- imm -= insn & 0xfff;
- tcg_gen_movi_i32(addr, imm);
+ imm = insn & 0xfff;
+ if (insn & (1 << 23)) {
+ /* PC relative or positive offset. */
+ addr = add_reg_for_lit(s, rn, imm);
+ } else if (rn == 15) {
+ /* PC relative with negative offset. */
+ addr = add_reg_for_lit(s, rn, -imm);
} else {
addr = load_reg(s, rn);
- if (insn & (1 << 23)) {
- /* Positive offset. */
- imm = insn & 0xfff;
- tcg_gen_addi_i32(addr, addr, imm);
- } else {
- imm = insn & 0xff;
- switch ((insn >> 8) & 0xf) {
- case 0x0: /* Shifted Register. */
- shift = (insn >> 4) & 0xf;
- if (shift > 3) {
- tcg_temp_free_i32(addr);
- goto illegal_op;
- }
- tmp = load_reg(s, rm);
- if (shift)
- tcg_gen_shli_i32(tmp, tmp, shift);
- tcg_gen_add_i32(addr, addr, tmp);
- tcg_temp_free_i32(tmp);
- break;
- case 0xc: /* Negative offset. */
- tcg_gen_addi_i32(addr, addr, -imm);
- break;
- case 0xe: /* User privilege. */
- tcg_gen_addi_i32(addr, addr, imm);
- memidx = get_a32_user_mem_index(s);
- break;
- case 0x9: /* Post-decrement. */
- imm = -imm;
- /* Fall through. */
- case 0xb: /* Post-increment. */
- postinc = 1;
- writeback = 1;
- break;
- case 0xd: /* Pre-decrement. */
- imm = -imm;
- /* Fall through. */
- case 0xf: /* Pre-increment. */
- writeback = 1;
- break;
- default:
+ imm = insn & 0xff;
+ switch ((insn >> 8) & 0xf) {
+ case 0x0: /* Shifted Register. */
+ shift = (insn >> 4) & 0xf;
+ if (shift > 3) {
tcg_temp_free_i32(addr);
goto illegal_op;
}
+ tmp = load_reg(s, rm);
+ if (shift) {
+ tcg_gen_shli_i32(tmp, tmp, shift);
+ }
+ tcg_gen_add_i32(addr, addr, tmp);
+ tcg_temp_free_i32(tmp);
+ break;
+ case 0xc: /* Negative offset. */
+ tcg_gen_addi_i32(addr, addr, -imm);
+ break;
+ case 0xe: /* User privilege. */
+ tcg_gen_addi_i32(addr, addr, imm);
+ memidx = get_a32_user_mem_index(s);
+ break;
+ case 0x9: /* Post-decrement. */
+ imm = -imm;
+ /* Fall through. */
+ case 0xb: /* Post-increment. */
+ postinc = 1;
+ writeback = 1;
+ break;
+ case 0xd: /* Pre-decrement. */
+ imm = -imm;
+ /* Fall through. */
+ case 0xf: /* Pre-increment. */
+ writeback = 1;
+ break;
+ default:
+ tcg_temp_free_i32(addr);
+ goto illegal_op;
}
}
}
return;
illegal_op:
- gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
- default_exception_el(s));
+ unallocated_encoding(s);
}
static void disas_thumb_insn(DisasContext *s, uint32_t insn)
if (insn & (1 << 11)) {
rd = (insn >> 8) & 7;
/* load pc-relative. Bit 1 of PC is ignored. */
- val = s->pc + 2 + ((insn & 0xff) * 4);
- val &= ~(uint32_t)2;
- addr = tcg_temp_new_i32();
- tcg_gen_movi_i32(addr, val);
+ addr = add_reg_for_lit(s, 15, (insn & 0xff) * 4);
tmp = tcg_temp_new_i32();
gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
rd | ISSIs16Bit);
/* BLX/BX */
tmp = load_reg(s, rm);
if (link) {
- val = (uint32_t)s->pc | 1;
+ val = (uint32_t)s->base.pc_next | 1;
tmp2 = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp2, val);
store_reg(s, 14, tmp2);
* - Add PC/SP (immediate)
*/
rd = (insn >> 8) & 7;
- if (insn & (1 << 11)) {
- /* SP */
- tmp = load_reg(s, 13);
- } else {
- /* PC. bit 1 is ignored. */
- tmp = tcg_temp_new_i32();
- tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
- }
val = (insn & 0xff) * 4;
- tcg_gen_addi_i32(tmp, tmp, val);
+ tmp = add_reg_for_lit(s, insn & (1 << 11) ? 13 : 15, val);
store_reg(s, rd, tmp);
break;
tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
tcg_temp_free_i32(tmp);
offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
- val = (uint32_t)s->pc + 2;
- val += offset;
- gen_jmp(s, val);
+ gen_jmp(s, read_pc(s) + offset);
break;
case 15: /* IT, nop-hint. */
gen_nop_hint(s, (insn >> 4) & 0xf);
break;
}
- /* If Then. */
+ /*
+ * IT (If-Then)
+ *
+ * Combinations of firstcond and mask which set up an 0b1111
+ * condition are UNPREDICTABLE; we take the CONSTRAINED
+ * UNPREDICTABLE choice to treat 0b1111 the same as 0b1110,
+ * i.e. both meaning "execute always".
+ */
s->condexec_cond = (insn >> 4) & 0xe;
s->condexec_mask = insn & 0x1f;
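+ /*
+ * Bit 0 of firstcond is not stored here: it sits in bit 4 of
+ * condexec_mask and is shifted into condexec_cond as each insn
+ * in the IT block is translated.
+ */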
/* No actual code generated for this insn, just setup state. */
{
int imm8 = extract32(insn, 0, 8);
ARCH(5);
- gen_exception_bkpt_insn(s, 2, syn_aa32_bkpt(imm8, true));
+ gen_exception_bkpt_insn(s, syn_aa32_bkpt(imm8, true));
break;
}
if (cond == 0xf) {
/* swi */
- gen_set_pc_im(s, s->pc);
+ gen_set_pc_im(s, s->base.pc_next);
s->svc_imm = extract32(insn, 0, 8);
s->base.is_jmp = DISAS_SWI;
break;
arm_skip_unless(s, cond);
/* jump to the offset */
- val = (uint32_t)s->pc + 2;
+ val = read_pc(s);
offset = ((int32_t)insn << 24) >> 24;
val += offset << 1;
gen_jmp(s, val);
tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
tmp2 = tcg_temp_new_i32();
- tcg_gen_movi_i32(tmp2, s->pc | 1);
+ tcg_gen_movi_i32(tmp2, s->base.pc_next | 1);
store_reg(s, 14, tmp2);
gen_bx(s, tmp);
break;
}
/* unconditional branch */
- val = (uint32_t)s->pc;
+ val = read_pc(s);
offset = ((int32_t)insn << 21) >> 21;
- val += (offset << 1) + 2;
+ val += offset << 1;
gen_jmp(s, val);
break;
tcg_gen_addi_i32(tmp, tmp, offset);
tmp2 = tcg_temp_new_i32();
- tcg_gen_movi_i32(tmp2, s->pc | 1);
+ tcg_gen_movi_i32(tmp2, s->base.pc_next | 1);
store_reg(s, 14, tmp2);
gen_bx(s, tmp);
} else {
/* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
uint32_t uoffset = ((int32_t)insn << 21) >> 9;
- tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + uoffset);
+ tcg_gen_movi_i32(cpu_R[14], read_pc(s) + uoffset);
}
break;
}
return;
illegal_op:
undef:
- gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
- default_exception_el(s));
+ unallocated_encoding(s);
}
static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
{
- /* Return true if the insn at dc->pc might cross a page boundary.
+ /* Return true if the insn at dc->base.pc_next might cross a page boundary.
* (False positives are OK, false negatives are not.)
* We know this is a Thumb insn, and our caller ensures we are
- * only called if dc->pc is less than 4 bytes from the page
+ * only called if dc->base.pc_next is less than 4 bytes from the page
* boundary, so we cross the page if the first 16 bits indicate
* that this is a 32 bit insn.
*/
- uint16_t insn = arm_lduw_code(env, s->pc, s->sctlr_b);
+ uint16_t insn = arm_lduw_code(env, s->base.pc_next, s->sctlr_b);
- return !thumb_insn_is_16bit(s, insn);
+ return !thumb_insn_is_16bit(s, s->base.pc_next, insn);
}
static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
uint32_t condexec, core_mmu_idx;
dc->isar = &cpu->isar;
- dc->pc = dc->base.pc_first;
dc->condjmp = 0;
dc->aarch64 = 0;
dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
dc->is_ldex = false;
- dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
+ if (!arm_feature(env, ARM_FEATURE_M)) {
+ dc->debug_target_el = FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
+ }
dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
- cpu_F0s = tcg_temp_new_i32();
- cpu_F1s = tcg_temp_new_i32();
- cpu_F0d = tcg_temp_new_i64();
- cpu_F1d = tcg_temp_new_i64();
- cpu_V0 = cpu_F0d;
- cpu_V1 = cpu_F1d;
+ cpu_V0 = tcg_temp_new_i64();
+ cpu_V1 = tcg_temp_new_i64();
/* FIXME: cpu_M0 can probably be the same as cpu_V0. */
cpu_M0 = tcg_temp_new_i64();
}
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
- tcg_gen_insn_start(dc->pc,
+ tcg_gen_insn_start(dc->base.pc_next,
(dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
0);
dc->insn_start = tcg_last_op();
if (bp->flags & BP_CPU) {
gen_set_condexec(dc);
- gen_set_pc_im(dc, dc->pc);
+ gen_set_pc_im(dc, dc->base.pc_next);
gen_helper_check_breakpoints(cpu_env);
/* End the TB early; it's likely not going to be executed */
dc->base.is_jmp = DISAS_TOO_MANY;
} else {
- gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
+ gen_exception_internal_insn(dc, dc->base.pc_next, EXCP_DEBUG);
/* The address covered by the breakpoint must be
included in [tb->pc, tb->pc + tb->size) in order
for it to be properly cleared -- thus we
tb->size below does the right thing. */
/* TODO: Advance PC by correct instruction length to
* avoid disassembler error messages */
- dc->pc += 2;
+ dc->base.pc_next += 2;
dc->base.is_jmp = DISAS_NORETURN;
}
{
#ifdef CONFIG_USER_ONLY
/* Intercept jump to the magic kernel page. */
- if (dc->pc >= 0xffff0000) {
+ if (dc->base.pc_next >= 0xffff0000) {
/* We always get here via a jump, so know we are not in a
conditional execution block. */
gen_exception_internal(EXCP_KERNEL_TRAP);
* bits should be zero.
*/
assert(dc->base.num_insns == 1);
- gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
- default_exception_el(dc));
+ gen_swstep_exception(dc, 0, 0);
dc->base.is_jmp = DISAS_NORETURN;
return true;
}
gen_set_label(dc->condlabel);
dc->condjmp = 0;
}
- dc->base.pc_next = dc->pc;
translator_loop_temp_check(&dc->base);
}
return;
}
- insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
+ dc->pc_curr = dc->base.pc_next;
+ insn = arm_ldl_code(env, dc->base.pc_next, dc->sctlr_b);
dc->insn = insn;
- dc->pc += 4;
+ dc->base.pc_next += 4;
disas_arm_insn(dc, insn);
arm_post_translate_insn(dc);
return;
}
- insn = arm_lduw_code(env, dc->pc, dc->sctlr_b);
- is_16bit = thumb_insn_is_16bit(dc, insn);
- dc->pc += 2;
+ dc->pc_curr = dc->base.pc_next;
+ insn = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);
+ is_16bit = thumb_insn_is_16bit(dc, dc->base.pc_next, insn);
+ dc->base.pc_next += 2;
if (!is_16bit) {
- uint32_t insn2 = arm_lduw_code(env, dc->pc, dc->sctlr_b);
+ uint32_t insn2 = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);
insn = insn << 16 | insn2;
- dc->pc += 2;
+ dc->base.pc_next += 2;
}
dc->insn = insn;
if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
uint32_t cond = dc->condexec_cond;
- if (cond != 0x0e) { /* Skip conditional when condition is AL. */
+ /*
+ * Conditionally skip the insn. Note that both 0xe and 0xf mean
+ * "always"; 0xf is not "never".
+ */
+ if (cond < 0x0e) {
arm_skip_unless(dc, cond);
}
}
* but isn't very efficient).
*/
if (dc->base.is_jmp == DISAS_NEXT
- && (dc->pc - dc->page_start >= TARGET_PAGE_SIZE
- || (dc->pc - dc->page_start >= TARGET_PAGE_SIZE - 3
+ && (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE
+ || (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE - 3
&& insn_crosses_page(env, dc)))) {
dc->base.is_jmp = DISAS_TOO_MANY;
}
case DISAS_NEXT:
case DISAS_TOO_MANY:
case DISAS_UPDATE:
- gen_set_pc_im(dc, dc->pc);
+ gen_set_pc_im(dc, dc->base.pc_next);
/* fall through */
default:
/* FIXME: Single stepping a WFI insn will not halt the CPU. */
switch (dc->base.is_jmp) {
case DISAS_NEXT:
case DISAS_TOO_MANY:
- gen_goto_tb(dc, 1, dc->pc);
+ gen_goto_tb(dc, 1, dc->base.pc_next);
break;
case DISAS_JUMP:
gen_goto_ptr();
break;
case DISAS_UPDATE:
- gen_set_pc_im(dc, dc->pc);
+ gen_set_pc_im(dc, dc->base.pc_next);
/* fall through */
default:
/* indicate that the hash table must be used to find the next TB */
gen_set_label(dc->condlabel);
gen_set_condexec(dc);
if (unlikely(is_singlestepping(dc))) {
- gen_set_pc_im(dc, dc->pc);
+ gen_set_pc_im(dc, dc->base.pc_next);
gen_singlestep_exception(dc);
} else {
- gen_goto_tb(dc, 1, dc->pc);
+ gen_goto_tb(dc, 1, dc->base.pc_next);
}
}
-
- /* Functions above can change dc->pc, so re-align db->pc_next */
- dc->base.pc_next = dc->pc;
}
static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
translator_loop(ops, &dc.base, cpu, tb, max_insns);
}
-void arm_cpu_dump_state(CPUState *cs, FILE *f, int flags)
-{
- ARMCPU *cpu = ARM_CPU(cs);
- CPUARMState *env = &cpu->env;
- int i;
-
- if (is_a64(env)) {
- aarch64_cpu_dump_state(cs, f, flags);
- return;
- }
-
- for(i=0;i<16;i++) {
- qemu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
- if ((i % 4) == 3)
- qemu_fprintf(f, "\n");
- else
- qemu_fprintf(f, " ");
- }
-
- if (arm_feature(env, ARM_FEATURE_M)) {
- uint32_t xpsr = xpsr_read(env);
- const char *mode;
- const char *ns_status = "";
-
- if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
- ns_status = env->v7m.secure ? "S " : "NS ";
- }
-
- if (xpsr & XPSR_EXCP) {
- mode = "handler";
- } else {
- if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
- mode = "unpriv-thread";
- } else {
- mode = "priv-thread";
- }
- }
-
- qemu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
- xpsr,
- xpsr & XPSR_N ? 'N' : '-',
- xpsr & XPSR_Z ? 'Z' : '-',
- xpsr & XPSR_C ? 'C' : '-',
- xpsr & XPSR_V ? 'V' : '-',
- xpsr & XPSR_T ? 'T' : 'A',
- ns_status,
- mode);
- } else {
- uint32_t psr = cpsr_read(env);
- const char *ns_status = "";
-
- if (arm_feature(env, ARM_FEATURE_EL3) &&
- (psr & CPSR_M) != ARM_CPU_MODE_MON) {
- ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
- }
-
- qemu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
- psr,
- psr & CPSR_N ? 'N' : '-',
- psr & CPSR_Z ? 'Z' : '-',
- psr & CPSR_C ? 'C' : '-',
- psr & CPSR_V ? 'V' : '-',
- psr & CPSR_T ? 'T' : 'A',
- ns_status,
- aarch32_mode_name(psr), (psr & 0x10) ? 32 : 26);
- }
-
- if (flags & CPU_DUMP_FPU) {
- int numvfpregs = 0;
- if (arm_feature(env, ARM_FEATURE_VFP)) {
- numvfpregs += 16;
- }
- if (arm_feature(env, ARM_FEATURE_VFP3)) {
- numvfpregs += 16;
- }
- for (i = 0; i < numvfpregs; i++) {
- uint64_t v = *aa32_vfp_dreg(env, i);
- qemu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
- i * 2, (uint32_t)v,
- i * 2 + 1, (uint32_t)(v >> 32),
- i, v);
- }
- qemu_fprintf(f, "FPSCR: %08x\n", vfp_get_fpscr(env));
- }
-}
-
void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
target_ulong *data)
{