#include "tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/bitops.h"
-#include "qemu/qemu-print.h"
#include "arm_ldst.h"
#include "hw/semihosting/semihost.h"
TCGv_i64 cpu_exclusive_addr;
TCGv_i64 cpu_exclusive_val;
-/* FIXME: These should be removed. */
-static TCGv_i32 cpu_F0s, cpu_F1s;
-static TCGv_i64 cpu_F0d, cpu_F1d;
-
#include "exec/gen-icount.h"
static const char * const regnames[] =
/* Function prototypes for gen_ functions calling Neon helpers. */
typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
TCGv_i32, TCGv_i32);
+/* Function prototypes for gen_ functions for fixed-point conversions */
+typedef void VFPGenFixPointFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
/* initialize TCG globals. */
void arm_translate_init(void)
#define store_cpu_field(var, name) \
store_cpu_offset(var, offsetof(CPUARMState, name))
+/* The architectural value of PC. */
+static uint32_t read_pc(DisasContext *s)
+{
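+ /*
+  * An insn that reads PC sees its own address plus an architectural
+  * offset: +8 in Arm mode, +4 in Thumb mode (a relic of the original
+  * three-stage pipeline). E.g. an A32 insn at 0x1000 reads PC as 0x1008.
+  */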
+ return s->pc_curr + (s->thumb ? 4 : 8);
+}
+
/* Set a variable to the value of a CPU register. */
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
if (reg == 15) {
- uint32_t addr;
- /* normally, since we updated PC, we need only to add one insn */
- if (s->thumb)
- addr = (long)s->pc + 2;
- else
- addr = (long)s->pc + 4;
- tcg_gen_movi_i32(var, addr);
+ tcg_gen_movi_i32(var, read_pc(s));
} else {
tcg_gen_mov_i32(var, cpu_R[reg]);
}
return tmp;
}
+/*
+ * Create a new temp, REG + OFS, except PC is ALIGN(PC, 4).
+ * This is used for load/store insns for which use of PC implies
+ * a (literal) addressing mode, and for ADD, where it implies ADR.
+ */
+static TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ if (reg == 15) {
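+ /*
+  * PC as the base register means a literal address: the architectural
+  * PC rounded down to a word boundary. E.g. a Thumb LDR (literal) at
+  * 0x1002 reads PC as 0x1006 and uses base ALIGN(0x1006, 4) = 0x1004.
+  */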
+ tcg_gen_movi_i32(tmp, (read_pc(s) & ~3) + ofs);
+ } else {
+ tcg_gen_addi_i32(tmp, cpu_R[reg], ofs);
+ }
+ return tmp;
+}
+
/* Set a CPU register. The source must be a temporary and will be
marked as dead. */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
tcg_temp_free_i32(tcg_excp);
}
-static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
-{
- TCGv_i32 tcg_excp = tcg_const_i32(excp);
- TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
- TCGv_i32 tcg_el = tcg_const_i32(target_el);
-
- gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
- tcg_syn, tcg_el);
-
- tcg_temp_free_i32(tcg_el);
- tcg_temp_free_i32(tcg_syn);
- tcg_temp_free_i32(tcg_excp);
-}
-
static void gen_step_complete_exception(DisasContext *s)
{
/* We just completed step of an insn. Move from Active-not-pending
* of the exception, and our syndrome information is always correct.
*/
gen_ss_advance(s);
- gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
- default_exception_el(s));
+ gen_swstep_exception(s, 1, s->is_ldex);
s->base.is_jmp = DISAS_NORETURN;
}
static void shifter_out_im(TCGv_i32 var, int shift)
{
- if (shift == 0) {
- tcg_gen_andi_i32(cpu_CF, var, 1);
- } else {
- tcg_gen_shri_i32(cpu_CF, var, shift);
- if (shift != 31) {
- tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
- }
- }
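+ /* extract(CF, var, shift, 1) computes (var >> shift) & 1 in a single
+  * op, covering the shift == 0 and shift == 31 special cases above. */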
+ tcg_gen_extract_i32(cpu_CF, var, shift, 1);
}
/* Shift by immediate. Includes special handling for shift == 0. */
* We do however need to set the PC, because the blxns helper reads it.
* The blxns helper may throw an exception.
*/
- gen_set_pc_im(s, s->pc);
+ gen_set_pc_im(s, s->base.pc_next);
gen_helper_v7m_blxns(cpu_env, var);
tcg_temp_free_i32(var);
s->base.is_jmp = DISAS_EXIT;
* as an undefined insn by runtime configuration (ie before
* the insn really executes).
*/
- gen_set_pc_im(s, s->pc - 4);
+ gen_set_pc_im(s, s->pc_curr);
gen_helper_pre_hvc(cpu_env);
/* Otherwise we will treat this as a real exception which
* happens after execution of the insn. (The distinction matters
* for single stepping.)
*/
s->svc_imm = imm16;
- gen_set_pc_im(s, s->pc);
+ gen_set_pc_im(s, s->base.pc_next);
s->base.is_jmp = DISAS_HVC;
}
*/
TCGv_i32 tmp;
- gen_set_pc_im(s, s->pc - 4);
+ gen_set_pc_im(s, s->pc_curr);
tmp = tcg_const_i32(syn_aa32_smc());
gen_helper_pre_smc(cpu_env, tmp);
tcg_temp_free_i32(tmp);
- gen_set_pc_im(s, s->pc);
+ gen_set_pc_im(s, s->base.pc_next);
s->base.is_jmp = DISAS_SMC;
}
-static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
+static void gen_exception_internal_insn(DisasContext *s, uint32_t pc, int excp)
{
gen_set_condexec(s);
- gen_set_pc_im(s, s->pc - offset);
+ gen_set_pc_im(s, pc);
gen_exception_internal(excp);
s->base.is_jmp = DISAS_NORETURN;
}
-static void gen_exception_insn(DisasContext *s, int offset, int excp,
+static void gen_exception_insn(DisasContext *s, uint32_t pc, int excp,
int syn, uint32_t target_el)
{
gen_set_condexec(s);
- gen_set_pc_im(s, s->pc - offset);
+ gen_set_pc_im(s, pc);
gen_exception(excp, syn, target_el);
s->base.is_jmp = DISAS_NORETURN;
}
-static void gen_exception_bkpt_insn(DisasContext *s, int offset, uint32_t syn)
+static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syn)
{
TCGv_i32 tcg_syn;
gen_set_condexec(s);
- gen_set_pc_im(s, s->pc - offset);
+ gen_set_pc_im(s, s->pc_curr);
tcg_syn = tcg_const_i32(syn);
gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
tcg_temp_free_i32(tcg_syn);
s->base.is_jmp = DISAS_NORETURN;
}
+void unallocated_encoding(DisasContext *s)
+{
+ /* Unallocated and reserved encodings are uncategorized */
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
+ default_exception_el(s));
+}
+
/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
{
- tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
+ tcg_gen_movi_i32(cpu_R[15], s->base.pc_next);
s->base.is_jmp = DISAS_EXIT;
}
s->current_el != 0 &&
#endif
(imm == (s->thumb ? 0x3c : 0xf000))) {
- gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
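+ /* Pass the address of the next insn: semihosting is handled without
+  * taking a guest-visible exception, so execution resumes there. */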
+ gen_exception_internal_insn(s, s->base.pc_next, EXCP_SEMIHOST);
return;
}
- gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
- default_exception_el(s));
+ unallocated_encoding(s);
}
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
return statusptr;
}
-#define VFP_GEN_FIX(name, round) \
-static inline void gen_vfp_##name(int dp, int shift, int neon) \
-{ \
- TCGv_i32 tmp_shift = tcg_const_i32(shift); \
- TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
- if (dp) { \
- gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
- statusptr); \
- } else { \
- gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
- statusptr); \
- } \
- tcg_temp_free_i32(tmp_shift); \
- tcg_temp_free_ptr(statusptr); \
-}
-VFP_GEN_FIX(tosl, _round_to_zero)
-VFP_GEN_FIX(toul, _round_to_zero)
-VFP_GEN_FIX(slto, )
-VFP_GEN_FIX(ulto, )
-#undef VFP_GEN_FIX
-
static inline long vfp_reg_offset(bool dp, unsigned reg)
{
if (dp) {
return ret;
}
-#define tcg_gen_ld_f32 tcg_gen_ld_i32
-#define tcg_gen_st_f32 tcg_gen_st_i32
-
#define ARM_CP_RW_BIT (1 << 20)
/* Include the VFP decoder */
{
#ifndef CONFIG_USER_ONLY
return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
- ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
+ ((s->base.pc_next - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
return true;
#endif
undef:
/* If we get here then some access check did not pass */
- gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+ syn_uncategorized(), exc_target);
return false;
}
/* Sync state because msr_banked() can raise exceptions */
gen_set_condexec(s);
- gen_set_pc_im(s, s->pc - 4);
+ gen_set_pc_im(s, s->pc_curr);
tcg_reg = load_reg(s, rn);
tcg_tgtmode = tcg_const_i32(tgtmode);
tcg_regno = tcg_const_i32(regno);
/* Sync state because mrs_banked() can raise exceptions */
gen_set_condexec(s);
- gen_set_pc_im(s, s->pc - 4);
+ gen_set_pc_im(s, s->pc_curr);
tcg_reg = tcg_temp_new_i32();
tcg_tgtmode = tcg_const_i32(tgtmode);
tcg_regno = tcg_const_i32(regno);
*/
case 1: /* yield */
if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
- gen_set_pc_im(s, s->pc);
+ gen_set_pc_im(s, s->base.pc_next);
s->base.is_jmp = DISAS_YIELD;
}
break;
case 3: /* wfi */
- gen_set_pc_im(s, s->pc);
+ gen_set_pc_im(s, s->base.pc_next);
s->base.is_jmp = DISAS_WFI;
break;
case 2: /* wfe */
if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
- gen_set_pc_im(s, s->pc);
+ gen_set_pc_im(s, s->base.pc_next);
s->base.is_jmp = DISAS_WFE;
}
break;
* for attempts to execute invalid vfp/neon encodings with FP disabled.
*/
if (s->fp_excp_el) {
- gen_exception_insn(s, 4, EXCP_UDEF,
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
return 0;
}
* for attempts to execute invalid vfp/neon encodings with FP disabled.
*/
if (s->fp_excp_el) {
- gen_exception_insn(s, 4, EXCP_UDEF,
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
return 0;
}
}
} else if (op >= 14) {
/* VCVT fixed-point. */
+ TCGv_ptr fpst;
+ TCGv_i32 shiftv;
+ VFPGenFixPointFn *fn;
+
if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
return 1;
}
+
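+ /* Bit 0 of op selects int->float vs float->int; u selects the
+  * unsigned helper. Neon lanes are 32-bit here, so only the
+  * single-precision fixed-point helpers are needed. */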
+ if (!(op & 1)) {
+ if (u) {
+ fn = gen_helper_vfp_ultos;
+ } else {
+ fn = gen_helper_vfp_sltos;
+ }
+ } else {
+ if (u) {
+ fn = gen_helper_vfp_touls_round_to_zero;
+ } else {
+ fn = gen_helper_vfp_tosls_round_to_zero;
+ }
+ }
+
/* We have already masked out the must-be-1 top bit of imm6,
* hence this 32-shift where the ARM ARM has 64-imm6.
*/
shift = 32 - shift;
+ fpst = get_fpstatus_ptr(1);
+ shiftv = tcg_const_i32(shift);
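+ /* The shift amount and FP status are loop-invariant, so they are
+  * hoisted out of the per-pass loop and freed once afterwards. */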
for (pass = 0; pass < (q ? 4 : 2); pass++) {
- tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
- if (!(op & 1)) {
- if (u)
- gen_vfp_ulto(0, shift, 1);
- else
- gen_vfp_slto(0, shift, 1);
- } else {
- if (u)
- gen_vfp_toul(0, shift, 1);
- else
- gen_vfp_tosl(0, shift, 1);
- }
- tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
+ TCGv_i32 tmpf = neon_load_reg(rm, pass);
+ fn(tmpf, tmpf, shiftv, fpst);
+ neon_store_reg(rd, pass, tmpf);
}
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i32(shiftv);
} else {
return 1;
}
q || (rm & 1)) {
return 1;
}
- tmp = tcg_temp_new_i32();
- tmp2 = tcg_temp_new_i32();
fpst = get_fpstatus_ptr(true);
ahp = get_ahp_flag();
- tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
- gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
- tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
- gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
+ tmp = neon_load_reg(rm, 0);
+ gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
+ tmp2 = neon_load_reg(rm, 1);
+ gen_helper_vfp_fcvt_f32_to_f16(tmp2, tmp2, fpst, ahp);
tcg_gen_shli_i32(tmp2, tmp2, 16);
tcg_gen_or_i32(tmp2, tmp2, tmp);
- tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
- gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
- tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
+ tcg_temp_free_i32(tmp);
+ tmp = neon_load_reg(rm, 2);
+ gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
+ tmp3 = neon_load_reg(rm, 3);
neon_store_reg(rd, 0, tmp2);
- tmp2 = tcg_temp_new_i32();
- gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
- tcg_gen_shli_i32(tmp2, tmp2, 16);
- tcg_gen_or_i32(tmp2, tmp2, tmp);
- neon_store_reg(rd, 1, tmp2);
+ gen_helper_vfp_fcvt_f32_to_f16(tmp3, tmp3, fpst, ahp);
+ tcg_gen_shli_i32(tmp3, tmp3, 16);
+ tcg_gen_or_i32(tmp3, tmp3, tmp);
+ neon_store_reg(rd, 1, tmp3);
tcg_temp_free_i32(tmp);
tcg_temp_free_i32(ahp);
tcg_temp_free_ptr(fpst);
tmp = neon_load_reg(rm, 0);
tmp2 = neon_load_reg(rm, 1);
tcg_gen_ext16u_i32(tmp3, tmp);
- gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
- tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
- tcg_gen_shri_i32(tmp3, tmp, 16);
- gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
- tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
- tcg_temp_free_i32(tmp);
+ gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
+ neon_store_reg(rd, 0, tmp3);
+ tcg_gen_shri_i32(tmp, tmp, 16);
+ gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp);
+ neon_store_reg(rd, 1, tmp);
+ tmp3 = tcg_temp_new_i32();
tcg_gen_ext16u_i32(tmp3, tmp2);
- gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
- tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
- tcg_gen_shri_i32(tmp3, tmp2, 16);
- gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
- tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
- tcg_temp_free_i32(tmp2);
- tcg_temp_free_i32(tmp3);
+ gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
+ neon_store_reg(rd, 2, tmp3);
+ tcg_gen_shri_i32(tmp2, tmp2, 16);
+ gen_helper_vfp_fcvt_f16_to_f32(tmp2, tmp2, fpst, ahp);
+ neon_store_reg(rd, 3, tmp2);
tcg_temp_free_i32(ahp);
tcg_temp_free_ptr(fpst);
break;
}
if (s->fp_excp_el) {
- gen_exception_insn(s, 4, EXCP_UDEF,
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
return 0;
}
off_rm = vfp_reg_offset(0, rm);
}
if (s->fp_excp_el) {
- gen_exception_insn(s, 4, EXCP_UDEF,
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
return 0;
}
}
gen_set_condexec(s);
- gen_set_pc_im(s, s->pc - 4);
+ gen_set_pc_im(s, s->pc_curr);
tmpptr = tcg_const_ptr(ri);
tcg_syn = tcg_const_i32(syndrome);
tcg_isread = tcg_const_i32(isread);
if (isread) {
return 1;
}
- gen_set_pc_im(s, s->pc);
+ gen_set_pc_im(s, s->base.pc_next);
s->base.is_jmp = DISAS_WFI;
return 0;
default:
* For the UNPREDICTABLE cases we choose to UNDEF.
*/
if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
- gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(), 3);
return;
}
}
if (undef) {
- gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
- default_exception_el(s));
+ unallocated_encoding(s);
return;
}
tmp = tcg_const_i32(mode);
/* get_r13_banked() will raise an exception if called from System mode */
gen_set_condexec(s);
- gen_set_pc_im(s, s->pc - 4);
+ gen_set_pc_im(s, s->pc_curr);
gen_helper_get_r13_banked(addr, cpu_env, tmp);
tcg_temp_free_i32(tmp);
switch (amode) {
* UsageFault exception.
*/
if (arm_dc_feature(s, ARM_FEATURE_M)) {
- gen_exception_insn(s, 4, EXCP_INVSTATE, syn_uncategorized(),
+ gen_exception_insn(s, s->pc_curr, EXCP_INVSTATE, syn_uncategorized(),
default_exception_el(s));
return;
}
* self-modifying code correctly and also to take
* any pending interrupts immediately.
*/
- gen_goto_tb(s, 0, s->pc & ~1);
+ gen_goto_tb(s, 0, s->base.pc_next);
return;
case 7: /* sb */
if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
* for TCG; MB and end the TB instead.
*/
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
- gen_goto_tb(s, 0, s->pc & ~1);
+ gen_goto_tb(s, 0, s->base.pc_next);
return;
default:
goto illegal_op;
/* branch link and change to thumb (blx <offset>) */
int32_t offset;
- val = (uint32_t)s->pc;
tmp = tcg_temp_new_i32();
- tcg_gen_movi_i32(tmp, val);
+ tcg_gen_movi_i32(tmp, s->base.pc_next);
store_reg(s, 14, tmp);
/* Sign-extend the 24-bit offset */
offset = (((int32_t)insn) << 8) >> 8;
+ val = read_pc(s);
/* offset * 4 + bit24 * 2 + (thumb bit) */
val += (offset << 2) | ((insn >> 23) & 2) | 1;
- /* pipeline offset */
- val += 4;
/* protected by ARCH(5); above, near the start of uncond block */
gen_bx_im(s, val);
return;
/* branch link/exchange thumb (blx) */
tmp = load_reg(s, rm);
tmp2 = tcg_temp_new_i32();
- tcg_gen_movi_i32(tmp2, s->pc);
+ tcg_gen_movi_i32(tmp2, s->base.pc_next);
store_reg(s, 14, tmp2);
gen_bx(s, tmp);
break;
tmp = load_reg(s, rm);
tmp2 = load_reg(s, rn);
if (op1 & 2)
- gen_helper_double_saturate(tmp2, cpu_env, tmp2);
+ gen_helper_add_saturate(tmp2, cpu_env, tmp2, tmp2);
if (op1 & 1)
gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
else
case 1:
/* bkpt */
ARCH(5);
- gen_exception_bkpt_insn(s, 4, syn_aa32_bkpt(imm16, false));
+ gen_exception_bkpt_insn(s, syn_aa32_bkpt(imm16, false));
break;
case 2:
/* Hypervisor call (v7) */
shift = (insn >> 7) & 0x1f;
if (insn & (1 << 6)) {
/* pkhtb */
- if (shift == 0)
+ if (shift == 0) {
shift = 31;
+ }
tcg_gen_sari_i32(tmp2, tmp2, shift);
- tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
- tcg_gen_ext16u_i32(tmp2, tmp2);
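+ /* deposit(tmp, tmp, tmp2, 0, 16): low 16 bits from tmp2, high 16
+  * bits kept from tmp -- one op replacing the and/ext16u/or trio. */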
+ tcg_gen_deposit_i32(tmp, tmp, tmp2, 0, 16);
} else {
/* pkhbt */
- if (shift)
- tcg_gen_shli_i32(tmp2, tmp2, shift);
- tcg_gen_ext16u_i32(tmp, tmp);
- tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
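+ /* pkhbt: high half from the shifted tmp2, low half from tmp, hence
+  * the swapped deposit operands relative to pkhtb above. */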
+ tcg_gen_shli_i32(tmp2, tmp2, shift);
+ tcg_gen_deposit_i32(tmp, tmp2, tmp, 0, 16);
}
- tcg_gen_or_i32(tmp, tmp, tmp2);
tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
} else if ((insn & 0x00200020) == 0x00200000) {
loaded_base = 0;
loaded_var = NULL;
n = 0;
- for(i=0;i<16;i++) {
+ for (i = 0; i < 16; i++) {
if (insn & (1 << i))
n++;
}
}
}
j = 0;
- for(i=0;i<16;i++) {
+ for (i = 0; i < 16; i++) {
if (insn & (1 << i)) {
if (is_load) {
/* load */
} else {
/* store */
if (i == 15) {
- /* special case: r15 = PC + 8 */
- val = (long)s->pc + 4;
tmp = tcg_temp_new_i32();
- tcg_gen_movi_i32(tmp, val);
+ tcg_gen_movi_i32(tmp, read_pc(s));
} else if (user) {
tmp = tcg_temp_new_i32();
tmp2 = tcg_const_i32(i);
int32_t offset;
/* branch (and link) */
- val = (int32_t)s->pc;
if (insn & (1 << 24)) {
tmp = tcg_temp_new_i32();
- tcg_gen_movi_i32(tmp, val);
+ tcg_gen_movi_i32(tmp, s->base.pc_next);
store_reg(s, 14, tmp);
}
offset = sextract32(insn << 2, 0, 26);
- val += offset + 4;
- gen_jmp(s, val);
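+ /* read_pc() already includes the +8 seen by an A32 insn reading PC,
+  * which the old code added by hand (s->pc + 4 == pc_curr + 8). */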
+ gen_jmp(s, read_pc(s) + offset);
}
break;
case 0xc:
break;
case 0xf:
/* swi */
- gen_set_pc_im(s, s->pc);
+ gen_set_pc_im(s, s->base.pc_next);
s->svc_imm = extract32(insn, 0, 24);
s->base.is_jmp = DISAS_SWI;
break;
default:
illegal_op:
- gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
- default_exception_el(s));
+ unallocated_encoding(s);
break;
}
}
}
-static bool thumb_insn_is_16bit(DisasContext *s, uint32_t insn)
+static bool thumb_insn_is_16bit(DisasContext *s, uint32_t pc, uint32_t insn)
{
- /* Return true if this is a 16 bit instruction. We must be precise
- * about this (matching the decode). We assume that s->pc still
- * points to the first 16 bits of the insn.
+ /*
+ * Return true if this is a 16 bit instruction. We must be precise
+ * about this (matching the decode).
*/
if ((insn >> 11) < 0x1d) {
/* Definitely a 16-bit instruction */
return true;
}
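+ /* Top five bits 0b11101 / 0b11110 / 0b11111 mark the first half of
+  * a 32-bit Thumb insn; anything below 0x1d is a 16-bit insn. */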
- if ((insn >> 11) == 0x1e && s->pc - s->page_start < TARGET_PAGE_SIZE - 3) {
+ if ((insn >> 11) == 0x1e && pc - s->page_start < TARGET_PAGE_SIZE - 3) {
/* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
* is not on the next page; we merge this into a 32-bit
* insn.
*/
bool wback = extract32(insn, 21, 1);
- if (rn == 15) {
- if (insn & (1 << 21)) {
- /* UNPREDICTABLE */
- goto illegal_op;
- }
- addr = tcg_temp_new_i32();
- tcg_gen_movi_i32(addr, s->pc & ~3);
- } else {
- addr = load_reg(s, rn);
+ if (rn == 15 && (insn & (1 << 21))) {
+ /* UNPREDICTABLE */
+ goto illegal_op;
}
+
+ addr = add_reg_for_lit(s, rn, 0);
offset = (insn & 0xff) * 4;
if ((insn & (1 << 23)) == 0) {
offset = -offset;
tcg_temp_free_i32(addr);
} else if ((insn & (7 << 5)) == 0) {
/* Table Branch. */
- if (rn == 15) {
- addr = tcg_temp_new_i32();
- tcg_gen_movi_i32(addr, s->pc);
- } else {
- addr = load_reg(s, rn);
- }
+ addr = load_reg(s, rn);
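+ /* With rn == 15, load_reg() now returns the architectural PC (this
+  * insn's address + 4), matching the base the old special case built,
+  * so TBB/TBH no longer needs a PC check. */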
tmp = load_reg(s, rm);
tcg_gen_add_i32(addr, addr, tmp);
if (insn & (1 << 4)) {
}
tcg_temp_free_i32(addr);
tcg_gen_shli_i32(tmp, tmp, 1);
- tcg_gen_addi_i32(tmp, tmp, s->pc);
+ tcg_gen_addi_i32(tmp, tmp, read_pc(s));
store_reg(s, 15, tmp);
} else {
bool is_lasr = false;
shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
if (insn & (1 << 5)) {
/* pkhtb */
- if (shift == 0)
+ if (shift == 0) {
shift = 31;
+ }
tcg_gen_sari_i32(tmp2, tmp2, shift);
- tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
- tcg_gen_ext16u_i32(tmp2, tmp2);
+ tcg_gen_deposit_i32(tmp, tmp, tmp2, 0, 16);
} else {
/* pkhbt */
- if (shift)
- tcg_gen_shli_i32(tmp2, tmp2, shift);
- tcg_gen_ext16u_i32(tmp, tmp);
- tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
+ tcg_gen_shli_i32(tmp2, tmp2, shift);
+ tcg_gen_deposit_i32(tmp, tmp2, tmp, 0, 16);
}
- tcg_gen_or_i32(tmp, tmp, tmp2);
tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
} else {
tmp = load_reg(s, rn);
tmp2 = load_reg(s, rm);
if (op & 1)
- gen_helper_double_saturate(tmp, cpu_env, tmp);
+ gen_helper_add_saturate(tmp, cpu_env, tmp, tmp);
if (op & 2)
gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
else
}
/* All other insns: NOCP */
- gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
+ gen_exception_insn(s, s->pc_curr, EXCP_NOCP, syn_uncategorized(),
default_exception_el(s));
break;
}
if (insn & (1 << 14)) {
/* Branch and link. */
- tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
+ tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | 1);
}
- offset += s->pc;
+ offset += read_pc(s);
if (insn & (1 << 12)) {
/* b/bl */
gen_jmp(s, offset);
* and also to take any pending interrupts
* immediately.
*/
- gen_goto_tb(s, 0, s->pc & ~1);
+ gen_goto_tb(s, 0, s->base.pc_next);
break;
case 7: /* sb */
if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
* for TCG; MB and end the TB instead.
*/
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
- gen_goto_tb(s, 0, s->pc & ~1);
+ gen_goto_tb(s, 0, s->base.pc_next);
break;
default:
goto illegal_op;
offset |= (insn & (1 << 11)) << 8;
/* jump to the offset */
- gen_jmp(s, s->pc + offset);
+ gen_jmp(s, read_pc(s) + offset);
}
} else {
/*
store_reg(s, rd, tmp);
} else {
/* Add/sub 12-bit immediate. */
- if (rn == 15) {
- offset = s->pc & ~(uint32_t)3;
- if (insn & (1 << 23))
- offset -= imm;
- else
- offset += imm;
- tmp = tcg_temp_new_i32();
- tcg_gen_movi_i32(tmp, offset);
- store_reg(s, rd, tmp);
+ if (insn & (1 << 23)) {
+ imm = -imm;
+ }
+ tmp = add_reg_for_lit(s, rn, imm);
+ if (rn == 13 && rd == 13) {
+ /* ADD SP, SP, imm or SUB SP, SP, imm */
+ store_sp_checked(s, tmp);
} else {
- tmp = load_reg(s, rn);
- if (insn & (1 << 23))
- tcg_gen_subi_i32(tmp, tmp, imm);
- else
- tcg_gen_addi_i32(tmp, tmp, imm);
- if (rn == 13 && rd == 13) {
- /* ADD SP, SP, imm or SUB SP, SP, imm */
- store_sp_checked(s, tmp);
- } else {
- store_reg(s, rd, tmp);
- }
+ store_reg(s, rd, tmp);
}
}
}
}
}
memidx = get_mem_index(s);
- if (rn == 15) {
- addr = tcg_temp_new_i32();
- /* PC relative. */
- /* s->pc has already been incremented by 4. */
- imm = s->pc & 0xfffffffc;
- if (insn & (1 << 23))
- imm += insn & 0xfff;
- else
- imm -= insn & 0xfff;
- tcg_gen_movi_i32(addr, imm);
+ imm = insn & 0xfff;
+ if (insn & (1 << 23)) {
+ /* PC relative or Positive offset. */
+ addr = add_reg_for_lit(s, rn, imm);
+ } else if (rn == 15) {
+ /* PC relative with negative offset. */
+ addr = add_reg_for_lit(s, rn, -imm);
} else {
addr = load_reg(s, rn);
- if (insn & (1 << 23)) {
- /* Positive offset. */
- imm = insn & 0xfff;
- tcg_gen_addi_i32(addr, addr, imm);
- } else {
- imm = insn & 0xff;
- switch ((insn >> 8) & 0xf) {
- case 0x0: /* Shifted Register. */
- shift = (insn >> 4) & 0xf;
- if (shift > 3) {
- tcg_temp_free_i32(addr);
- goto illegal_op;
- }
- tmp = load_reg(s, rm);
- if (shift)
- tcg_gen_shli_i32(tmp, tmp, shift);
- tcg_gen_add_i32(addr, addr, tmp);
- tcg_temp_free_i32(tmp);
- break;
- case 0xc: /* Negative offset. */
- tcg_gen_addi_i32(addr, addr, -imm);
- break;
- case 0xe: /* User privilege. */
- tcg_gen_addi_i32(addr, addr, imm);
- memidx = get_a32_user_mem_index(s);
- break;
- case 0x9: /* Post-decrement. */
- imm = -imm;
- /* Fall through. */
- case 0xb: /* Post-increment. */
- postinc = 1;
- writeback = 1;
- break;
- case 0xd: /* Pre-decrement. */
- imm = -imm;
- /* Fall through. */
- case 0xf: /* Pre-increment. */
- writeback = 1;
- break;
- default:
+ imm = insn & 0xff;
+ switch ((insn >> 8) & 0xf) {
+ case 0x0: /* Shifted Register. */
+ shift = (insn >> 4) & 0xf;
+ if (shift > 3) {
tcg_temp_free_i32(addr);
goto illegal_op;
}
+ tmp = load_reg(s, rm);
+ if (shift) {
+ tcg_gen_shli_i32(tmp, tmp, shift);
+ }
+ tcg_gen_add_i32(addr, addr, tmp);
+ tcg_temp_free_i32(tmp);
+ break;
+ case 0xc: /* Negative offset. */
+ tcg_gen_addi_i32(addr, addr, -imm);
+ break;
+ case 0xe: /* User privilege. */
+ tcg_gen_addi_i32(addr, addr, imm);
+ memidx = get_a32_user_mem_index(s);
+ break;
+ case 0x9: /* Post-decrement. */
+ imm = -imm;
+ /* Fall through. */
+ case 0xb: /* Post-increment. */
+ postinc = 1;
+ writeback = 1;
+ break;
+ case 0xd: /* Pre-decrement. */
+ imm = -imm;
+ /* Fall through. */
+ case 0xf: /* Pre-increment. */
+ writeback = 1;
+ break;
+ default:
+ tcg_temp_free_i32(addr);
+ goto illegal_op;
}
}
}
return;
illegal_op:
- gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
- default_exception_el(s));
+ unallocated_encoding(s);
}
static void disas_thumb_insn(DisasContext *s, uint32_t insn)
if (insn & (1 << 11)) {
rd = (insn >> 8) & 7;
/* load pc-relative. Bit 1 of PC is ignored. */
- val = s->pc + 2 + ((insn & 0xff) * 4);
- val &= ~(uint32_t)2;
- addr = tcg_temp_new_i32();
- tcg_gen_movi_i32(addr, val);
+ addr = add_reg_for_lit(s, 15, (insn & 0xff) * 4);
tmp = tcg_temp_new_i32();
gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
rd | ISSIs16Bit);
/* BLX/BX */
tmp = load_reg(s, rm);
if (link) {
- val = (uint32_t)s->pc | 1;
+ val = (uint32_t)s->base.pc_next | 1;
tmp2 = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp2, val);
store_reg(s, 14, tmp2);
* - Add PC/SP (immediate)
*/
rd = (insn >> 8) & 7;
- if (insn & (1 << 11)) {
- /* SP */
- tmp = load_reg(s, 13);
- } else {
- /* PC. bit 1 is ignored. */
- tmp = tcg_temp_new_i32();
- tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
- }
val = (insn & 0xff) * 4;
- tcg_gen_addi_i32(tmp, tmp, val);
+ tmp = add_reg_for_lit(s, insn & (1 << 11) ? 13 : 15, val);
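+ /* For the PC case this is ADR: base = ALIGN(PC, 4), which is what
+  * add_reg_for_lit() supplies ("bit 1 of PC is ignored"). */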
store_reg(s, rd, tmp);
break;
tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
tcg_temp_free_i32(tmp);
offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
- val = (uint32_t)s->pc + 2;
- val += offset;
- gen_jmp(s, val);
+ gen_jmp(s, read_pc(s) + offset);
break;
case 15: /* IT, nop-hint. */
gen_nop_hint(s, (insn >> 4) & 0xf);
break;
}
- /* If Then. */
+ /*
+ * IT (If-Then)
+ *
+ * Combinations of firstcond and mask which set up an 0b1111
+ * condition are UNPREDICTABLE; we take the CONSTRAINED
+ * UNPREDICTABLE choice to treat 0b1111 the same as 0b1110,
+ * i.e. both meaning "execute always".
+ */
s->condexec_cond = (insn >> 4) & 0xe;
s->condexec_mask = insn & 0x1f;
/* No actual code generated for this insn, just setup state. */
{
int imm8 = extract32(insn, 0, 8);
ARCH(5);
- gen_exception_bkpt_insn(s, 2, syn_aa32_bkpt(imm8, true));
+ gen_exception_bkpt_insn(s, syn_aa32_bkpt(imm8, true));
break;
}
if (cond == 0xf) {
/* swi */
- gen_set_pc_im(s, s->pc);
+ gen_set_pc_im(s, s->base.pc_next);
s->svc_imm = extract32(insn, 0, 8);
s->base.is_jmp = DISAS_SWI;
break;
arm_skip_unless(s, cond);
/* jump to the offset */
- val = (uint32_t)s->pc + 2;
+ val = read_pc(s);
offset = ((int32_t)insn << 24) >> 24;
val += offset << 1;
gen_jmp(s, val);
tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
tmp2 = tcg_temp_new_i32();
- tcg_gen_movi_i32(tmp2, s->pc | 1);
+ tcg_gen_movi_i32(tmp2, s->base.pc_next | 1);
store_reg(s, 14, tmp2);
gen_bx(s, tmp);
break;
}
/* unconditional branch */
- val = (uint32_t)s->pc;
+ val = read_pc(s);
offset = ((int32_t)insn << 21) >> 21;
- val += (offset << 1) + 2;
+ val += offset << 1;
gen_jmp(s, val);
break;
tcg_gen_addi_i32(tmp, tmp, offset);
tmp2 = tcg_temp_new_i32();
- tcg_gen_movi_i32(tmp2, s->pc | 1);
+ tcg_gen_movi_i32(tmp2, s->base.pc_next | 1);
store_reg(s, 14, tmp2);
gen_bx(s, tmp);
} else {
/* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
uint32_t uoffset = ((int32_t)insn << 21) >> 9;
- tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + uoffset);
+ tcg_gen_movi_i32(cpu_R[14], read_pc(s) + uoffset);
}
break;
}
return;
illegal_op:
undef:
- gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
- default_exception_el(s));
+ unallocated_encoding(s);
}
static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
{
- /* Return true if the insn at dc->pc might cross a page boundary.
+ /* Return true if the insn at dc->base.pc_next might cross a page boundary.
* (False positives are OK, false negatives are not.)
* We know this is a Thumb insn, and our caller ensures we are
- * only called if dc->pc is less than 4 bytes from the page
+ * only called if dc->base.pc_next is less than 4 bytes from the page
* boundary, so we cross the page if the first 16 bits indicate
* that this is a 32 bit insn.
*/
- uint16_t insn = arm_lduw_code(env, s->pc, s->sctlr_b);
+ uint16_t insn = arm_lduw_code(env, s->base.pc_next, s->sctlr_b);
- return !thumb_insn_is_16bit(s, insn);
+ return !thumb_insn_is_16bit(s, s->base.pc_next, insn);
}
static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
uint32_t condexec, core_mmu_idx;
dc->isar = &cpu->isar;
- dc->pc = dc->base.pc_first;
dc->condjmp = 0;
dc->aarch64 = 0;
dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
dc->is_ldex = false;
- dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
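+ /* Explanatory note: DEBUG_TARGET_EL in the TB flags is assumed to be
+  * meaningful only for A/R-profile; M-profile has no exception levels
+  * in this sense, hence the feature check. */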
+ if (!arm_feature(env, ARM_FEATURE_M)) {
+ dc->debug_target_el = FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
+ }
dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
- cpu_F0s = tcg_temp_new_i32();
- cpu_F1s = tcg_temp_new_i32();
- cpu_F0d = tcg_temp_new_i64();
- cpu_F1d = tcg_temp_new_i64();
- cpu_V0 = cpu_F0d;
- cpu_V1 = cpu_F1d;
+ cpu_V0 = tcg_temp_new_i64();
+ cpu_V1 = tcg_temp_new_i64();
/* FIXME: cpu_M0 can probably be the same as cpu_V0. */
cpu_M0 = tcg_temp_new_i64();
}
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
- tcg_gen_insn_start(dc->pc,
+ tcg_gen_insn_start(dc->base.pc_next,
(dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
0);
dc->insn_start = tcg_last_op();
if (bp->flags & BP_CPU) {
gen_set_condexec(dc);
- gen_set_pc_im(dc, dc->pc);
+ gen_set_pc_im(dc, dc->base.pc_next);
gen_helper_check_breakpoints(cpu_env);
/* End the TB early; it's likely not going to be executed */
dc->base.is_jmp = DISAS_TOO_MANY;
} else {
- gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
+ gen_exception_internal_insn(dc, dc->base.pc_next, EXCP_DEBUG);
/* The address covered by the breakpoint must be
included in [tb->pc, tb->pc + tb->size) in order
for it to be properly cleared -- thus we
tb->size below does the right thing. */
/* TODO: Advance PC by correct instruction length to
* avoid disassembler error messages */
- dc->pc += 2;
+ dc->base.pc_next += 2;
dc->base.is_jmp = DISAS_NORETURN;
}
{
#ifdef CONFIG_USER_ONLY
/* Intercept jump to the magic kernel page. */
- if (dc->pc >= 0xffff0000) {
+ if (dc->base.pc_next >= 0xffff0000) {
/* We always get here via a jump, so know we are not in a
conditional execution block. */
gen_exception_internal(EXCP_KERNEL_TRAP);
* bits should be zero.
*/
assert(dc->base.num_insns == 1);
- gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
- default_exception_el(dc));
+ gen_swstep_exception(dc, 0, 0);
dc->base.is_jmp = DISAS_NORETURN;
return true;
}
gen_set_label(dc->condlabel);
dc->condjmp = 0;
}
- dc->base.pc_next = dc->pc;
translator_loop_temp_check(&dc->base);
}
return;
}
- insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
+ dc->pc_curr = dc->base.pc_next;
+ insn = arm_ldl_code(env, dc->base.pc_next, dc->sctlr_b);
dc->insn = insn;
- dc->pc += 4;
+ dc->base.pc_next += 4;
disas_arm_insn(dc, insn);
arm_post_translate_insn(dc);
return;
}
- insn = arm_lduw_code(env, dc->pc, dc->sctlr_b);
- is_16bit = thumb_insn_is_16bit(dc, insn);
- dc->pc += 2;
+ dc->pc_curr = dc->base.pc_next;
+ insn = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);
+ is_16bit = thumb_insn_is_16bit(dc, dc->base.pc_next, insn);
+ dc->base.pc_next += 2;
if (!is_16bit) {
- uint32_t insn2 = arm_lduw_code(env, dc->pc, dc->sctlr_b);
+ uint32_t insn2 = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);
insn = insn << 16 | insn2;
- dc->pc += 2;
+ dc->base.pc_next += 2;
}
dc->insn = insn;
if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
uint32_t cond = dc->condexec_cond;
- if (cond != 0x0e) { /* Skip conditional when condition is AL. */
+ /*
+ * Conditionally skip the insn. Note that both 0xe and 0xf mean
+ * "always"; 0xf is not "never".
+ */
+ if (cond < 0x0e) {
arm_skip_unless(dc, cond);
}
}
* but isn't very efficient).
*/
if (dc->base.is_jmp == DISAS_NEXT
- && (dc->pc - dc->page_start >= TARGET_PAGE_SIZE
- || (dc->pc - dc->page_start >= TARGET_PAGE_SIZE - 3
+ && (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE
+ || (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE - 3
&& insn_crosses_page(env, dc)))) {
dc->base.is_jmp = DISAS_TOO_MANY;
}
case DISAS_NEXT:
case DISAS_TOO_MANY:
case DISAS_UPDATE:
- gen_set_pc_im(dc, dc->pc);
+ gen_set_pc_im(dc, dc->base.pc_next);
/* fall through */
default:
/* FIXME: Single stepping a WFI insn will not halt the CPU. */
switch (dc->base.is_jmp) {
case DISAS_NEXT:
case DISAS_TOO_MANY:
- gen_goto_tb(dc, 1, dc->pc);
+ gen_goto_tb(dc, 1, dc->base.pc_next);
break;
case DISAS_JUMP:
gen_goto_ptr();
break;
case DISAS_UPDATE:
- gen_set_pc_im(dc, dc->pc);
+ gen_set_pc_im(dc, dc->base.pc_next);
/* fall through */
default:
/* indicate that the hash table must be used to find the next TB */
gen_set_label(dc->condlabel);
gen_set_condexec(dc);
if (unlikely(is_singlestepping(dc))) {
- gen_set_pc_im(dc, dc->pc);
+ gen_set_pc_im(dc, dc->base.pc_next);
gen_singlestep_exception(dc);
} else {
- gen_goto_tb(dc, 1, dc->pc);
+ gen_goto_tb(dc, 1, dc->base.pc_next);
}
}
-
- /* Functions above can change dc->pc, so re-align db->pc_next */
- dc->base.pc_next = dc->pc;
}
static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
translator_loop(ops, &dc.base, cpu, tb, max_insns);
}
-void arm_cpu_dump_state(CPUState *cs, FILE *f, int flags)
-{
- ARMCPU *cpu = ARM_CPU(cs);
- CPUARMState *env = &cpu->env;
- int i;
-
- if (is_a64(env)) {
- aarch64_cpu_dump_state(cs, f, flags);
- return;
- }
-
- for(i=0;i<16;i++) {
- qemu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
- if ((i % 4) == 3)
- qemu_fprintf(f, "\n");
- else
- qemu_fprintf(f, " ");
- }
-
- if (arm_feature(env, ARM_FEATURE_M)) {
- uint32_t xpsr = xpsr_read(env);
- const char *mode;
- const char *ns_status = "";
-
- if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
- ns_status = env->v7m.secure ? "S " : "NS ";
- }
-
- if (xpsr & XPSR_EXCP) {
- mode = "handler";
- } else {
- if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
- mode = "unpriv-thread";
- } else {
- mode = "priv-thread";
- }
- }
-
- qemu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
- xpsr,
- xpsr & XPSR_N ? 'N' : '-',
- xpsr & XPSR_Z ? 'Z' : '-',
- xpsr & XPSR_C ? 'C' : '-',
- xpsr & XPSR_V ? 'V' : '-',
- xpsr & XPSR_T ? 'T' : 'A',
- ns_status,
- mode);
- } else {
- uint32_t psr = cpsr_read(env);
- const char *ns_status = "";
-
- if (arm_feature(env, ARM_FEATURE_EL3) &&
- (psr & CPSR_M) != ARM_CPU_MODE_MON) {
- ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
- }
-
- qemu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
- psr,
- psr & CPSR_N ? 'N' : '-',
- psr & CPSR_Z ? 'Z' : '-',
- psr & CPSR_C ? 'C' : '-',
- psr & CPSR_V ? 'V' : '-',
- psr & CPSR_T ? 'T' : 'A',
- ns_status,
- aarch32_mode_name(psr), (psr & 0x10) ? 32 : 26);
- }
-
- if (flags & CPU_DUMP_FPU) {
- int numvfpregs = 0;
- if (arm_feature(env, ARM_FEATURE_VFP)) {
- numvfpregs += 16;
- }
- if (arm_feature(env, ARM_FEATURE_VFP3)) {
- numvfpregs += 16;
- }
- for (i = 0; i < numvfpregs; i++) {
- uint64_t v = *aa32_vfp_dreg(env, i);
- qemu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
- i * 2, (uint32_t)v,
- i * 2 + 1, (uint32_t)(v >> 32),
- i, v);
- }
- qemu_fprintf(f, "FPSCR: %08x\n", vfp_get_fpscr(env));
- }
-}
-
void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
target_ulong *data)
{