#include <inttypes.h>
#include "cpu.h"
-#include "disas.h"
+#include "disas/disas.h"
#include "tcg-op.h"
-#include "qemu-log.h"
+#include "qemu/log.h"
#include "helper.h"
#define GEN_HELPER 1
int condjmp;
/* The label that will be jumped to when the instruction is skipped. */
int condlabel;
- /* Thumb-2 condtional execution bits. */
+ /* Thumb-2 conditional execution bits. */
int condexec_mask;
int condexec_cond;
struct TranslationBlock *tb;
#endif
/* These instructions trap after executing, so defer them until after the
- conditional executions state has been updated. */
+ conditional execution state has been updated. */
#define DISAS_WFI 4
#define DISAS_SWI 5
/* We reuse the same 64-bit temporaries for efficiency. */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
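+/* Condition code flags, mirroring CPUARMState.{CF,NF,VF,ZF} as TCG globals
+ * so flag updates can write them directly instead of going through explicit
+ * loads and stores of the env fields. */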
+static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
static TCGv_i32 cpu_exclusive_addr;
static TCGv_i32 cpu_exclusive_val;
static TCGv_i32 cpu_exclusive_high;
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;
-#include "gen-icount.h"
+#include "exec/gen-icount.h"
static const char *regnames[] =
{ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
offsetof(CPUARMState, regs[i]),
regnames[i]);
}
+ cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
+ cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
+ cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
+ cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
+
cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
{
if (reg == 15) {
uint32_t addr;
- /* normaly, since we updated PC, we need only to add one insn */
+ /* normally, since we updated PC, we need only to add one insn */
if (s->thumb)
addr = (long)s->pc + 2;
else
static inline void gen_set_cpsr(TCGv var, uint32_t mask)
{
TCGv tmp_mask = tcg_const_i32(mask);
- gen_helper_cpsr_write(var, tmp_mask);
+ gen_helper_cpsr_write(cpu_env, var, tmp_mask);
tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var. */
{
TCGv tmp = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp, excp);
- gen_helper_exception(tmp);
+ gen_helper_exception(cpu_env, tmp);
tcg_temp_free_i32(tmp);
}
}
}
-/* Bitfield insertion. Insert val into base. Clobbers base and val. */
-static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
-{
- tcg_gen_andi_i32(val, val, mask);
- tcg_gen_shli_i32(val, val, shift);
- tcg_gen_andi_i32(base, base, ~(mask << shift));
- tcg_gen_or_i32(dest, base, val);
-}
-
/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
{
tcg_temp_free_i32(t1);
}
-#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, CF))
-
/* Set CF to the top bit of var. */
static void gen_set_CF_bit31(TCGv var)
{
- TCGv tmp = tcg_temp_new_i32();
- tcg_gen_shri_i32(tmp, var, 31);
- gen_set_CF(tmp);
- tcg_temp_free_i32(tmp);
+ tcg_gen_shri_i32(cpu_CF, var, 31);
}
/* Set N and Z flags from var. */
static inline void gen_logic_CC(TCGv var)
{
- tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, NF));
- tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, ZF));
+ tcg_gen_mov_i32(cpu_NF, var);
+ tcg_gen_mov_i32(cpu_ZF, var);
}
/* T0 += T1 + CF. */
static void gen_adc(TCGv t0, TCGv t1)
{
- TCGv tmp;
tcg_gen_add_i32(t0, t0, t1);
- tmp = load_cpu_field(CF);
- tcg_gen_add_i32(t0, t0, tmp);
- tcg_temp_free_i32(tmp);
+ tcg_gen_add_i32(t0, t0, cpu_CF);
}
/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
- TCGv tmp;
tcg_gen_add_i32(dest, t0, t1);
- tmp = load_cpu_field(CF);
- tcg_gen_add_i32(dest, dest, tmp);
- tcg_temp_free_i32(tmp);
+ tcg_gen_add_i32(dest, dest, cpu_CF);
}
/* dest = T0 - T1 + CF - 1. */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
- TCGv tmp;
tcg_gen_sub_i32(dest, t0, t1);
- tmp = load_cpu_field(CF);
- tcg_gen_add_i32(dest, dest, tmp);
+ tcg_gen_add_i32(dest, dest, cpu_CF);
tcg_gen_subi_i32(dest, dest, 1);
+}
+
+/* dest = T0 + T1. Compute C, N, V and Z flags */
+static void gen_add_CC(TCGv dest, TCGv t0, TCGv t1)
+{
+ TCGv tmp;
+ tcg_gen_add_i32(cpu_NF, t0, t1);
+ tcg_gen_mov_i32(cpu_ZF, cpu_NF);
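+ /* C is the carry out of the unsigned add: set iff the result is
+  * (unsigned) less than t0. V is the signed overflow, kept in bit 31
+  * of VF: (t0 ^ result) & ~(t0 ^ t1), i.e. operands of equal sign
+  * producing a result of the opposite sign. */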
+ tcg_gen_setcond_i32(TCG_COND_LTU, cpu_CF, cpu_NF, t0);
+ tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
+ tmp = tcg_temp_new_i32();
+ tcg_gen_xor_i32(tmp, t0, t1);
+ tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
tcg_temp_free_i32(tmp);
+ tcg_gen_mov_i32(dest, cpu_NF);
}
-/* FIXME: Implement this natively. */
-#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
+/* dest = T0 - T1. Compute C, N, V and Z flags */
+static void gen_sub_CC(TCGv dest, TCGv t0, TCGv t1)
+{
+ TCGv tmp;
+ tcg_gen_sub_i32(cpu_NF, t0, t1);
+ tcg_gen_mov_i32(cpu_ZF, cpu_NF);
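+ /* C uses the ARM "NOT borrow" convention: set iff t0 >= t1 unsigned.
+  * V (bit 31 of VF) is (t0 ^ t1) & (t0 ^ result): the operands had
+  * different signs and the result's sign differs from t0's. */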
+ tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
+ tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
+ tmp = tcg_temp_new_i32();
+ tcg_gen_xor_i32(tmp, t0, t1);
+ tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
+ tcg_temp_free_i32(tmp);
+ tcg_gen_mov_i32(dest, cpu_NF);
+}
-static void shifter_out_im(TCGv var, int shift)
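+
+/* Variable-amount LSL/LSR. The ARM shift amount is the low byte of the
+ * register; shifts by 32 or more must yield 0, but TCG shifts are only
+ * defined for counts 0..31, so substitute a zero source operand for
+ * out-of-range counts and mask the count down to 5 bits. */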
+#define GEN_SHIFT(name) \
+static void gen_##name(TCGv dest, TCGv t0, TCGv t1) \
+{ \
+ TCGv tmp1, tmp2, tmp3; \
+ tmp1 = tcg_temp_new_i32(); \
+ tcg_gen_andi_i32(tmp1, t1, 0xff); \
+ tmp2 = tcg_const_i32(0); \
+ tmp3 = tcg_const_i32(0x1f); \
+ tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
+ tcg_temp_free_i32(tmp3); \
+ tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
+ tcg_gen_##name##_i32(dest, tmp2, tmp1); \
+ tcg_temp_free_i32(tmp2); \
+ tcg_temp_free_i32(tmp1); \
+}
+GEN_SHIFT(shl)
+GEN_SHIFT(shr)
+#undef GEN_SHIFT
+
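+/* Variable-amount ASR. A shift by 32 or more gives the same result as a
+ * shift by 31 (every bit becomes a copy of the sign bit), so the count
+ * is simply clamped to 31. */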
+static void gen_sar(TCGv dest, TCGv t0, TCGv t1)
+{
+ TCGv tmp1, tmp2;
+ tmp1 = tcg_temp_new_i32();
+ tcg_gen_andi_i32(tmp1, t1, 0xff);
+ tmp2 = tcg_const_i32(0x1f);
+ tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
+ tcg_temp_free_i32(tmp2);
+ tcg_gen_sar_i32(dest, t0, tmp1);
+ tcg_temp_free_i32(tmp1);
+}
+
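+/* dest = |src|, expanded inline with neg + movcond in place of the old
+ * gen_helper_abs-based macro. */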
+static void tcg_gen_abs_i32(TCGv dest, TCGv src)
{
+ TCGv c0 = tcg_const_i32(0);
TCGv tmp = tcg_temp_new_i32();
+ tcg_gen_neg_i32(tmp, src);
+ tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
+ tcg_temp_free_i32(c0);
+ tcg_temp_free_i32(tmp);
+}
+
+static void shifter_out_im(TCGv var, int shift)
+{
if (shift == 0) {
- tcg_gen_andi_i32(tmp, var, 1);
+ tcg_gen_andi_i32(cpu_CF, var, 1);
} else {
- tcg_gen_shri_i32(tmp, var, shift);
- if (shift != 31)
- tcg_gen_andi_i32(tmp, tmp, 1);
+ tcg_gen_shri_i32(cpu_CF, var, shift);
+ if (shift != 31) {
+ tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
+ }
}
- gen_set_CF(tmp);
- tcg_temp_free_i32(tmp);
}
/* Shift by immediate. Includes special handling for shift == 0. */
case 1: /* LSR */
if (shift == 0) {
if (flags) {
- tcg_gen_shri_i32(var, var, 31);
- gen_set_CF(var);
+ tcg_gen_shri_i32(cpu_CF, var, 31);
}
tcg_gen_movi_i32(var, 0);
} else {
shifter_out_im(var, shift - 1);
tcg_gen_rotri_i32(var, var, shift); break;
} else {
- TCGv tmp = load_cpu_field(CF);
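+ /* RRX: the old carry flag rotates in as the new bit 31. */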
+ TCGv tmp = tcg_temp_new_i32();
+ tcg_gen_shli_i32(tmp, cpu_CF, 31);
if (flags)
shifter_out_im(var, 0);
tcg_gen_shri_i32(var, var, 1);
- tcg_gen_shli_i32(tmp, tmp, 31);
tcg_gen_or_i32(var, var, tmp);
tcg_temp_free_i32(tmp);
}
{
if (flags) {
switch (shiftop) {
- case 0: gen_helper_shl_cc(var, var, shift); break;
- case 1: gen_helper_shr_cc(var, var, shift); break;
- case 2: gen_helper_sar_cc(var, var, shift); break;
- case 3: gen_helper_ror_cc(var, var, shift); break;
+ case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
+ case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
+ case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
+ case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
}
} else {
switch (shiftop) {
- case 0: gen_helper_shl(var, var, shift); break;
- case 1: gen_helper_shr(var, var, shift); break;
- case 2: gen_helper_sar(var, var, shift); break;
+ case 0:
+ gen_shl(var, var, shift);
+ break;
+ case 1:
+ gen_shr(var, var, shift);
+ break;
+ case 2:
+ gen_sar(var, var, shift);
+ break;
case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
tcg_gen_rotr_i32(var, var, shift); break;
}
static void gen_test_cc(int cc, int label)
{
TCGv tmp;
- TCGv tmp2;
int inv;
switch (cc) {
case 0: /* eq: Z */
- tmp = load_cpu_field(ZF);
- tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
break;
case 1: /* ne: !Z */
- tmp = load_cpu_field(ZF);
- tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
+ tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
break;
case 2: /* cs: C */
- tmp = load_cpu_field(CF);
- tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
+ tcg_gen_brcondi_i32(TCG_COND_NE, cpu_CF, 0, label);
break;
case 3: /* cc: !C */
- tmp = load_cpu_field(CF);
- tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
break;
case 4: /* mi: N */
- tmp = load_cpu_field(NF);
- tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
+ tcg_gen_brcondi_i32(TCG_COND_LT, cpu_NF, 0, label);
break;
case 5: /* pl: !N */
- tmp = load_cpu_field(NF);
- tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
+ tcg_gen_brcondi_i32(TCG_COND_GE, cpu_NF, 0, label);
break;
case 6: /* vs: V */
- tmp = load_cpu_field(VF);
- tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
+ tcg_gen_brcondi_i32(TCG_COND_LT, cpu_VF, 0, label);
break;
case 7: /* vc: !V */
- tmp = load_cpu_field(VF);
- tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
+ tcg_gen_brcondi_i32(TCG_COND_GE, cpu_VF, 0, label);
break;
case 8: /* hi: C && !Z */
inv = gen_new_label();
- tmp = load_cpu_field(CF);
- tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
- tcg_temp_free_i32(tmp);
- tmp = load_cpu_field(ZF);
- tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, inv);
+ tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
gen_set_label(inv);
break;
case 9: /* ls: !C || Z */
- tmp = load_cpu_field(CF);
- tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
- tcg_temp_free_i32(tmp);
- tmp = load_cpu_field(ZF);
- tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
break;
case 10: /* ge: N == V -> N ^ V == 0 */
- tmp = load_cpu_field(VF);
- tmp2 = load_cpu_field(NF);
- tcg_gen_xor_i32(tmp, tmp, tmp2);
- tcg_temp_free_i32(tmp2);
+ tmp = tcg_temp_new_i32();
+ tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
+ tcg_temp_free_i32(tmp);
break;
case 11: /* lt: N != V -> N ^ V != 0 */
- tmp = load_cpu_field(VF);
- tmp2 = load_cpu_field(NF);
- tcg_gen_xor_i32(tmp, tmp, tmp2);
- tcg_temp_free_i32(tmp2);
+ tmp = tcg_temp_new_i32();
+ tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
+ tcg_temp_free_i32(tmp);
break;
case 12: /* gt: !Z && N == V */
inv = gen_new_label();
- tmp = load_cpu_field(ZF);
- tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
- tcg_temp_free_i32(tmp);
- tmp = load_cpu_field(VF);
- tmp2 = load_cpu_field(NF);
- tcg_gen_xor_i32(tmp, tmp, tmp2);
- tcg_temp_free_i32(tmp2);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, inv);
+ tmp = tcg_temp_new_i32();
+ tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
+ tcg_temp_free_i32(tmp);
gen_set_label(inv);
break;
case 13: /* le: Z || N != V */
- tmp = load_cpu_field(ZF);
- tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
- tcg_temp_free_i32(tmp);
- tmp = load_cpu_field(VF);
- tmp2 = load_cpu_field(NF);
- tcg_gen_xor_i32(tmp, tmp, tmp2);
- tcg_temp_free_i32(tmp2);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
+ tmp = tcg_temp_new_i32();
+ tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
+ tcg_temp_free_i32(tmp);
break;
default:
fprintf(stderr, "Bad condition code 0x%x\n", cc);
abort();
}
- tcg_temp_free_i32(tmp);
}
static const uint8_t table_logic_cc[16] = {
return 1;
}
-/* Disassemble system coprocessor (cp15) instruction. Return nonzero if
- instruction is not defined. */
-static int disas_cp15_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
-{
- uint32_t rd;
- TCGv tmp, tmp2;
-
- /* M profile cores use memory mapped registers instead of cp15. */
- if (arm_feature(env, ARM_FEATURE_M))
- return 1;
-
- if ((insn & (1 << 25)) == 0) {
- return 1;
- }
- if ((insn & (1 << 4)) == 0) {
- /* cdp */
- return 1;
- }
-
- if (IS_USER(s)) {
- return 1;
- }
-
- rd = (insn >> 12) & 0xf;
-
- tmp2 = tcg_const_i32(insn);
- if (insn & ARM_CP_RW_BIT) {
- tmp = tcg_temp_new_i32();
- gen_helper_get_cp15(tmp, cpu_env, tmp2);
- /* If the destination register is r15 then sets condition codes. */
- if (rd != 15)
- store_reg(s, rd, tmp);
- else
- tcg_temp_free_i32(tmp);
- } else {
- tmp = load_reg(s, rd);
- gen_helper_set_cp15(cpu_env, tmp2, tmp);
- tcg_temp_free_i32(tmp);
- /* Normally we would always end the TB here, but Linux
- * arch/arm/mach-pxa/sleep.S expects two instructions following
- * an MMU enable to execute from cache. Imitate this behaviour. */
- if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
- (insn & 0x0fff0fff) != 0x0e010f10)
- gen_lookup_tb(s);
- }
- tcg_temp_free_i32(tmp2);
- return 0;
-}
-
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
switch (size) {
case 0:
tmp2 = neon_load_reg(rn, pass);
- gen_bfi(tmp, tmp2, tmp, offset, 0xff);
+ tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
tcg_temp_free_i32(tmp2);
break;
case 1:
tmp2 = neon_load_reg(rn, pass);
- gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
+ tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
tcg_temp_free_i32(tmp2);
break;
case 2:
}
if (size != 2) {
tmp2 = neon_load_reg(rd, pass);
- gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
+ tcg_gen_deposit_i32(tmp, tmp2, tmp,
+ shift, size ? 16 : 8);
tcg_temp_free_i32(tmp2);
}
neon_store_reg(rd, pass, tmp);
switch (size) {
case 0: gen_helper_neon_negl_u16(var, var); break;
case 1: gen_helper_neon_negl_u32(var, var); break;
- case 2: gen_helper_neon_negl_u64(var, var); break;
+ case 2:
+ tcg_gen_neg_i64(var, var);
+ break;
default: abort();
}
}
size--;
}
shift = (insn >> 16) & ((1 << (3 + size)) - 1);
- /* To avoid excessive dumplication of ops we implement shift
+ /* To avoid excessive duplication of ops we implement shift
by immediate using the variable shift operations. */
if (op < 8) {
/* Shift by immediate:
tmp2 = neon_load_reg(rm, 0);
tmp4 = tcg_const_i32(rn);
tmp5 = tcg_const_i32(n);
- gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
+ gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
tcg_temp_free_i32(tmp);
if (insn & (1 << 6)) {
tmp = neon_load_reg(rd, 1);
tcg_gen_movi_i32(tmp, 0);
}
tmp3 = neon_load_reg(rm, 1);
- gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
+ gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
tcg_temp_free_i32(tmp5);
tcg_temp_free_i32(tmp4);
neon_store_reg(rd, 0, tmp2);
}
gen_set_pc_im(s->pc);
s->is_jmp = DISAS_WFI;
- break;
+ return 0;
default:
break;
}
tcg_gen_trunc_i64_i32(tmp, tmp64);
store_reg(s, rt, tmp);
tcg_gen_shri_i64(tmp64, tmp64, 32);
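+ /* store_reg() freed the low-word temp above, so use a fresh one for
+  * the high word, and release the 64-bit temp once it has been read. */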
+ tmp = tcg_temp_new_i32();
tcg_gen_trunc_i64_i32(tmp, tmp64);
+ tcg_temp_free_i64(tmp64);
store_reg(s, rt2, tmp);
} else {
TCGv tmp;
return 0;
}
- /* Fallback code: handle coprocessor registers not yet converted
- * to ARMCPRegInfo.
- */
- switch (cpnum) {
- case 15:
- return disas_cp15_insn (env, s, insn);
- default:
- return 1;
- }
+ return 1;
}
/* Load/Store exclusive instructions are implemented by remembering
the value/address loaded, and seeing if these are the same
- when the store is performed. This should be is sufficient to implement
+ when the store is performed. This should be sufficient to implement
the architecturally mandated semantics, and avoids having to monitor
regular stores.
TCGv addr;
TCGv_i64 tmp64;
- insn = arm_ldl_code(s->pc, s->bswap_code);
+ insn = arm_ldl_code(env, s->pc, s->bswap_code);
s->pc += 4;
/* M variants do not implement ARM mode. */
tmp = load_cpu_field(spsr);
} else {
tmp = tcg_temp_new_i32();
- gen_helper_cpsr_read(tmp);
+ gen_helper_cpsr_read(tmp, cpu_env);
}
store_reg(s, rd, tmp);
}
tmp = load_reg(s, rm);
tmp2 = load_reg(s, rn);
if (op1 & 2)
- gen_helper_double_saturate(tmp2, tmp2);
+ gen_helper_double_saturate(tmp2, cpu_env, tmp2);
if (op1 & 1)
- gen_helper_sub_saturate(tmp, tmp, tmp2);
+ gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
else
- gen_helper_add_saturate(tmp, tmp, tmp2);
+ gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
break;
tcg_temp_free_i64(tmp64);
if ((sh & 2) == 0) {
tmp2 = load_reg(s, rn);
- gen_helper_add_setq(tmp, tmp, tmp2);
+ gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp2);
}
store_reg(s, rd, tmp);
} else {
if (op1 == 0) {
tmp2 = load_reg(s, rn);
- gen_helper_add_setq(tmp, tmp, tmp2);
+ gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp2);
}
store_reg(s, rd, tmp);
if (IS_USER(s)) {
goto illegal_op;
}
- gen_helper_sub_cc(tmp, tmp, tmp2);
+ gen_sub_CC(tmp, tmp, tmp2);
gen_exception_return(s, tmp);
} else {
if (set_cc) {
- gen_helper_sub_cc(tmp, tmp, tmp2);
+ gen_sub_CC(tmp, tmp, tmp2);
} else {
tcg_gen_sub_i32(tmp, tmp, tmp2);
}
break;
case 0x03:
if (set_cc) {
- gen_helper_sub_cc(tmp, tmp2, tmp);
+ gen_sub_CC(tmp, tmp2, tmp);
} else {
tcg_gen_sub_i32(tmp, tmp2, tmp);
}
break;
case 0x04:
if (set_cc) {
- gen_helper_add_cc(tmp, tmp, tmp2);
+ gen_add_CC(tmp, tmp, tmp2);
} else {
tcg_gen_add_i32(tmp, tmp, tmp2);
}
break;
case 0x05:
if (set_cc) {
- gen_helper_adc_cc(tmp, tmp, tmp2);
+ gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2);
} else {
gen_add_carry(tmp, tmp, tmp2);
}
break;
case 0x06:
if (set_cc) {
- gen_helper_sbc_cc(tmp, tmp, tmp2);
+ gen_helper_sbc_cc(tmp, cpu_env, tmp, tmp2);
} else {
gen_sub_carry(tmp, tmp, tmp2);
}
break;
case 0x07:
if (set_cc) {
- gen_helper_sbc_cc(tmp, tmp2, tmp);
+ gen_helper_sbc_cc(tmp, cpu_env, tmp2, tmp);
} else {
gen_sub_carry(tmp, tmp2, tmp);
}
break;
case 0x0a:
if (set_cc) {
- gen_helper_sub_cc(tmp, tmp, tmp2);
+ gen_sub_CC(tmp, tmp, tmp2);
}
tcg_temp_free_i32(tmp);
break;
case 0x0b:
if (set_cc) {
- gen_helper_add_cc(tmp, tmp, tmp2);
+ gen_add_CC(tmp, tmp, tmp2);
}
tcg_temp_free_i32(tmp);
break;
sh = (insn >> 16) & 0x1f;
tmp2 = tcg_const_i32(sh);
if (insn & (1 << 22))
- gen_helper_usat(tmp, tmp, tmp2);
+ gen_helper_usat(tmp, cpu_env, tmp, tmp2);
else
- gen_helper_ssat(tmp, tmp, tmp2);
+ gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
} else if ((insn & 0x00300fe0) == 0x00200f20) {
sh = (insn >> 16) & 0x1f;
tmp2 = tcg_const_i32(sh);
if (insn & (1 << 22))
- gen_helper_usat16(tmp, tmp, tmp2);
+ gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
else
- gen_helper_ssat16(tmp, tmp, tmp2);
+ gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
} else if ((insn & 0x00700fe0) == 0x00000fa0) {
* however it may overflow considered as a signed
* operation, in which case we must set the Q flag.
*/
- gen_helper_add_setq(tmp, tmp, tmp2);
+ gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
}
tcg_temp_free_i32(tmp2);
if (insn & (1 << 22)) {
if (rd != 15)
{
tmp2 = load_reg(s, rd);
- gen_helper_add_setq(tmp, tmp, tmp2);
+ gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp2);
}
store_reg(s, rn, tmp);
}
if (i != 32) {
tmp2 = load_reg(s, rd);
- gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
+ tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
tcg_temp_free_i32(tmp2);
}
store_reg(s, rd, tmp);
tmp = gen_ld32(addr, IS_USER(s));
if (user) {
tmp2 = tcg_const_i32(i);
- gen_helper_set_user_reg(tmp2, tmp);
+ gen_helper_set_user_reg(cpu_env, tmp2, tmp);
tcg_temp_free_i32(tmp2);
tcg_temp_free_i32(tmp);
} else if (i == rn) {
} else if (user) {
tmp = tcg_temp_new_i32();
tmp2 = tcg_const_i32(i);
- gen_helper_get_user_reg(tmp, tmp2);
+ gen_helper_get_user_reg(tmp, cpu_env, tmp2);
tcg_temp_free_i32(tmp2);
} else {
tmp = load_reg(s, i);
break;
case 8: /* add */
if (conds)
- gen_helper_add_cc(t0, t0, t1);
+ gen_add_CC(t0, t0, t1);
else
tcg_gen_add_i32(t0, t0, t1);
break;
case 10: /* adc */
if (conds)
- gen_helper_adc_cc(t0, t0, t1);
+ gen_helper_adc_cc(t0, cpu_env, t0, t1);
else
gen_adc(t0, t1);
break;
case 11: /* sbc */
if (conds)
- gen_helper_sbc_cc(t0, t0, t1);
+ gen_helper_sbc_cc(t0, cpu_env, t0, t1);
else
gen_sub_carry(t0, t0, t1);
break;
case 13: /* sub */
if (conds)
- gen_helper_sub_cc(t0, t0, t1);
+ gen_sub_CC(t0, t0, t1);
else
tcg_gen_sub_i32(t0, t0, t1);
break;
case 14: /* rsb */
if (conds)
- gen_helper_sub_cc(t0, t1, t0);
+ gen_sub_CC(t0, t1, t0);
else
tcg_gen_sub_i32(t0, t1, t0);
break;
/* Fall through to 32-bit decode. */
}
- insn = arm_lduw_code(s->pc, s->bswap_code);
+ insn = arm_lduw_code(env, s->pc, s->bswap_code);
s->pc += 2;
insn |= (uint32_t)insn_hw1 << 16;
gen_st32(tmp, addr, 0);
tcg_gen_addi_i32(addr, addr, 4);
tmp = tcg_temp_new_i32();
- gen_helper_cpsr_read(tmp);
+ gen_helper_cpsr_read(tmp, cpu_env);
gen_st32(tmp, addr, 0);
if (insn & (1 << 21)) {
if ((insn & (1 << 24)) == 0) {
tmp = load_reg(s, rn);
tmp2 = load_reg(s, rm);
if (op & 1)
- gen_helper_double_saturate(tmp, tmp);
+ gen_helper_double_saturate(tmp, cpu_env, tmp);
if (op & 2)
- gen_helper_sub_saturate(tmp, tmp2, tmp);
+ gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
else
- gen_helper_add_saturate(tmp, tmp, tmp2);
+ gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp2);
} else {
tmp = load_reg(s, rn);
tcg_temp_free_i32(tmp2);
if (rs != 15) {
tmp2 = load_reg(s, rs);
- gen_helper_add_setq(tmp, tmp, tmp2);
+ gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp2);
}
break;
* however it may overflow considered as a signed
* operation, in which case we must set the Q flag.
*/
- gen_helper_add_setq(tmp, tmp, tmp2);
+ gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
}
tcg_temp_free_i32(tmp2);
if (rs != 15)
{
tmp2 = load_reg(s, rs);
- gen_helper_add_setq(tmp, tmp, tmp2);
+ gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp2);
}
break;
if (rs != 15)
{
tmp2 = load_reg(s, rs);
- gen_helper_add_setq(tmp, tmp, tmp2);
+ gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp2);
}
break;
gen_helper_v7m_mrs(tmp, cpu_env, addr);
tcg_temp_free_i32(addr);
} else {
- gen_helper_cpsr_read(tmp);
+ gen_helper_cpsr_read(tmp, cpu_env);
}
store_reg(s, rd, tmp);
break;
imm = imm + 1 - shift;
if (imm != 32) {
tmp2 = load_reg(s, rd);
- gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
+ tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
tcg_temp_free_i32(tmp2);
}
break;
if (op & 4) {
/* Unsigned. */
if ((op & 1) && shift == 0)
- gen_helper_usat16(tmp, tmp, tmp2);
+ gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
else
- gen_helper_usat(tmp, tmp, tmp2);
+ gen_helper_usat(tmp, cpu_env, tmp, tmp2);
} else {
/* Signed. */
if ((op & 1) && shift == 0)
- gen_helper_ssat16(tmp, tmp, tmp2);
+ gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
else
- gen_helper_ssat(tmp, tmp, tmp2);
+ gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
}
tcg_temp_free_i32(tmp2);
break;
}
}
- insn = arm_lduw_code(s->pc, s->bswap_code);
+ insn = arm_lduw_code(env, s->pc, s->bswap_code);
s->pc += 2;
switch (insn >> 12) {
if (s->condexec_mask)
tcg_gen_sub_i32(tmp, tmp, tmp2);
else
- gen_helper_sub_cc(tmp, tmp, tmp2);
+ gen_sub_CC(tmp, tmp, tmp2);
} else {
if (s->condexec_mask)
tcg_gen_add_i32(tmp, tmp, tmp2);
else
- gen_helper_add_cc(tmp, tmp, tmp2);
+ gen_add_CC(tmp, tmp, tmp2);
}
tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
tcg_gen_movi_i32(tmp2, insn & 0xff);
switch (op) {
case 1: /* cmp */
- gen_helper_sub_cc(tmp, tmp, tmp2);
+ gen_sub_CC(tmp, tmp, tmp2);
tcg_temp_free_i32(tmp);
tcg_temp_free_i32(tmp2);
break;
if (s->condexec_mask)
tcg_gen_add_i32(tmp, tmp, tmp2);
else
- gen_helper_add_cc(tmp, tmp, tmp2);
+ gen_add_CC(tmp, tmp, tmp2);
tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
break;
if (s->condexec_mask)
tcg_gen_sub_i32(tmp, tmp, tmp2);
else
- gen_helper_sub_cc(tmp, tmp, tmp2);
+ gen_sub_CC(tmp, tmp, tmp2);
tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
break;
case 1: /* cmp */
tmp = load_reg(s, rd);
tmp2 = load_reg(s, rm);
- gen_helper_sub_cc(tmp, tmp, tmp2);
+ gen_sub_CC(tmp, tmp, tmp2);
tcg_temp_free_i32(tmp2);
tcg_temp_free_i32(tmp);
break;
break;
case 0x2: /* lsl */
if (s->condexec_mask) {
- gen_helper_shl(tmp2, tmp2, tmp);
+ gen_shl(tmp2, tmp2, tmp);
} else {
- gen_helper_shl_cc(tmp2, tmp2, tmp);
+ gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
gen_logic_CC(tmp2);
}
break;
case 0x3: /* lsr */
if (s->condexec_mask) {
- gen_helper_shr(tmp2, tmp2, tmp);
+ gen_shr(tmp2, tmp2, tmp);
} else {
- gen_helper_shr_cc(tmp2, tmp2, tmp);
+ gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
gen_logic_CC(tmp2);
}
break;
case 0x4: /* asr */
if (s->condexec_mask) {
- gen_helper_sar(tmp2, tmp2, tmp);
+ gen_sar(tmp2, tmp2, tmp);
} else {
- gen_helper_sar_cc(tmp2, tmp2, tmp);
+ gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
gen_logic_CC(tmp2);
}
break;
if (s->condexec_mask)
gen_adc(tmp, tmp2);
else
- gen_helper_adc_cc(tmp, tmp, tmp2);
+ gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2);
break;
case 0x6: /* sbc */
if (s->condexec_mask)
gen_sub_carry(tmp, tmp, tmp2);
else
- gen_helper_sbc_cc(tmp, tmp, tmp2);
+ gen_helper_sbc_cc(tmp, cpu_env, tmp, tmp2);
break;
case 0x7: /* ror */
if (s->condexec_mask) {
tcg_gen_andi_i32(tmp, tmp, 0x1f);
tcg_gen_rotr_i32(tmp2, tmp2, tmp);
} else {
- gen_helper_ror_cc(tmp2, tmp2, tmp);
+ gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
gen_logic_CC(tmp2);
}
break;
if (s->condexec_mask)
tcg_gen_neg_i32(tmp, tmp2);
else
- gen_helper_sub_cc(tmp, tmp, tmp2);
+ gen_sub_CC(tmp, tmp, tmp2);
break;
case 0xa: /* cmp */
- gen_helper_sub_cc(tmp, tmp, tmp2);
+ gen_sub_CC(tmp, tmp, tmp2);
rd = 16;
break;
case 0xb: /* cmn */
- gen_helper_add_cc(tmp, tmp, tmp2);
+ gen_add_CC(tmp, tmp, tmp2);
rd = 16;
break;
case 0xc: /* orr */
dc->tb = tb;
- gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
+ gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
dc->is_jmp = DISAS_NEXT;
dc->pc = pc_start;
}
}
if (search_pc) {
- j = gen_opc_ptr - gen_opc_buf;
+ j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
if (lj < j) {
lj++;
while (lj < j)
- gen_opc_instr_start[lj++] = 0;
+ tcg_ctx.gen_opc_instr_start[lj++] = 0;
}
- gen_opc_pc[lj] = dc->pc;
+ tcg_ctx.gen_opc_pc[lj] = dc->pc;
gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
- gen_opc_instr_start[lj] = 1;
- gen_opc_icount[lj] = num_insns;
+ tcg_ctx.gen_opc_instr_start[lj] = 1;
+ tcg_ctx.gen_opc_icount[lj] = num_insns;
}
if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
gen_io_start();
- if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
+ if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
tcg_gen_debug_insn_start(dc->pc);
}
* Also stop translation when a page boundary is reached. This
* ensures prefetch aborts occur at the right place. */
num_insns ++;
- } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
+ } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
!env->singlestep_enabled &&
!singlestep &&
dc->pc < next_page_start &&
} else {
/* While branches must always occur at the end of an IT block,
there are a few other things that can cause us to terminate
- the TB in the middel of an IT block:
+ the TB in the middle of an IT block:
- Exception generating instructions (bkpt, swi, undefined).
- Page boundaries.
- Hardware watchpoints.
/* nothing more to generate */
break;
case DISAS_WFI:
- gen_helper_wfi();
+ gen_helper_wfi(cpu_env);
break;
case DISAS_SWI:
gen_exception(EXCP_SWI);
done_generating:
gen_icount_end(tb, num_insns);
- *gen_opc_ptr = INDEX_op_end;
+ *tcg_ctx.gen_opc_ptr = INDEX_op_end;
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
qemu_log("----------------\n");
qemu_log("IN: %s\n", lookup_symbol(pc_start));
- log_target_disas(pc_start, dc->pc - pc_start,
+ log_target_disas(env, pc_start, dc->pc - pc_start,
dc->thumb | (dc->bswap_code << 1));
qemu_log("\n");
}
#endif
if (search_pc) {
- j = gen_opc_ptr - gen_opc_buf;
+ j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
lj++;
while (lj <= j)
- gen_opc_instr_start[lj++] = 0;
+ tcg_ctx.gen_opc_instr_start[lj++] = 0;
} else {
tb->size = dc->pc - pc_start;
tb->icount = num_insns;
int flags)
{
int i;
-#if 0
- union {
- uint32_t i;
- float s;
- } s0, s1;
- CPU_DoubleU d;
- /* ??? This assumes float64 and double have the same layout.
- Oh well, it's only debug dumps. */
- union {
- float64 f64;
- double d;
- } d0;
-#endif
uint32_t psr;
for(i=0;i<16;i++) {
psr & CPSR_T ? 'T' : 'A',
cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
-#if 0
- for (i = 0; i < 16; i++) {
- d.d = env->vfp.regs[i];
- s0.i = d.l.lower;
- s1.i = d.l.upper;
- d0.f64 = d.d;
- cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
- i * 2, (int)s0.i, s0.s,
- i * 2 + 1, (int)s1.i, s1.s,
- i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
- d0.d);
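+ /* Dump the VFP registers as well when FPU state was requested:
+  * VFP has 16 double-precision registers, VFP3 adds another 16. */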
+ if (flags & CPU_DUMP_FPU) {
+ int numvfpregs = 0;
+ if (arm_feature(env, ARM_FEATURE_VFP)) {
+ numvfpregs += 16;
+ }
+ if (arm_feature(env, ARM_FEATURE_VFP3)) {
+ numvfpregs += 16;
+ }
+ for (i = 0; i < numvfpregs; i++) {
+ uint64_t v = float64_val(env->vfp.regs[i]);
+ cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
+ i * 2, (uint32_t)v,
+ i * 2 + 1, (uint32_t)(v >> 32),
+ i, v);
+ }
+ cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
}
- cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
-#endif
}
void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
{
- env->regs[15] = gen_opc_pc[pc_pos];
+ env->regs[15] = tcg_ctx.gen_opc_pc[pc_pos];
env->condexec_bits = gen_opc_condexec_bits[pc_pos];
}