#include "exec-all.h"
#include "disas.h"
#include "tcg-op.h"
+#include "qemu-log.h"
#define GEN_HELPER 1
#include "helpers.h"
#define DISAS_WFI 4
#define DISAS_SWI 5
-/* XXX: move that elsewhere */
-extern FILE *logfile;
-extern int loglevel;
-
static TCGv cpu_env;
/* We reuse the same 64-bit temporaries for efficiency. */
static TCGv cpu_V0, cpu_V1, cpu_M0;
static TCGv cpu_T[2];
static TCGv cpu_F0s, cpu_F1s, cpu_F0d, cpu_F1d;
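+/* gen-icount.h references ICOUNT_TEMP as a scratch temporary for its
+   per-TB instruction-count prologue; cpu_T[0] is reused for that purpose. */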
+#define ICOUNT_TEMP cpu_T[0]
+#include "gen-icount.h"
+
/* initialize TCG globals. */
void arm_translate_init(void)
{
#define gen_op_rorl_T1_im(im) tcg_gen_rori_i32(cpu_T[1], cpu_T[1], im)
/* Value extensions. */
-#define gen_uxtb(var) tcg_gen_andi_i32(var, var, 0xff)
-#define gen_uxth(var) tcg_gen_andi_i32(var, var, 0xffff)
+#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
+#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
{
TCGv tmp1 = new_tmp();
TCGv tmp2 = new_tmp();
- tcg_gen_ext8s_i32(tmp1, a);
- tcg_gen_ext8s_i32(tmp2, b);
+ tcg_gen_ext16s_i32(tmp1, a);
+ tcg_gen_ext16s_i32(tmp2, b);
tcg_gen_mul_i32(tmp1, tmp1, tmp2);
dead_tmp(tmp2);
tcg_gen_sari_i32(a, a, 16);
#define gen_sbc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_rsc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[1], cpu_T[0])
-/* FIXME: Implement this natively. */
-static inline void tcg_gen_not_i32(TCGv t0, TCGv t1)
-{
- tcg_gen_xori_i32(t0, t1, ~0);
-}
-
-/* FIXME: Implement this natively. */
-static inline void tcg_gen_neg_i64(TCGv dest, TCGv src)
-{
- tcg_gen_sub_i64(dest, tcg_const_i64(0), src);
-}
-
/* T0 &= ~T1. Clobbers T1. */
/* FIXME: Implement bic natively. */
static inline void tcg_gen_bic_i32(TCGv dest, TCGv t0, TCGv t1)
{
TCGv tmp;
TCGv tmp2;
- TCGv zero;
int inv;
- zero = tcg_const_i32(0);
switch (cc) {
case 0: /* eq: Z */
tmp = load_cpu_field(ZF);
- tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, label);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
break;
case 1: /* ne: !Z */
tmp = load_cpu_field(ZF);
- tcg_gen_brcond_i32(TCG_COND_NE, tmp, zero, label);
+ tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
break;
case 2: /* cs: C */
tmp = load_cpu_field(CF);
- tcg_gen_brcond_i32(TCG_COND_NE, tmp, zero, label);
+ tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
break;
case 3: /* cc: !C */
tmp = load_cpu_field(CF);
- tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, label);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
break;
case 4: /* mi: N */
tmp = load_cpu_field(NF);
- tcg_gen_brcond_i32(TCG_COND_LT, tmp, zero, label);
+ tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
break;
case 5: /* pl: !N */
tmp = load_cpu_field(NF);
- tcg_gen_brcond_i32(TCG_COND_GE, tmp, zero, label);
+ tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
break;
case 6: /* vs: V */
tmp = load_cpu_field(VF);
- tcg_gen_brcond_i32(TCG_COND_LT, tmp, zero, label);
+ tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
break;
case 7: /* vc: !V */
tmp = load_cpu_field(VF);
- tcg_gen_brcond_i32(TCG_COND_GE, tmp, zero, label);
+ tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
break;
case 8: /* hi: C && !Z */
inv = gen_new_label();
tmp = load_cpu_field(CF);
- tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, inv);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
dead_tmp(tmp);
tmp = load_cpu_field(ZF);
- tcg_gen_brcond_i32(TCG_COND_NE, tmp, zero, label);
+ tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
gen_set_label(inv);
break;
case 9: /* ls: !C || Z */
tmp = load_cpu_field(CF);
- tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, label);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
dead_tmp(tmp);
tmp = load_cpu_field(ZF);
- tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, label);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
break;
case 10: /* ge: N == V -> N ^ V == 0 */
tmp = load_cpu_field(VF);
tmp2 = load_cpu_field(NF);
tcg_gen_xor_i32(tmp, tmp, tmp2);
dead_tmp(tmp2);
- tcg_gen_brcond_i32(TCG_COND_GE, tmp, zero, label);
+ tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
break;
case 11: /* lt: N != V -> N ^ V != 0 */
tmp = load_cpu_field(VF);
tmp2 = load_cpu_field(NF);
tcg_gen_xor_i32(tmp, tmp, tmp2);
dead_tmp(tmp2);
- tcg_gen_brcond_i32(TCG_COND_LT, tmp, zero, label);
+ tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
break;
case 12: /* gt: !Z && N == V */
inv = gen_new_label();
tmp = load_cpu_field(ZF);
- tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, inv);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
dead_tmp(tmp);
tmp = load_cpu_field(VF);
tmp2 = load_cpu_field(NF);
tcg_gen_xor_i32(tmp, tmp, tmp2);
dead_tmp(tmp2);
- tcg_gen_brcond_i32(TCG_COND_GE, tmp, zero, label);
+ tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
gen_set_label(inv);
break;
case 13: /* le: Z || N != V */
tmp = load_cpu_field(ZF);
- tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, label);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
dead_tmp(tmp);
tmp = load_cpu_field(VF);
tmp2 = load_cpu_field(NF);
tcg_gen_xor_i32(tmp, tmp, tmp2);
dead_tmp(tmp2);
- tcg_gen_brcond_i32(TCG_COND_LT, tmp, zero, label);
+ tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
break;
default:
fprintf(stderr, "Bad condition code 0x%x\n", cc);
store_cpu_field(tmp, regs[15]);
}
-static inline void gen_set_pc_T0(void)
-{
- tcg_gen_st_i32(cpu_T[0], cpu_env, offsetof(CPUState, regs[15]));
-}
-
static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
{
TCGv tmp;
gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
}
-#define VFP_OP1i(name) \
+#define VFP_OP1(name) \
static inline void gen_vfp_##name(int dp, int arg) \
{ \
if (dp) \
static inline void gen_vfp_F1_ld0(int dp)
{
if (dp)
- tcg_gen_movi_i64(cpu_F0d, 0);
+ tcg_gen_movi_i64(cpu_F1d, 0);
else
- tcg_gen_movi_i32(cpu_F0s, 0);
+ tcg_gen_movi_i32(cpu_F1s, 0);
}
static inline void gen_vfp_uito(int dp)
static inline void gen_vfp_sito(int dp)
{
if (dp)
- gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
+ gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
else
- gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
+ gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
}
static inline void gen_vfp_toui(int dp)
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
iwmmxt_load_reg(cpu_V1, rn);
- tcg_gen_andi_i64(cpu_V1, cpu_V1, 0xffffffffu);
+ tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
gen_set_pc_im(s->pc);
tmp = load_reg(s, rd);
gen_helper_set_cp(cpu_env, tcg_const_i32(insn), tmp);
+ dead_tmp(tmp);
}
return 0;
}
TCGv tmp = new_tmp();
if (shift)
tcg_gen_shri_i32(var, var, shift);
- tcg_gen_andi_i32(var, var, 0xff);
+ tcg_gen_ext8u_i32(var, var);
tcg_gen_shli_i32(tmp, var, 8);
tcg_gen_or_i32(var, var, tmp);
tcg_gen_shli_i32(tmp, var, 16);
static void gen_neon_dup_low16(TCGv var)
{
TCGv tmp = new_tmp();
- tcg_gen_andi_i32(var, var, 0xffff);
+ tcg_gen_ext16u_i32(var, var);
tcg_gen_shli_i32(tmp, var, 16);
tcg_gen_or_i32(var, var, tmp);
dead_tmp(tmp);
tmp = load_cpu_field(vfp.xregs[rn]);
break;
case ARM_VFP_FPSCR:
- if (rd == 15) {
+ if (rd == 15) {
tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
} else {
else
i |= 0x800;
n |= i << 19;
- tcg_gen_movi_i32(cpu_F0d, ((uint64_t)n) << 32);
+ tcg_gen_movi_i32(cpu_F0s, n);
}
break;
case 15: /* extension space */
static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
- if (__builtin_expect(s->singlestep_enabled, 0)) {
+ if (unlikely(s->singlestep_enabled)) {
/* An indirect jump so that we still trigger the debug exception. */
if (s->thumb)
dest |= 1;
static void gen_exception_return(DisasContext *s)
{
TCGv tmp;
- gen_set_pc_T0();
+ gen_movl_reg_T0(s, 15);
tmp = load_cpu_field(spsr);
gen_set_cpsr(tmp, 0xffffffff);
dead_tmp(tmp);
}
} else /* size == 0 */ {
if (load) {
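+ /* TCGV_UNUSED marks the temporary as invalid, which keeps the compiler
+    from warning about a possibly uninitialized use on paths that never
+    assign it. */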
+ TCGV_UNUSED(tmp2);
for (n = 0; n < 4; n++) {
tmp = gen_ld8u(cpu_T[1], IS_USER(s));
gen_op_addl_T1_im(stride);
break;
case 3:
return 1;
+ default: /* Avoid compiler warnings. */
+ abort();
}
gen_op_addl_T1_im(1 << size);
tmp2 = new_tmp();
case 2:
tmp = gen_ld32(cpu_T[1], IS_USER(s));
break;
+ default: /* Avoid compiler warnings. */
+ abort();
}
if (size != 2) {
tmp2 = neon_load_reg(rd, pass);
NEON_GET_REG(T0, rn, 1);
gen_neon_movl_scratch_T0(2);
}
+ TCGV_UNUSED(tmp3);
for (pass = 0; pass < 2; pass++) {
if (src1_wide) {
neon_load_reg64(cpu_V0, rn + pass);
+ TCGV_UNUSED(tmp);
} else {
if (pass == 1 && rd == rn) {
gen_neon_movl_T0_scratch(2);
}
if (src2_wide) {
neon_load_reg64(cpu_V1, rm + pass);
+ TCGV_UNUSED(tmp2);
} else {
if (pass == 1 && rd == rm) {
gen_neon_movl_T0_scratch(2);
case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
if (size == 3)
return 1;
+ TCGV_UNUSED(tmp2);
for (pass = 0; pass < 2; pass++) {
neon_load_reg64(cpu_V0, rm + pass);
tmp = new_tmp();
/* Coprocessor double register transfer. */
} else if ((insn & 0x0f000010) == 0x0e000010) {
/* Additional coprocessor register transfer. */
- } else if ((insn & 0x0ff10010) == 0x01000000) {
+ } else if ((insn & 0x0ff10020) == 0x01000000) {
uint32_t mask;
uint32_t val;
/* cps (privileged) */
if (insn & (1 << 18))
val |= mask;
}
- if (insn & (1 << 14)) {
+ if (insn & (1 << 17)) {
mask |= CPSR_M;
val |= (insn & 0x1f);
}
} else {
/* MOVT */
tmp = load_reg(s, rd);
- tcg_gen_andi_i32(tmp, tmp, 0xffff);
+ tcg_gen_ext16u_i32(tmp, tmp);
tcg_gen_ori_i32(tmp, tmp, val << 16);
}
store_reg(s, rd, tmp);
gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
dead_tmp(tmp2);
if (op1 == 2) {
- tmp = tcg_temp_new(TCG_TYPE_I64);
- tcg_gen_ext_i32_i64(tmp, cpu_T[0]);
- gen_addq(s, tmp, rn, rd);
- gen_storeq_reg(s, rn, rd, tmp);
+ tmp2 = tcg_temp_new(TCG_TYPE_I64);
+ tcg_gen_ext_i32_i64(tmp2, tmp);
+ dead_tmp(tmp);
+ gen_addq(s, tmp2, rn, rd);
+ gen_storeq_reg(s, rn, rd, tmp2);
} else {
if (op1 == 0) {
tmp2 = load_reg(s, rn);
int label = gen_new_label();
rm = insn & 0xf;
gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
- tcg_gen_brcond_i32(TCG_COND_NE, cpu_T[0],
- tcg_const_i32(0), label);
+ tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
+ 0, label);
tmp = load_reg(s,rm);
gen_st32(tmp, cpu_T[1], IS_USER(s));
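+ /* Branch target for a failed exclusive check: the store is skipped but
+    the status value in T0 is still written back below. */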
+ gen_set_label(label);
gen_movl_reg_T0(s, rd);
}
} else {
tmp = load_reg(s, rn);
tmp2 = load_reg(s, rm);
shift = (insn >> 7) & 0x1f;
- if (shift)
- tcg_gen_shli_i32(tmp2, tmp2, shift);
if (insn & (1 << 6)) {
/* pkhtb */
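+ /* A shift field of zero encodes ASR #32; shifting by 31 yields the same
+    sign-filled result. */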
+ if (shift == 0)
+ shift = 31;
+ tcg_gen_sari_i32(tmp2, tmp2, shift);
tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
- tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
+ tcg_gen_ext16u_i32(tmp2, tmp2);
} else {
/* pkhbt */
- tcg_gen_andi_i32(tmp, tmp, 0xffff);
+ if (shift)
+ tcg_gen_shli_i32(tmp2, tmp2, shift);
+ tcg_gen_ext16u_i32(tmp, tmp);
tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
}
tcg_gen_or_i32(tmp, tmp, tmp2);
+ dead_tmp(tmp2);
store_reg(s, rd, tmp);
} else if ((insn & 0x00200020) == 0x00200000) {
/* [us]sat */
tmp2 = tcg_temp_new(TCG_TYPE_I64);
tcg_gen_ext_i32_i64(tmp2, tmp);
dead_tmp(tmp);
- gen_addq(s, tmp2, rn, rd);
- gen_storeq_reg(s, rn, rd, tmp2);
+ gen_addq(s, tmp2, rd, rn);
+ gen_storeq_reg(s, rd, rn, tmp2);
} else {
/* smuad, smusd, smlad, smlsd */
- if (rn != 15)
+ if (rd != 15)
{
- tmp2 = load_reg(s, rn);
+ tmp2 = load_reg(s, rd);
gen_helper_add_setq(tmp, tmp, tmp2);
dead_tmp(tmp2);
}
- store_reg(s, rd, tmp);
+ store_reg(s, rn, tmp);
}
}
break;
/* compute total size */
loaded_base = 0;
+ TCGV_UNUSED(loaded_var);
n = 0;
for(i=0;i<16;i++) {
if (insn & (1 << i))
if (!(arm_feature(env, ARM_FEATURE_THUMB2)
|| arm_feature (env, ARM_FEATURE_M))) {
- /* Thumb-1 cores may need to tread bl and blx as a pair of
+ /* Thumb-1 cores may need to treat bl and blx as a pair of
16-bit instructions to get correct prefetch abort behavior. */
insn = insn_hw1;
if ((insn & (1 << 12)) == 0) {
/* Second half of bl. */
offset = ((insn & 0x7ff) << 1) | 1;
tmp = load_reg(s, 14);
- tcg_gen_addi_i32(tmp, tmp, 14);
+ tcg_gen_addi_i32(tmp, tmp, offset);
tmp2 = new_tmp();
tcg_gen_movi_i32(tmp2, s->pc | 1);
} else {
int label = gen_new_label();
gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
- tcg_gen_brcond_i32(TCG_COND_NE, cpu_T[0],
- tcg_const_i32(0), label);
+ tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
+ 0, label);
tmp = load_reg(s, rs);
gen_st32(tmp, cpu_T[1], IS_USER(s));
gen_set_label(label);
int label = gen_new_label();
/* Must use a global that is not killed by the branch. */
gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
- tcg_gen_brcond_i32(TCG_COND_NE, cpu_T[0], tcg_const_i32(0),
- label);
+ tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0], 0, label);
tmp = load_reg(s, rs);
switch (op) {
case 0:
if (insn & (1 << 23)) {
/* movt */
tmp = load_reg(s, rd);
- tcg_gen_andi_i32(tmp, tmp, 0xffff);
+ tcg_gen_ext16u_i32(tmp, tmp);
tcg_gen_ori_i32(tmp, tmp, imm << 16);
} else {
/* movw */
break;
case 0x9: /* neg */
if (s->condexec_mask)
- gen_op_subl_T0_T1();
+ tcg_gen_neg_i32(cpu_T[0], cpu_T[1]);
else
gen_op_subl_T0_T1_cc();
break;
tmp = load_reg(s, 13);
val = (insn & 0x7f) * 4;
if (insn & (1 << 7))
- val = -(int32_t)val;
+ val = -(int32_t)val;
tcg_gen_addi_i32(tmp, tmp, val);
store_reg(s, 13, tmp);
break;
tcg_gen_addi_i32(addr, addr, 4);
}
}
+ TCGV_UNUSED(tmp);
if (insn & (1 << 8)) {
if (insn & (1 << 11)) {
/* pop pc */
case 1: case 3: case 9: case 11: /* cbz/cbnz */
rm = insn & 7;
tmp = load_reg(s, rm);
- tmp2 = tcg_const_i32(0);
s->condlabel = gen_new_label();
s->condjmp = 1;
if (insn & (1 << 11))
- tcg_gen_brcond_i32(TCG_COND_EQ, tmp, tmp2, s->condlabel);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
else
- tcg_gen_brcond_i32(TCG_COND_NE, tmp, tmp2, s->condlabel);
+ tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
dead_tmp(tmp);
offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
val = (uint32_t)s->pc + 2;
if (cond == 0xf) {
/* swi */
gen_set_condexec(s);
- gen_set_pc_im(s->pc | 1);
+ gen_set_pc_im(s->pc);
s->is_jmp = DISAS_SWI;
break;
}
case 15:
if (disas_thumb2_insn(env, s, insn))
- goto undef32;
+ goto undef32;
break;
}
return;
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
basic block 'tb'. If search_pc is TRUE, also generate PC
information for each intermediate instruction. */
-static inline int gen_intermediate_code_internal(CPUState *env,
- TranslationBlock *tb,
- int search_pc)
+static inline void gen_intermediate_code_internal(CPUState *env,
+ TranslationBlock *tb,
+ int search_pc)
{
DisasContext dc1, *dc = &dc1;
uint16_t *gen_opc_end;
int j, lj;
target_ulong pc_start;
uint32_t next_page_start;
+ int num_insns;
+ int max_insns;
/* generate intermediate code */
num_temps = 0;
cpu_M0 = tcg_temp_new(TCG_TYPE_I64);
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
lj = -1;
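+ /* With icount, CF_COUNT_MASK in tb->cflags limits how many guest
+    instructions this TB may contain; zero means no explicit limit was
+    requested, so fall back to the maximum. */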
+ num_insns = 0;
+ max_insns = tb->cflags & CF_COUNT_MASK;
+ if (max_insns == 0)
+ max_insns = CF_COUNT_MASK;
+
+ gen_icount_start();
/* Reset the conditional execution bits immediately. This avoids
complications trying to do it at the end of the block. */
if (env->condexec_bits)
store_cpu_field(tmp, condexec_bits);
}
do {
-#ifndef CONFIG_USER_ONLY
+#ifdef CONFIG_USER_ONLY
+ /* Intercept jump to the magic kernel page. */
+ if (dc->pc >= 0xffff0000) {
+ /* We always get here via a jump, so know we are not in a
+ conditional execution block. */
+ gen_exception(EXCP_KERNEL_TRAP);
+ dc->is_jmp = DISAS_UPDATE;
+ break;
+ }
+#else
if (dc->pc >= 0xfffffff0 && IS_M(env)) {
/* We always get here via a jump, so know we are not in a
conditional execution block. */
gen_exception(EXCP_EXCEPTION_EXIT);
+ dc->is_jmp = DISAS_UPDATE;
+ break;
}
#endif
}
gen_opc_pc[lj] = dc->pc;
gen_opc_instr_start[lj] = 1;
+ gen_opc_icount[lj] = num_insns;
}
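+ /* CF_LAST_IO means the final instruction may perform I/O, so it is
+    bracketed with gen_io_start()/gen_io_end() to keep the instruction
+    counter consistent around the access. */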
+ if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
+ gen_io_start();
+
if (env->thumb) {
disas_thumb_insn(env, dc);
if (dc->condexec_mask) {
/* Translation stops when a conditional branch is encountered.
* Otherwise the subsequent code could get translated several times.
* Also stop translation when a page boundary is reached. This
- * ensures prefech aborts occur at the right place. */
+ * ensures prefetch aborts occur at the right place. */
+ num_insns ++;
} while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
!env->singlestep_enabled &&
- dc->pc < next_page_start);
+ dc->pc < next_page_start &&
+ num_insns < max_insns);
+
+ if (tb->cflags & CF_LAST_IO) {
+ if (dc->condjmp) {
+ /* FIXME: This can theoretically happen with self-modifying
+ code. */
+ cpu_abort(env, "IO on conditional branch instruction");
+ }
+ gen_io_end();
+ }
/* At this stage dc->condjmp will only be set when the skipped
instruction was a conditional branch or trap, and the PC has
already been written. */
- if (__builtin_expect(env->singlestep_enabled, 0)) {
+ if (unlikely(env->singlestep_enabled)) {
/* Make sure the pc is updated, and raise a debug exception. */
if (dc->condjmp) {
gen_set_condexec(dc);
dc->condjmp = 0;
}
}
+
done_generating:
+ gen_icount_end(tb, num_insns);
*gen_opc_ptr = INDEX_op_end;
#ifdef DEBUG_DISAS
gen_opc_instr_start[lj++] = 0;
} else {
tb->size = dc->pc - pc_start;
+ tb->icount = num_insns;
}
- return 0;
}
-int gen_intermediate_code(CPUState *env, TranslationBlock *tb)
+void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
{
- return gen_intermediate_code_internal(env, tb, 0);
+ gen_intermediate_code_internal(env, tb, 0);
}
-int gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
+void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
{
- return gen_intermediate_code_internal(env, tb, 1);
+ gen_intermediate_code_internal(env, tb, 1);
}
static const char *cpu_mode_names[16] = {
int flags)
{
int i;
+#if 0
union {
uint32_t i;
float s;
float64 f64;
double d;
} d0;
+#endif
uint32_t psr;
for(i=0;i<16;i++) {
#endif
}
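+/* Recover the guest PC recorded at translation time for the instruction at
+   pc_pos, used when CPU state must be reconstructed mid-TB. */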
+void gen_pc_load(CPUState *env, TranslationBlock *tb,
+ unsigned long searched_pc, int pc_pos, void *puc)
+{
+ env->regs[15] = gen_opc_pc[pc_pos];
+}