#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"
+#include "exec/translator.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "trace-tcg.h"
#include "exec/log.h"
-
#define PREFIX_REPZ 0x01
#define PREFIX_REPNZ 0x02
#define PREFIX_LOCK 0x04
#endif
typedef struct DisasContext {
+ DisasContextBase base;
+
/* current insn context */
int override; /* -1 if no override */
int prefix;
TCGMemOp dflag;
target_ulong pc_start;
target_ulong pc; /* pc = eip + cs_base */
- int is_jmp; /* 1 = means jump (stop translation), 2 means CPU
- static state change (stop translation) */
/* current block context */
target_ulong cs_base; /* base of CS segment */
int pe; /* protected mode */
int cpl;
int iopl;
int tf; /* TF cpu flag */
- int singlestep_enabled; /* "hardware" single step enabled */
int jmp_opt; /* use direct block chaining for direct jumps */
int repz_opt; /* optimize jumps within repz instructions */
int mem_index; /* select memory access functions */
uint64_t flags; /* all execution flags */
- struct TranslationBlock *tb;
int popl_esp_hack; /* for correct popl with esp base handling */
int rip_offset; /* only used in x86_64, but left for simplicity */
int cpuid_features;
int cpuid_ext3_features;
int cpuid_7_0_ebx_features;
int cpuid_xsave_features;
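+ /* sigsetjmp/siglongjmp recovery point for overly long insns; see advance_pc() */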
+ sigjmp_buf jmpbuf;
} DisasContext;
static void gen_eob(DisasContext *s);
size = s->cc_op - CC_OP_SUBB;
t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
/* If no temporary was used, be careful not to alias t1 and t0. */
- t0 = TCGV_EQUAL(t1, cpu_cc_src) ? cpu_tmp0 : reg;
+ t0 = t1 == cpu_cc_src ? cpu_tmp0 : reg;
tcg_gen_mov_tl(t0, cpu_cc_srcT);
gen_extu(size, t0);
goto add_sub;
break;
case JCC_L:
gen_compute_eflags(s);
- if (TCGV_EQUAL(reg, cpu_cc_src)) {
+ if (reg == cpu_cc_src) {
reg = cpu_tmp0;
}
tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
default:
case JCC_LE:
gen_compute_eflags(s);
- if (TCGV_EQUAL(reg, cpu_cc_src)) {
+ if (reg == cpu_cc_src) {
reg = cpu_tmp0;
}
tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
static inline void gen_ins(DisasContext *s, TCGMemOp ot)
{
- if (s->tb->cflags & CF_USE_ICOUNT) {
+ if (s->base.tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
gen_string_movl_A0_EDI(s);
gen_op_movl_T0_Dshift(ot);
gen_op_add_reg_T0(s->aflag, R_EDI);
gen_bpt_io(s, cpu_tmp2_i32, ot);
- if (s->tb->cflags & CF_USE_ICOUNT) {
+ if (s->base.tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
}
}
static inline void gen_outs(DisasContext *s, TCGMemOp ot)
{
- if (s->tb->cflags & CF_USE_ICOUNT) {
+ if (s->base.tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
gen_string_movl_A0_ESI(s);
gen_op_movl_T0_Dshift(ot);
gen_op_add_reg_T0(s->aflag, R_ESI);
gen_bpt_io(s, cpu_tmp2_i32, ot);
- if (s->tb->cflags & CF_USE_ICOUNT) {
+ if (s->base.tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
}
}
}
}
+#define X86_MAX_INSN_LENGTH 15
+
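+/* Advance s->pc by NUM_BYTES, enforcing the architectural limit of 15 bytes
+ * per instruction. On overflow, siglongjmp back to the sigsetjmp set up in
+ * disas_insn() so that a #GP (or a #PF, see below) is raised. The return
+ * value is the pre-increment PC, i.e. the address of the consumed bytes.
+ */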
+static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
+{
+ uint64_t pc = s->pc;
+
+ s->pc += num_bytes;
+ if (unlikely(s->pc - s->pc_start > X86_MAX_INSN_LENGTH)) {
+ /* If the instruction's 16th byte is on a different page than the 1st, a
+ * page fault on the second page wins over the general protection fault
+ * caused by the instruction being too long.
+ * This can happen even if the operand is only one byte long!
+ */
+ if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) {
+ volatile uint8_t unused =
+ cpu_ldub_code(env, (s->pc - 1) & TARGET_PAGE_MASK);
+ (void) unused;
+ }
+ siglongjmp(s->jmpbuf, 1);
+ }
+
+ return pc;
+}
+
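+/* Fetch an opcode byte or immediate of the given width from the instruction
+ * stream, advancing s->pc with the length check applied. These wrappers
+ * replace the open-coded cpu_ld*_code()/s->pc += n sequences below.
+ */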
+static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
+{
+ return cpu_ldub_code(env, advance_pc(env, s, 1));
+}
+
+static inline int16_t x86_ldsw_code(CPUX86State *env, DisasContext *s)
+{
+ return cpu_ldsw_code(env, advance_pc(env, s, 2));
+}
+
+static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
+{
+ return cpu_lduw_code(env, advance_pc(env, s, 2));
+}
+
+static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s)
+{
+ return cpu_ldl_code(env, advance_pc(env, s, 4));
+}
+
+#ifdef TARGET_X86_64
+static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
+{
+ return cpu_ldq_code(env, advance_pc(env, s, 8));
+}
+#endif
+
/* Decompose an address. */
typedef struct AddressParts {
case MO_32:
havesib = 0;
if (rm == 4) {
- int code = cpu_ldub_code(env, s->pc++);
+ int code = x86_ldub_code(env, s);
scale = (code >> 6) & 3;
index = ((code >> 3) & 7) | REX_X(s);
if (index == 4) {
case 0:
if ((base & 7) == 5) {
base = -1;
- disp = (int32_t)cpu_ldl_code(env, s->pc);
- s->pc += 4;
+ disp = (int32_t)x86_ldl_code(env, s);
if (CODE64(s) && !havesib) {
base = -2;
disp += s->pc + s->rip_offset;
}
break;
case 1:
- disp = (int8_t)cpu_ldub_code(env, s->pc++);
+ disp = (int8_t)x86_ldub_code(env, s);
break;
default:
case 2:
- disp = (int32_t)cpu_ldl_code(env, s->pc);
- s->pc += 4;
+ disp = (int32_t)x86_ldl_code(env, s);
break;
}
if (mod == 0) {
if (rm == 6) {
base = -1;
- disp = cpu_lduw_code(env, s->pc);
- s->pc += 2;
+ disp = x86_lduw_code(env, s);
break;
}
} else if (mod == 1) {
- disp = (int8_t)cpu_ldub_code(env, s->pc++);
+ disp = (int8_t)x86_ldub_code(env, s);
} else {
- disp = (int16_t)cpu_lduw_code(env, s->pc);
- s->pc += 2;
+ disp = (int16_t)x86_lduw_code(env, s);
}
switch (rm) {
switch (ot) {
case MO_8:
- ret = cpu_ldub_code(env, s->pc);
- s->pc++;
+ ret = x86_ldub_code(env, s);
break;
case MO_16:
- ret = cpu_lduw_code(env, s->pc);
- s->pc += 2;
+ ret = x86_lduw_code(env, s);
break;
case MO_32:
#ifdef TARGET_X86_64
case MO_64:
#endif
- ret = cpu_ldl_code(env, s->pc);
- s->pc += 4;
+ ret = x86_ldl_code(env, s);
break;
default:
tcg_abort();
static inline bool use_goto_tb(DisasContext *s, target_ulong pc)
{
#ifndef CONFIG_USER_ONLY
- return (pc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) ||
+ return (pc & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK) ||
(pc & TARGET_PAGE_MASK) == (s->pc_start & TARGET_PAGE_MASK);
#else
return true;
/* jump to same page: we can use a direct jump */
tcg_gen_goto_tb(tb_num);
gen_jmp_im(eip);
- tcg_gen_exit_tb((uintptr_t)s->tb + tb_num);
+ tcg_gen_exit_tb((uintptr_t)s->base.tb + tb_num);
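+ /* exit_tb does not fall through, so no end-of-TB code is needed */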
+ s->base.is_jmp = DISAS_NORETURN;
} else {
/* jump to another page */
gen_jmp_im(eip);
gen_set_label(l1);
gen_goto_tb(s, 1, val);
- s->is_jmp = DISAS_TB_JUMP;
} else {
l1 = gen_new_label();
l2 = gen_new_label();
because ss32 may change. For R_SS, translation must always
stop as special handling must be done to disable hardware
interrupts for the next instruction */
- if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
- s->is_jmp = DISAS_TB_JUMP;
+ if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS)) {
+ s->base.is_jmp = DISAS_TOO_MANY;
+ }
} else {
gen_op_movl_seg_T0_vm(seg_reg);
- if (seg_reg == R_SS)
- s->is_jmp = DISAS_TB_JUMP;
+ if (seg_reg == R_SS) {
+ s->base.is_jmp = DISAS_TOO_MANY;
+ }
}
}
gen_update_cc_op(s);
gen_jmp_im(cur_eip);
gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
- s->is_jmp = DISAS_TB_JUMP;
+ s->base.is_jmp = DISAS_NORETURN;
}
/* Generate #UD for the current instruction. The assumption here is that
gen_jmp_im(cur_eip);
gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno),
tcg_const_i32(next_eip - cur_eip));
- s->is_jmp = DISAS_TB_JUMP;
+ s->base.is_jmp = DISAS_NORETURN;
}
static void gen_debug(DisasContext *s, target_ulong cur_eip)
gen_update_cc_op(s);
gen_jmp_im(cur_eip);
gen_helper_debug(cpu_env);
- s->is_jmp = DISAS_TB_JUMP;
+ s->base.is_jmp = DISAS_NORETURN;
}
static void gen_set_hflag(DisasContext *s, uint32_t mask)
If RECHECK_TF, emit a rechecking helper for #DB, ignoring the state of
S->TF. This is used by the syscall/sysret insns. */
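+/* If JR, chain to the next TB with an indirect jump instead of exiting. */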
static void
-do_gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, TCGv jr)
+do_gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, bool jr)
{
gen_update_cc_op(s);
gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
}
- if (s->tb->flags & HF_RF_MASK) {
+ if (s->base.tb->flags & HF_RF_MASK) {
gen_helper_reset_rf(cpu_env);
}
- if (s->singlestep_enabled) {
+ if (s->base.singlestep_enabled) {
gen_helper_debug(cpu_env);
} else if (recheck_tf) {
gen_helper_rechecking_single_step(cpu_env);
tcg_gen_exit_tb(0);
} else if (s->tf) {
gen_helper_single_step(cpu_env);
- } else if (!TCGV_IS_UNUSED(jr)) {
- TCGv vaddr = tcg_temp_new();
-
- tcg_gen_add_tl(vaddr, jr, cpu_seg_base[R_CS]);
- tcg_gen_lookup_and_goto_ptr(vaddr);
- tcg_temp_free(vaddr);
+ } else if (jr) {
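+ /* chain to the TB for the current CPU state, if one exists; else exit */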
+ tcg_gen_lookup_and_goto_ptr();
} else {
tcg_gen_exit_tb(0);
}
- s->is_jmp = DISAS_TB_JUMP;
+ s->base.is_jmp = DISAS_NORETURN;
}
static inline void
gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf)
{
- TCGv unused;
-
- TCGV_UNUSED(unused);
- do_gen_eob_worker(s, inhibit, recheck_tf, unused);
+ do_gen_eob_worker(s, inhibit, recheck_tf, false);
}
/* End of block.
/* Jump to register */
static void gen_jr(DisasContext *s, TCGv dest)
{
- do_gen_eob_worker(s, false, false, dest);
+ do_gen_eob_worker(s, false, false, true);
}
/* generate a jump to eip. No segment change must happen before as a
set_cc_op(s, CC_OP_DYNAMIC);
if (s->jmp_opt) {
gen_goto_tb(s, tb_num, eip);
- s->is_jmp = DISAS_TB_JUMP;
} else {
gen_jmp_im(eip);
gen_eob(s);
gen_helper_enter_mmx(cpu_env);
}
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
reg = ((modrm >> 3) & 7);
if (is_xmm)
reg |= rex_r;
if (b1 == 1 && reg != 0)
goto illegal_op;
- field_length = cpu_ldub_code(env, s->pc++) & 0x3F;
- bit_index = cpu_ldub_code(env, s->pc++) & 0x3F;
+ field_length = x86_ldub_code(env, s) & 0x3F;
+ bit_index = x86_ldub_code(env, s) & 0x3F;
tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,xmm_regs[reg]));
if (b1 == 1)
if (b1 >= 2) {
goto unknown_op;
}
- val = cpu_ldub_code(env, s->pc++);
+ val = x86_ldub_code(env, s);
if (is_xmm) {
tcg_gen_movi_tl(cpu_T0, val);
tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
case 0x1c4:
s->rip_offset = 1;
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
- val = cpu_ldub_code(env, s->pc++);
+ val = x86_ldub_code(env, s);
if (b1) {
val &= 7;
tcg_gen_st16_tl(cpu_T0, cpu_env,
if (mod != 3)
goto illegal_op;
ot = mo_64_32(s->dflag);
- val = cpu_ldub_code(env, s->pc++);
+ val = x86_ldub_code(env, s);
if (b1) {
val &= 7;
rm = (modrm & 7) | REX_B(s);
if ((b & 0xf0) == 0xf0) {
goto do_0f_38_fx;
}
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
rm = modrm & 7;
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
do_0f_38_fx:
/* Various integer extensions at 0f 38 f[0-f]. */
b = modrm | (b1 << 8);
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
reg = ((modrm >> 3) & 7) | rex_r;
switch (b) {
case 0x03a:
case 0x13a:
b = modrm;
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
rm = modrm & 7;
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
goto illegal_op;
+ s->rip_offset = 1;
+
if (sse_fn_eppi == SSE_SPECIAL) {
ot = mo_64_32(s->dflag);
rm = (modrm & 7) | REX_B(s);
- s->rip_offset = 1;
if (mod != 3)
gen_lea_modrm(env, s, modrm);
reg = ((modrm >> 3) & 7) | rex_r;
- val = cpu_ldub_code(env, s->pc++);
+ val = x86_ldub_code(env, s);
switch (b) {
case 0x14: /* pextrb */
tcg_gen_ld8u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
gen_ldq_env_A0(s, op2_offset);
}
}
- val = cpu_ldub_code(env, s->pc++);
+ val = x86_ldub_code(env, s);
if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
set_cc_op(s, CC_OP_EFLAGS);
case 0x33a:
/* Various integer extensions at 0f 3a f[0-f]. */
b = modrm | (b1 << 8);
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
reg = ((modrm >> 3) & 7) | rex_r;
switch (b) {
}
ot = mo_64_32(s->dflag);
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
- b = cpu_ldub_code(env, s->pc++);
+ b = x86_ldub_code(env, s);
if (ot == MO_64) {
tcg_gen_rotri_tl(cpu_T0, cpu_T0, b & 63);
} else {
}
switch(b) {
case 0x0f: /* 3DNow! data insns */
- val = cpu_ldub_code(env, s->pc++);
+ val = x86_ldub_code(env, s);
sse_fn_epp = sse_op_table5[val];
if (!sse_fn_epp) {
goto unknown_op;
break;
case 0x70: /* pshufx insn */
case 0xc6: /* pshufx insn */
- val = cpu_ldub_code(env, s->pc++);
+ val = x86_ldub_code(env, s);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
/* XXX: introduce a new table? */
break;
case 0xc2:
/* compare insns */
- val = cpu_ldub_code(env, s->pc++);
+ val = x86_ldub_code(env, s);
if (val >= 8)
goto unknown_op;
sse_fn_epp = sse_op_table4[val][b1];
}
}
-/* convert one instruction. s->is_jmp is set if the translation must
+/* convert one instruction. s->base.is_jmp is set if the translation must
be stopped. Return the next pc value */
-static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
- target_ulong pc_start)
+static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
{
+ CPUX86State *env = cpu->env_ptr;
int b, prefixes;
int shift;
TCGMemOp ot, aflag, dflag;
int modrm, reg, rm, mod, op, opreg, val;
target_ulong next_eip, tval;
int rex_w, rex_r;
+ target_ulong pc_start = s->base.pc_next;
s->pc_start = s->pc = pc_start;
prefixes = 0;
s->rip_offset = 0; /* for relative ip address */
s->vex_l = 0;
s->vex_v = 0;
- next_byte:
- /* x86 has an upper limit of 15 bytes for an instruction. Since we
- * do not want to decode and generate IR for an illegal
- * instruction, the following check limits the instruction size to
- * 25 bytes: 14 prefix + 1 opc + 6 (modrm+sib+ofs) + 4 imm */
- if (s->pc - pc_start > 14) {
- goto illegal_op;
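+ /* advance_pc() siglongjmps back here once the 15-byte limit is exceeded */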
+ if (sigsetjmp(s->jmpbuf, 0) != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ return s->pc;
}
- b = cpu_ldub_code(env, s->pc);
- s->pc++;
+
+ next_byte:
+ b = x86_ldub_code(env, s);
/* Collect prefixes. */
switch (b) {
case 0xf3:
static const int pp_prefix[4] = {
0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ
};
- int vex3, vex2 = cpu_ldub_code(env, s->pc);
+ int vex3, vex2 = x86_ldub_code(env, s);
if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
/* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
rex_r = (~vex2 >> 4) & 8;
if (b == 0xc5) {
vex3 = vex2;
- b = cpu_ldub_code(env, s->pc++);
+ b = x86_ldub_code(env, s);
} else {
#ifdef TARGET_X86_64
s->rex_x = (~vex2 >> 3) & 8;
s->rex_b = (~vex2 >> 2) & 8;
#endif
- vex3 = cpu_ldub_code(env, s->pc++);
+ vex3 = x86_ldub_code(env, s);
rex_w = (vex3 >> 7) & 1;
switch (vex2 & 0x1f) {
case 0x01: /* Implied 0f leading opcode bytes. */
- b = cpu_ldub_code(env, s->pc++) | 0x100;
+ b = x86_ldub_code(env, s) | 0x100;
break;
case 0x02: /* Implied 0f 38 leading opcode bytes. */
b = 0x138;
case 0x0f:
/**************************/
/* extended op code */
- b = cpu_ldub_code(env, s->pc++) | 0x100;
+ b = x86_ldub_code(env, s) | 0x100;
goto reswitch;
/**************************/
switch(f) {
case 0: /* OP Ev, Gv */
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
gen_op(s, op, ot, opreg);
break;
case 1: /* OP Gv, Ev */
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
mod = (modrm >> 6) & 3;
reg = ((modrm >> 3) & 7) | rex_r;
rm = (modrm & 7) | REX_B(s);
ot = mo_b_d(b, dflag);
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
op = (modrm >> 3) & 7;
case 0xf7:
ot = mo_b_d(b, dflag);
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
op = (modrm >> 3) & 7;
case 0xff: /* GRP5 */
ot = mo_b_d(b, dflag);
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
op = (modrm >> 3) & 7;
case 0x85:
ot = mo_b_d(b, dflag);
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
reg = ((modrm >> 3) & 7) | rex_r;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
case 0x69: /* imul Gv, Ev, I */
case 0x6b:
ot = dflag;
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
reg = ((modrm >> 3) & 7) | rex_r;
if (b == 0x69)
s->rip_offset = insn_const_size(ot);
case 0x1c0:
case 0x1c1: /* xadd Ev, Gv */
ot = mo_b_d(b, dflag);
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
gen_op_mov_v_reg(ot, cpu_T0, reg);
TCGv oldv, newv, cmpv;
ot = mo_b_d(b, dflag);
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
oldv = tcg_temp_new();
}
break;
case 0x1c7: /* cmpxchg8b */
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
mod = (modrm >> 6) & 3;
if ((mod == 3) || ((modrm & 0x38) != 0x8))
goto illegal_op;
gen_push_v(s, cpu_T0);
break;
case 0x8f: /* pop Ev */
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
mod = (modrm >> 6) & 3;
ot = gen_pop_T0(s);
if (mod == 3) {
case 0xc8: /* enter */
{
int level;
- val = cpu_lduw_code(env, s->pc);
- s->pc += 2;
- level = cpu_ldub_code(env, s->pc++);
+ val = x86_lduw_code(env, s);
+ level = x86_ldub_code(env, s);
gen_enter(s, val, level);
}
break;
gen_movl_seg_T0(s, reg);
gen_pop_update(s, ot);
/* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. */
- if (s->is_jmp) {
+ if (s->base.is_jmp) {
gen_jmp_im(s->pc - s->cs_base);
if (reg == R_SS) {
s->tf = 0;
ot = gen_pop_T0(s);
gen_movl_seg_T0(s, (b >> 3) & 7);
gen_pop_update(s, ot);
- if (s->is_jmp) {
+ if (s->base.is_jmp) {
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
}
case 0x88:
case 0x89: /* mov Gv, Ev */
ot = mo_b_d(b, dflag);
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
reg = ((modrm >> 3) & 7) | rex_r;
/* generate a generic store */
case 0xc6:
case 0xc7: /* mov Ev, Iv */
ot = mo_b_d(b, dflag);
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
mod = (modrm >> 6) & 3;
if (mod != 3) {
s->rip_offset = insn_const_size(ot);
case 0x8a:
case 0x8b: /* mov Ev, Gv */
ot = mo_b_d(b, dflag);
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
reg = ((modrm >> 3) & 7) | rex_r;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
case 0x8e: /* mov seg, Gv */
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
reg = (modrm >> 3) & 7;
if (reg >= 6 || reg == R_CS)
goto illegal_op;
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
gen_movl_seg_T0(s, reg);
/* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. */
- if (s->is_jmp) {
+ if (s->base.is_jmp) {
gen_jmp_im(s->pc - s->cs_base);
if (reg == R_SS) {
s->tf = 0;
}
break;
case 0x8c: /* mov Gv, seg */
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
reg = (modrm >> 3) & 7;
mod = (modrm >> 6) & 3;
if (reg >= 6)
/* s_ot is the sign+size of source */
s_ot = b & 8 ? MO_SIGN | ot : ot;
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
break;
case 0x8d: /* lea */
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
mod = (modrm >> 6) & 3;
if (mod == 3)
goto illegal_op;
switch (s->aflag) {
#ifdef TARGET_X86_64
case MO_64:
- offset_addr = cpu_ldq_code(env, s->pc);
- s->pc += 8;
+ offset_addr = x86_ldq_code(env, s);
break;
#endif
default:
if (dflag == MO_64) {
uint64_t tmp;
/* 64 bit case */
- tmp = cpu_ldq_code(env, s->pc);
- s->pc += 8;
+ tmp = x86_ldq_code(env, s);
reg = (b & 7) | REX_B(s);
tcg_gen_movi_tl(cpu_T0, tmp);
gen_op_mov_reg_v(MO_64, reg, cpu_T0);
case 0x86:
case 0x87: /* xchg Ev, Gv */
ot = mo_b_d(b, dflag);
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
if (mod == 3) {
op = R_GS;
do_lxx:
ot = dflag != MO_16 ? MO_32 : MO_16;
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
if (mod == 3)
gen_movl_seg_T0(s, op);
/* then put the data */
gen_op_mov_reg_v(ot, reg, cpu_T1);
- if (s->is_jmp) {
+ if (s->base.is_jmp) {
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
}
grp2:
{
ot = mo_b_d(b, dflag);
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
mod = (modrm >> 6) & 3;
op = (modrm >> 3) & 7;
gen_shift(s, op, ot, opreg, OR_ECX);
} else {
if (shift == 2) {
- shift = cpu_ldub_code(env, s->pc++);
+ shift = x86_ldub_code(env, s);
}
gen_shifti(s, op, ot, opreg, shift);
}
shift = 0;
do_shiftd:
ot = dflag;
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
reg = ((modrm >> 3) & 7) | rex_r;
gen_op_mov_v_reg(ot, cpu_T1, reg);
if (shift) {
- TCGv imm = tcg_const_tl(cpu_ldub_code(env, s->pc++));
+ TCGv imm = tcg_const_tl(x86_ldub_code(env, s));
gen_shiftd_rm_T1(s, ot, opreg, op, imm);
tcg_temp_free(imm);
} else {
gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
break;
}
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
mod = (modrm >> 6) & 3;
rm = modrm & 7;
op = ((b & 7) << 3) | ((modrm >> 3) & 7);
gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
} else {
gen_ins(s, ot);
- if (s->tb->cflags & CF_USE_ICOUNT) {
+ if (s->base.tb->cflags & CF_USE_ICOUNT) {
gen_jmp(s, s->pc - s->cs_base);
}
}
gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
} else {
gen_outs(s, ot);
- if (s->tb->cflags & CF_USE_ICOUNT) {
+ if (s->base.tb->cflags & CF_USE_ICOUNT) {
gen_jmp(s, s->pc - s->cs_base);
}
}
case 0xe4:
case 0xe5:
ot = mo_b_d32(b, dflag);
- val = cpu_ldub_code(env, s->pc++);
+ val = x86_ldub_code(env, s);
tcg_gen_movi_tl(cpu_T0, val);
gen_check_io(s, ot, pc_start - s->cs_base,
SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
- if (s->tb->cflags & CF_USE_ICOUNT) {
+ if (s->base.tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
tcg_gen_movi_i32(cpu_tmp2_i32, val);
gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32);
gen_op_mov_reg_v(ot, R_EAX, cpu_T1);
gen_bpt_io(s, cpu_tmp2_i32, ot);
- if (s->tb->cflags & CF_USE_ICOUNT) {
+ if (s->base.tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
case 0xe6:
case 0xe7:
ot = mo_b_d32(b, dflag);
- val = cpu_ldub_code(env, s->pc++);
+ val = x86_ldub_code(env, s);
tcg_gen_movi_tl(cpu_T0, val);
gen_check_io(s, ot, pc_start - s->cs_base,
svm_is_rep(prefixes));
gen_op_mov_v_reg(ot, cpu_T1, R_EAX);
- if (s->tb->cflags & CF_USE_ICOUNT) {
+ if (s->base.tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
tcg_gen_movi_i32(cpu_tmp2_i32, val);
tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
gen_bpt_io(s, cpu_tmp2_i32, ot);
- if (s->tb->cflags & CF_USE_ICOUNT) {
+ if (s->base.tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
gen_check_io(s, ot, pc_start - s->cs_base,
SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
- if (s->tb->cflags & CF_USE_ICOUNT) {
+ if (s->base.tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32);
gen_op_mov_reg_v(ot, R_EAX, cpu_T1);
gen_bpt_io(s, cpu_tmp2_i32, ot);
- if (s->tb->cflags & CF_USE_ICOUNT) {
+ if (s->base.tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
svm_is_rep(prefixes));
gen_op_mov_v_reg(ot, cpu_T1, R_EAX);
- if (s->tb->cflags & CF_USE_ICOUNT) {
+ if (s->base.tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
gen_bpt_io(s, cpu_tmp2_i32, ot);
- if (s->tb->cflags & CF_USE_ICOUNT) {
+ if (s->base.tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
/************************/
/* control */
case 0xc2: /* ret im */
- val = cpu_ldsw_code(env, s->pc);
- s->pc += 2;
+ val = x86_ldsw_code(env, s);
ot = gen_pop_T0(s);
gen_stack_update(s, val + (1 << ot));
/* Note that gen_pop_T0 uses a zero-extending load. */
gen_jr(s, cpu_T0);
break;
case 0xca: /* lret im */
- val = cpu_ldsw_code(env, s->pc);
- s->pc += 2;
+ val = x86_ldsw_code(env, s);
do_lret:
if (s->pe && !s->vm86) {
gen_update_cc_op(s);
break;
case 0x190 ... 0x19f: /* setcc Gv */
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
gen_setcc1(s, b, cpu_T0);
gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
break;
goto illegal_op;
}
ot = dflag;
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
reg = ((modrm >> 3) & 7) | rex_r;
gen_cmovcc1(env, s, ot, b, modrm, reg);
break;
/* bit operations */
case 0x1ba: /* bt/bts/btr/btc Gv, im */
ot = dflag;
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
op = (modrm >> 3) & 7;
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
gen_op_mov_v_reg(ot, cpu_T0, rm);
}
/* load shift */
- val = cpu_ldub_code(env, s->pc++);
+ val = x86_ldub_code(env, s);
tcg_gen_movi_tl(cpu_T1, val);
if (op < 4)
goto unknown_op;
op = 3;
do_btx:
ot = dflag;
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
case 0x1bc: /* bsf / tzcnt */
case 0x1bd: /* bsr / lzcnt */
ot = dflag;
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
reg = ((modrm >> 3) & 7) | rex_r;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
gen_extu(ot, cpu_T0);
case 0xd4: /* aam */
if (CODE64(s))
goto illegal_op;
- val = cpu_ldub_code(env, s->pc++);
+ val = x86_ldub_code(env, s);
if (val == 0) {
gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
} else {
case 0xd5: /* aad */
if (CODE64(s))
goto illegal_op;
- val = cpu_ldub_code(env, s->pc++);
+ val = x86_ldub_code(env, s);
gen_helper_aad(cpu_env, tcg_const_i32(val));
set_cc_op(s, CC_OP_LOGICB);
break;
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_pause(cpu_env, tcg_const_i32(s->pc - pc_start));
- s->is_jmp = DISAS_TB_JUMP;
+ s->base.is_jmp = DISAS_NORETURN;
}
break;
case 0x9b: /* fwait */
gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
break;
case 0xcd: /* int N */
- val = cpu_ldub_code(env, s->pc++);
+ val = x86_ldub_code(env, s);
if (s->vm86 && s->iopl != 3) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
if (CODE64(s))
goto illegal_op;
ot = dflag;
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
reg = (modrm >> 3) & 7;
mod = (modrm >> 6) & 3;
if (mod == 3)
case 0x131: /* rdtsc */
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
- if (s->tb->cflags & CF_USE_ICOUNT) {
+ if (s->base.tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_rdtsc(cpu_env);
- if (s->tb->cflags & CF_USE_ICOUNT) {
+ if (s->base.tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
- s->is_jmp = DISAS_TB_JUMP;
+ s->base.is_jmp = DISAS_NORETURN;
}
break;
case 0x100:
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
mod = (modrm >> 6) & 3;
op = (modrm >> 3) & 7;
switch(op) {
break;
case 0x101:
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
switch (modrm) {
CASE_MODRM_MEM_OP(0): /* sgdt */
gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag - 1),
tcg_const_i32(s->pc - pc_start));
tcg_gen_exit_tb(0);
- s->is_jmp = DISAS_TB_JUMP;
+ s->base.is_jmp = DISAS_NORETURN;
break;
case 0xd9: /* VMMCALL */
}
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
- if (s->tb->cflags & CF_USE_ICOUNT) {
+ if (s->base.tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_rdtscp(cpu_env);
- if (s->tb->cflags & CF_USE_ICOUNT) {
+ if (s->base.tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
/* d_ot is the size of destination */
d_ot = dflag;
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
t1 = tcg_temp_local_new();
t2 = tcg_temp_local_new();
ot = MO_16;
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
reg = (modrm >> 3) & 7;
mod = (modrm >> 6) & 3;
rm = modrm & 7;
if (!s->pe || s->vm86)
goto illegal_op;
ot = dflag != MO_16 ? MO_32 : MO_16;
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
reg = ((modrm >> 3) & 7) | rex_r;
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
t0 = tcg_temp_local_new();
}
break;
case 0x118:
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
mod = (modrm >> 6) & 3;
op = (modrm >> 3) & 7;
switch(op) {
}
break;
case 0x11a:
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
if (s->flags & HF_MPX_EN_MASK) {
mod = (modrm >> 6) & 3;
reg = ((modrm >> 3) & 7) | rex_r;
gen_nop_modrm(env, s, modrm);
break;
case 0x11b:
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
if (s->flags & HF_MPX_EN_MASK) {
mod = (modrm >> 6) & 3;
reg = ((modrm >> 3) & 7) | rex_r;
gen_nop_modrm(env, s, modrm);
break;
case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
gen_nop_modrm(env, s, modrm);
break;
case 0x120: /* mov reg, crN */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
/* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
* AMD documentation (24594.pdf) and testing of
* intel 386 and 486 processors all show that the mod bits
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
if (b & 2) {
- if (s->tb->cflags & CF_USE_ICOUNT) {
+ if (s->base.tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
gen_op_mov_v_reg(ot, cpu_T0, rm);
gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
cpu_T0);
- if (s->tb->cflags & CF_USE_ICOUNT) {
+ if (s->base.tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
}
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
} else {
- if (s->tb->cflags & CF_USE_ICOUNT) {
+ if (s->base.tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_read_crN(cpu_T0, cpu_env, tcg_const_i32(reg));
gen_op_mov_reg_v(ot, rm, cpu_T0);
- if (s->tb->cflags & CF_USE_ICOUNT) {
+ if (s->base.tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
}
}
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
/* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
* AMD documentation (24594.pdf) and testing of
* intel 386 and 486 processors all show that the mod bits
if (!(s->cpuid_features & CPUID_SSE2))
goto illegal_op;
ot = mo_64_32(dflag);
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
mod = (modrm >> 6) & 3;
if (mod == 3)
goto illegal_op;
gen_ldst_modrm(env, s, modrm, ot, reg, 1);
break;
case 0x1ae:
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
switch (modrm) {
CASE_MODRM_MEM_OP(0): /* fxsave */
if (!(s->cpuid_features & CPUID_FXSR)
break;
case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
- case 0xc8 ... 0xc8: /* rdgsbase (f3 0f ae /1) */
+ case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */
case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
- case 0xd8 ... 0xd8: /* wrgsbase (f3 0f ae /3) */
+ case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */
if (CODE64(s)
&& (prefixes & PREFIX_REPZ)
&& !(prefixes & PREFIX_LOCK)
break;
case 0x10d: /* 3DNow! prefetch(w) */
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
mod = (modrm >> 6) & 3;
if (mod == 3)
goto illegal_op;
if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
goto illegal_op;
- modrm = cpu_ldub_code(env, s->pc++);
+ modrm = x86_ldub_code(env, s);
reg = ((modrm >> 3) & 7) | rex_r;
if (s->prefix & PREFIX_DATA) {
"bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
};
int i;
- static bool initialized;
-
- if (initialized) {
- return;
- }
- initialized = true;
cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
tcg_ctx.tcg_env = cpu_env;
}
}
-/* generate intermediate code for basic block 'tb'. */
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
+static int i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu,
+ int max_insns)
{
- CPUX86State *env = cs->env_ptr;
- DisasContext dc1, *dc = &dc1;
- target_ulong pc_ptr;
- uint32_t flags;
- target_ulong pc_start;
- target_ulong cs_base;
- int num_insns;
- int max_insns;
-
- /* generate intermediate code */
- pc_start = tb->pc;
- cs_base = tb->cs_base;
- flags = tb->flags;
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+ CPUX86State *env = cpu->env_ptr;
+ uint32_t flags = dc->base.tb->flags;
+ target_ulong cs_base = dc->base.tb->cs_base;
dc->pe = (flags >> HF_PE_SHIFT) & 1;
dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
dc->iopl = (flags >> IOPL_SHIFT) & 3;
dc->tf = (flags >> TF_SHIFT) & 1;
- dc->singlestep_enabled = cs->singlestep_enabled;
dc->cc_op = CC_OP_DYNAMIC;
dc->cc_op_dirty = false;
dc->cs_base = cs_base;
- dc->tb = tb;
dc->popl_esp_hack = 0;
/* select memory access functions */
dc->mem_index = 0;
dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
#endif
dc->flags = flags;
- dc->jmp_opt = !(dc->tf || cs->singlestep_enabled ||
+ dc->jmp_opt = !(dc->tf || dc->base.singlestep_enabled ||
(flags & HF_INHIBIT_IRQ_MASK));
/* Do not optimize repz jumps at all in icount mode, because
rep movsS instructions are executed with different paths
record/replay modes and there will always be an
additional step for ecx=0 when icount is enabled.
*/
- dc->repz_opt = !dc->jmp_opt && !(tb->cflags & CF_USE_ICOUNT);
+ dc->repz_opt = !dc->jmp_opt && !(dc->base.tb->cflags & CF_USE_ICOUNT);
#if 0
/* check addseg logic */
if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
cpu_ptr1 = tcg_temp_new_ptr();
cpu_cc_srcT = tcg_temp_local_new();
- dc->is_jmp = DISAS_NEXT;
- pc_ptr = pc_start;
- num_insns = 0;
- max_insns = tb->cflags & CF_COUNT_MASK;
- if (max_insns == 0) {
- max_insns = CF_COUNT_MASK;
- }
- if (max_insns > TCG_MAX_INSNS) {
- max_insns = TCG_MAX_INSNS;
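+ /* the generic translator loop already bounds max_insns; nothing to adjust */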
+ return max_insns;
+}
+
+static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu)
+{
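+ /* nothing to do: all i386 setup happens in init_disas_context */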
+}
+
+static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+
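+ /* record cc_op alongside the PC so restore_state_to_opc() can restore it */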
+ tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
+}
+
+static bool i386_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
+ const CPUBreakpoint *bp)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+ /* If RF is set, suppress an internally generated breakpoint. */
+ int flags = dc->base.tb->flags & HF_RF_MASK ? BP_GDB : BP_ANY;
+ if (bp->flags & flags) {
+ gen_debug(dc, dc->base.pc_next - dc->cs_base);
+ dc->base.is_jmp = DISAS_NORETURN;
+ /* The address covered by the breakpoint must be included in
+ [tb->pc, tb->pc + tb->size) in order for it to be
+ properly cleared -- thus we increment the PC here so that
+ the generic logic setting tb->size later does the right thing. */
+ dc->base.pc_next += 1;
+ return true;
+ } else {
+ return false;
}
+}
- gen_tb_start(tb);
- for(;;) {
- tcg_gen_insn_start(pc_ptr, dc->cc_op);
- num_insns++;
-
- /* If RF is set, suppress an internally generated breakpoint. */
- if (unlikely(cpu_breakpoint_test(cs, pc_ptr,
- tb->flags & HF_RF_MASK
- ? BP_GDB : BP_ANY))) {
- gen_debug(dc, pc_ptr - dc->cs_base);
- /* The address covered by the breakpoint must be included in
- [tb->pc, tb->pc + tb->size) in order to for it to be
- properly cleared -- thus we increment the PC here so that
- the logic setting tb->size below does the right thing. */
- pc_ptr += 1;
- goto done_generating;
- }
- if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
- gen_io_start();
- }
+static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+ target_ulong pc_next = disas_insn(dc, cpu);
- pc_ptr = disas_insn(env, dc, pc_ptr);
- /* stop translation if indicated */
- if (dc->is_jmp)
- break;
+ if (dc->tf || (dc->base.tb->flags & HF_INHIBIT_IRQ_MASK)) {
/* in single-step mode, we generate only one instruction and
generate an exception */
/* if irqs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
the flag and abort the translation to give the irqs a
- change to be happen */
- if (dc->tf || dc->singlestep_enabled ||
- (flags & HF_INHIBIT_IRQ_MASK)) {
- gen_jmp_im(pc_ptr - dc->cs_base);
- gen_eob(dc);
- break;
- }
+ chance to happen */
+ dc->base.is_jmp = DISAS_TOO_MANY;
+ } else if ((dc->base.tb->cflags & CF_USE_ICOUNT)
+ && ((dc->base.pc_next & TARGET_PAGE_MASK)
+ != ((dc->base.pc_next + TARGET_MAX_INSN_SIZE - 1)
+ & TARGET_PAGE_MASK)
+ || (dc->base.pc_next & ~TARGET_PAGE_MASK) == 0)) {
/* Do not cross the boundary of the pages in icount mode,
as it can cause an exception. Do it only when the boundary is
crossed by the first instruction in the block.
If the current instruction already crossed the bound - it's ok,
because an exception hasn't stopped this code.
*/
- if ((tb->cflags & CF_USE_ICOUNT)
- && ((pc_ptr & TARGET_PAGE_MASK)
- != ((pc_ptr + TARGET_MAX_INSN_SIZE - 1) & TARGET_PAGE_MASK)
- || (pc_ptr & ~TARGET_PAGE_MASK) == 0)) {
- gen_jmp_im(pc_ptr - dc->cs_base);
- gen_eob(dc);
- break;
- }
- /* if too long translation, stop generation too */
- if (tcg_op_buf_full() ||
- (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
- num_insns >= max_insns) {
- gen_jmp_im(pc_ptr - dc->cs_base);
- gen_eob(dc);
- break;
- }
- if (singlestep) {
- gen_jmp_im(pc_ptr - dc->cs_base);
- gen_eob(dc);
- break;
- }
+ dc->base.is_jmp = DISAS_TOO_MANY;
+ } else if ((pc_next - dc->base.pc_first) >= (TARGET_PAGE_SIZE - 32)) {
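+ /* if too long translation, stop generation */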
+ dc->base.is_jmp = DISAS_TOO_MANY;
}
- if (tb->cflags & CF_LAST_IO)
- gen_io_end();
-done_generating:
- gen_tb_end(tb, num_insns);
-#ifdef DEBUG_DISAS
- if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
- && qemu_log_in_addr_range(pc_start)) {
- int disas_flags;
- qemu_log_lock();
- qemu_log("----------------\n");
- qemu_log("IN: %s\n", lookup_symbol(pc_start));
+ dc->base.pc_next = pc_next;
+}
+
+static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+
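+ /* DISAS_TOO_MANY: the block ended without a jump; sync EIP and end it */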
+ if (dc->base.is_jmp == DISAS_TOO_MANY) {
+ gen_jmp_im(dc->base.pc_next - dc->cs_base);
+ gen_eob(dc);
+ }
+}
+
+static void i386_tr_disas_log(const DisasContextBase *dcbase,
+ CPUState *cpu)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+ int disas_flags = !dc->code32;
+
+ qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
#ifdef TARGET_X86_64
- if (dc->code64)
- disas_flags = 2;
- else
-#endif
- disas_flags = !dc->code32;
- log_target_disas(cs, pc_start, pc_ptr - pc_start, disas_flags);
- qemu_log("\n");
- qemu_log_unlock();
+ if (dc->code64) {
+ disas_flags = 2;
}
#endif
+ log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size, disas_flags);
+}
+
+static const TranslatorOps i386_tr_ops = {
+ .init_disas_context = i386_tr_init_disas_context,
+ .tb_start = i386_tr_tb_start,
+ .insn_start = i386_tr_insn_start,
+ .breakpoint_check = i386_tr_breakpoint_check,
+ .translate_insn = i386_tr_translate_insn,
+ .tb_stop = i386_tr_tb_stop,
+ .disas_log = i386_tr_disas_log,
+};
+
+/* generate intermediate code for basic block 'tb'. */
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
+{
+ DisasContext dc;
- tb->size = pc_ptr - pc_start;
- tb->icount = num_insns;
+ translator_loop(&i386_tr_ops, &dc.base, cpu, tb);
}
void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb,