#include <stdio.h>
#include <string.h>
#include <inttypes.h>
-#include <signal.h>
#include "qemu/host-utils.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
+#include "trace-tcg.h"
+
+
#define PREFIX_REPZ 0x01
#define PREFIX_REPNZ 0x02
#define PREFIX_LOCK 0x04
int tf; /* TF cpu flag */
int singlestep_enabled; /* "hardware" single step enabled */
int jmp_opt; /* use direct block chaining for direct jumps */
+ int repz_opt; /* optimize jumps within repz instructions */
int mem_index; /* select memory access functions */
uint64_t flags; /* all execution flags */
struct TranslationBlock *tb;
gen_ext_tl(reg, reg, ot, true);
}
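/* TCG labels are now typed TCGLabel pointers rather than plain int
   ids, so label/integer mix-ups are caught at compile time. */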
-static inline void gen_op_jnz_ecx(TCGMemOp size, int label1)
+static inline void gen_op_jnz_ecx(TCGMemOp size, TCGLabel *label1)
{
tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
gen_extu(size, cpu_tmp0);
tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
}
-static inline void gen_op_jz_ecx(TCGMemOp size, int label1)
+static inline void gen_op_jz_ecx(TCGMemOp size, TCGLabel *label1)
{
tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
gen_extu(size, cpu_tmp0);
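/* The port I/O helpers below now receive cpu_env as an explicit
   argument, presumably so they can reach the CPU state directly
   (for example for tracing or correct I/O accounting). */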
{
switch (ot) {
case MO_8:
- gen_helper_inb(v, n);
+ gen_helper_inb(v, cpu_env, n);
break;
case MO_16:
- gen_helper_inw(v, n);
+ gen_helper_inw(v, cpu_env, n);
break;
case MO_32:
- gen_helper_inl(v, n);
+ gen_helper_inl(v, cpu_env, n);
break;
default:
tcg_abort();
{
switch (ot) {
case MO_8:
- gen_helper_outb(v, n);
+ gen_helper_outb(cpu_env, v, n);
break;
case MO_16:
- gen_helper_outw(v, n);
+ gen_helper_outw(cpu_env, v, n);
break;
case MO_32:
- gen_helper_outl(v, n);
+ gen_helper_outl(cpu_env, v, n);
break;
default:
tcg_abort();
static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip,
uint32_t svm_flags)
{
- int state_saved;
target_ulong next_eip;
- state_saved = 0;
if (s->pe && (s->cpl > s->iopl || s->vm86)) {
- gen_update_cc_op(s);
- gen_jmp_im(cur_eip);
- state_saved = 1;
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
switch (ot) {
case MO_8:
}
}
if(s->flags & HF_SVMI_MASK) {
- if (!state_saved) {
- gen_update_cc_op(s);
- gen_jmp_im(cur_eip);
- }
+ gen_update_cc_op(s);
+ gen_jmp_im(cur_eip);
svm_flags |= (1 << (4 + ot));
next_eip = s->pc - s->cs_base;
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
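/* The state_saved flag is gone from gen_check_io: the protected-mode
   permission check no longer saves cc_op/EIP by hand (the check helper
   can apparently recover guest state itself when it raises a fault),
   so only the SVM intercept path still synchronizes them. */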
/* generate a conditional jump to label 'l1' according to jump opcode
value 'b'. In the fast case, T0 is guaranteed not to be used. */
-static inline void gen_jcc1_noeob(DisasContext *s, int b, int l1)
+static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
{
CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);
/* Generate a conditional jump to label 'l1' according to jump opcode
value 'b'. In the fast case, T0 is guaranteed not to be used.
A translation block must end soon. */
-static inline void gen_jcc1(DisasContext *s, int b, int l1)
+static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
{
CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);
/* XXX: does not work with gdbstub "ice" single step - not a
serious problem */
-static int gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
+static TCGLabel *gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
{
- int l1, l2;
-
- l1 = gen_new_label();
- l2 = gen_new_label();
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
gen_op_jnz_ecx(s->aflag, l1);
gen_set_label(l2);
gen_jmp_tb(s, next_eip, 1);
static inline void gen_ins(DisasContext *s, TCGMemOp ot)
{
- if (use_icount)
+ if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
+ }
gen_string_movl_A0_EDI(s);
/* Note: we must do this dummy write first to be restartable in
case of page fault. */
gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
gen_op_movl_T0_Dshift(ot);
gen_op_add_reg_T0(s->aflag, R_EDI);
- if (use_icount)
+ if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
+ }
}
static inline void gen_outs(DisasContext *s, TCGMemOp ot)
{
- if (use_icount)
+ if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
+ }
gen_string_movl_A0_ESI(s);
gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
gen_op_movl_T0_Dshift(ot);
gen_op_add_reg_T0(s->aflag, R_ESI);
- if (use_icount)
+ if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
+ }
}
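/* Icount instrumentation now keys off the per-TB CF_USE_ICOUNT flag
   in tb->cflags instead of the global use_icount, so each translation
   block records whether it was generated with instruction counting
   enabled. */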
/* same method as Valgrind: we generate jumps to current or next
static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \
target_ulong cur_eip, target_ulong next_eip) \
{ \
- int l2;\
+ TCGLabel *l2; \
gen_update_cc_op(s); \
l2 = gen_jz_ecx_string(s, next_eip); \
gen_ ## op(s, ot); \
gen_op_add_reg_im(s->aflag, R_ECX, -1); \
/* a loop would cause two single step exceptions if ECX = 1 \
before rep string_insn */ \
- if (!s->jmp_opt) \
+ if (s->repz_opt) \
gen_op_jz_ecx(s->aflag, l2); \
gen_jmp(s, cur_eip); \
}
target_ulong next_eip, \
int nz) \
{ \
- int l2;\
+ TCGLabel *l2; \
gen_update_cc_op(s); \
l2 = gen_jz_ecx_string(s, next_eip); \
gen_ ## op(s, ot); \
gen_op_add_reg_im(s->aflag, R_ECX, -1); \
gen_update_cc_op(s); \
gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2); \
- if (!s->jmp_opt) \
+ if (s->repz_opt) \
gen_op_jz_ecx(s->aflag, l2); \
gen_jmp(s, cur_eip); \
}
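/* s->repz_opt replaces the old !s->jmp_opt test in both repz macros;
   see the comment where dc->repz_opt is initialized in
   gen_intermediate_code below. */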
}
}
-static inline void tcg_gen_lshift(TCGv ret, TCGv arg1, target_long arg2)
-{
- if (arg2 >= 0)
- tcg_gen_shli_tl(ret, arg1, arg2);
- else
- tcg_gen_shri_tl(ret, arg1, -arg2);
-}
-
static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right)
{
target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
static inline void gen_jcc(DisasContext *s, int b,
target_ulong val, target_ulong next_eip)
{
- int l1, l2;
+ TCGLabel *l1, *l2;
if (s->jmp_opt) {
l1 = gen_new_label();
/* move T0 to seg_reg and compute whether the CPU state may change. Never
call this function with seg_reg == R_CS */
-static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
+static void gen_movl_seg_T0(DisasContext *s, int seg_reg)
{
if (s->pe && !s->vm86) {
- /* XXX: optimize by finding processor state dynamically */
- gen_update_cc_op(s);
- gen_jmp_im(cur_eip);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32);
/* abort translation because the addseg value may change or
static inline void gen_op_movo(int d_offset, int s_offset)
{
- tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
- tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
- tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + 8);
- tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + 8);
+ tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(XMMReg, XMM_Q(0)));
+ tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(XMMReg, XMM_Q(0)));
+ tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(XMMReg, XMM_Q(1)));
+ tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(XMMReg, XMM_Q(1)));
}
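/* Indexing the halves via offsetof(XMMReg, XMM_Q(n)) instead of raw
   +0/+8 byte offsets keeps gen_op_movo correct on big-endian hosts,
   where the XMM_Q macro reverses the qword order. */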
static inline void gen_op_movq(int d_offset, int s_offset)
goto illegal_op;
gen_lea_modrm(env, s, modrm);
if (b1 & 1) {
- gen_stq_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
+ gen_stq_env_A0(s, offsetof(CPUX86State,
+ xmm_regs[reg].XMM_Q(0)));
} else {
tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
xmm_regs[reg].XMM_L(0)));
int rex_w, rex_r;
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
- tcg_gen_debug_insn_start(pc_start);
+ tcg_gen_insn_start(pc_start);
}
s->pc = pc_start;
prefixes = 0;
case 6: /* div */
switch(ot) {
case MO_8:
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_divb_AL(cpu_env, cpu_T[0]);
break;
case MO_16:
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_divw_AX(cpu_env, cpu_T[0]);
break;
default:
case MO_32:
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_divl_EAX(cpu_env, cpu_T[0]);
break;
#ifdef TARGET_X86_64
case MO_64:
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_divq_EAX(cpu_env, cpu_T[0]);
break;
#endif
case 7: /* idiv */
switch(ot) {
case MO_8:
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_idivb_AL(cpu_env, cpu_T[0]);
break;
case MO_16:
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_idivw_AX(cpu_env, cpu_T[0]);
break;
default:
case MO_32:
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_idivl_EAX(cpu_env, cpu_T[0]);
break;
#ifdef TARGET_X86_64
case MO_64:
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_idivq_EAX(cpu_env, cpu_T[0]);
break;
#endif
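/* The gen_jmp_im calls before the division helpers are dropped: a
   helper that raises #DE can now apparently recover EIP through
   retaddr-based state restoration, so the translator need not
   synchronize EIP beforehand. The same simplification appears
   throughout this patch: the segment-load, lcall/ljmp,
   cmpxchg8b/16b, x87 memory-op, and system-instruction helpers all
   lose their explicit EIP/cc_op synchronization. */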
gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
do_lcall:
if (s->pe && !s->vm86) {
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
tcg_const_i32(dflag - 1),
- tcg_const_i32(s->pc - pc_start));
+ tcg_const_tl(s->pc - s->cs_base));
} else {
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T[1],
gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
do_ljmp:
if (s->pe && !s->vm86) {
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
- tcg_const_i32(s->pc - pc_start));
+ tcg_const_tl(s->pc - s->cs_base));
} else {
gen_op_movl_seg_T0_vm(R_CS);
gen_op_jmp_v(cpu_T[1]);
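/* Note the fix folded into the lcall/ljmp hunks above: the next-EIP
   value passed to the protected-mode helpers is now
   s->pc - s->cs_base as a target_ulong, rather than the instruction
   length s->pc - pc_start. */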
case 0x1b0:
case 0x1b1: /* cmpxchg Ev, Gv */
{
- int label1, label2;
+ TCGLabel *label1, *label2;
TCGv t0, t1, t2, a0;
ot = mo_b_d(b, dflag);
if (dflag == MO_64) {
if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
goto illegal_op;
- gen_jmp_im(pc_start - s->cs_base);
- gen_update_cc_op(s);
gen_lea_modrm(env, s, modrm);
gen_helper_cmpxchg16b(cpu_env, cpu_A0);
} else
{
if (!(s->cpuid_features & CPUID_CX8))
goto illegal_op;
- gen_jmp_im(pc_start - s->cs_base);
- gen_update_cc_op(s);
gen_lea_modrm(env, s, modrm);
gen_helper_cmpxchg8b(cpu_env, cpu_A0);
}
goto illegal_op;
reg = b >> 3;
ot = gen_pop_T0(s);
- gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
+ gen_movl_seg_T0(s, reg);
gen_pop_update(s, ot);
if (reg == R_SS) {
/* if reg == SS, inhibit interrupts/trace. */
case 0x1a1: /* pop fs */
case 0x1a9: /* pop gs */
ot = gen_pop_T0(s);
- gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base);
+ gen_movl_seg_T0(s, (b >> 3) & 7);
gen_pop_update(s, ot);
if (s->is_jmp) {
gen_jmp_im(s->pc - s->cs_base);
if (reg >= 6 || reg == R_CS)
goto illegal_op;
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
- gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
+ gen_movl_seg_T0(s, reg);
if (reg == R_SS) {
/* if reg == SS, inhibit interrupts/trace */
/* If several instructions disable interrupts, only the
gen_add_A0_im(s, 1 << ot);
/* load the segment first to handle exceptions properly */
gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
- gen_movl_seg_T0(s, op, pc_start - s->cs_base);
+ gen_movl_seg_T0(s, op);
/* then put the data */
gen_op_mov_reg_v(ot, reg, cpu_T[1]);
if (s->is_jmp) {
}
break;
case 0x0c: /* fldenv mem */
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
break;
case 0x0d: /* fldcw mem */
gen_helper_fldcw(cpu_env, cpu_tmp2_i32);
break;
case 0x0e: /* fnstenv mem */
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
break;
case 0x0f: /* fnstcw mem */
s->mem_index, MO_LEUW);
break;
case 0x1d: /* fldt mem */
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_fldt_ST0(cpu_env, cpu_A0);
break;
case 0x1f: /* fstpt mem */
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_fstt_ST0(cpu_env, cpu_A0);
gen_helper_fpop(cpu_env);
break;
case 0x2c: /* frstor mem */
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
break;
case 0x2e: /* fnsave mem */
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
break;
case 0x2f: /* fnstsw mem */
s->mem_index, MO_LEUW);
break;
case 0x3c: /* fbld */
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_fbld_ST0(cpu_env, cpu_A0);
break;
case 0x3e: /* fbstp */
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_fbst_ST0(cpu_env, cpu_A0);
gen_helper_fpop(cpu_env);
break;
switch(rm) {
case 0: /* fnop */
/* check exceptions (FreeBSD FPU probe) */
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_fwait(cpu_env);
break;
default:
case 0x10 ... 0x13: /* fcmovxx */
case 0x18 ... 0x1b:
{
- int op1, l1;
+ int op1;
+ TCGLabel *l1;
static const uint8_t fcmov_cc[8] = {
(JCC_B << 1),
(JCC_Z << 1),
gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
} else {
gen_ins(s, ot);
- if (use_icount) {
+ if (s->tb->cflags & CF_USE_ICOUNT) {
gen_jmp(s, s->pc - s->cs_base);
}
}
gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
} else {
gen_outs(s, ot);
- if (use_icount) {
+ if (s->tb->cflags & CF_USE_ICOUNT) {
gen_jmp(s, s->pc - s->cs_base);
}
}
tcg_gen_movi_tl(cpu_T[0], val);
gen_check_io(s, ot, pc_start - s->cs_base,
SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
- if (use_icount)
+ if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
+ }
tcg_gen_movi_i32(cpu_tmp2_i32, val);
gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
gen_op_mov_reg_v(ot, R_EAX, cpu_T[1]);
- if (use_icount) {
+ if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
svm_is_rep(prefixes));
gen_op_mov_v_reg(ot, cpu_T[1], R_EAX);
- if (use_icount)
+ if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
+ }
tcg_gen_movi_i32(cpu_tmp2_i32, val);
tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
- if (use_icount) {
+ if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
gen_check_io(s, ot, pc_start - s->cs_base,
SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
- if (use_icount)
+ if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
+ }
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
gen_op_mov_reg_v(ot, R_EAX, cpu_T[1]);
- if (use_icount) {
+ if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
svm_is_rep(prefixes));
gen_op_mov_v_reg(ot, cpu_T[1], R_EAX);
- if (use_icount)
+ if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
+ }
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
- if (use_icount) {
+ if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
set_cc_op(s, CC_OP_EFLAGS);
}
} else {
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_iret_protected(cpu_env, tcg_const_i32(dflag - 1),
tcg_const_i32(s->pc - s->cs_base));
set_cc_op(s, CC_OP_EFLAGS);
(HF_MP_MASK | HF_TS_MASK)) {
gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
} else {
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_fwait(cpu_env);
}
break;
gen_debug(s, pc_start - s->cs_base);
#else
/* start debug */
- tb_flush(env);
+ tb_flush(CPU(x86_env_get_cpu(env)));
qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
#endif
break;
goto illegal_op;
gen_op_mov_v_reg(ot, cpu_T[0], reg);
gen_lea_modrm(env, s, modrm);
- gen_jmp_im(pc_start - s->cs_base);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
if (ot == MO_16) {
gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32);
case 0xe2: /* loop */
case 0xe3: /* jecxz */
{
- int l1, l2, l3;
+ TCGLabel *l1, *l2, *l3;
tval = (int8_t)insn_get(env, s, MO_8);
next_eip = s->pc - s->cs_base;
case 0x131: /* rdtsc */
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
- if (use_icount)
+ if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
+ }
gen_helper_rdtsc(cpu_env);
- if (use_icount) {
+ if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
if (!s->pe) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_sysenter(cpu_env);
gen_eob(s);
}
if (!s->pe) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_sysexit(cpu_env, tcg_const_i32(dflag - 1));
gen_eob(s);
}
if (!s->pe) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_sysret(cpu_env, tcg_const_i32(dflag - 1));
/* condition codes are modified only in long mode */
if (s->lma) {
} else {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
- gen_jmp_im(pc_start - s->cs_base);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_lldt(cpu_env, cpu_tmp2_i32);
}
} else {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
- gen_jmp_im(pc_start - s->cs_base);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_ltr(cpu_env, cpu_tmp2_i32);
}
goto illegal_op;
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
- if (use_icount)
+ if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
+ }
gen_helper_rdtscp(cpu_env);
- if (use_icount) {
+ if (s->tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
} else
#endif
{
- int label1;
+ TCGLabel *label1;
TCGv t0, t1, t2, a0;
if (!s->pe || s->vm86)
case 0x102: /* lar */
case 0x103: /* lsl */
{
- int label1;
+ TCGLabel *label1;
TCGv t0;
if (!s->pe || s->vm86)
goto illegal_op;
break;
}
gen_lea_modrm(env, s, modrm);
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_fxsave(cpu_env, cpu_A0, tcg_const_i32(dflag == MO_64));
break;
case 1: /* fxrstor */
break;
}
gen_lea_modrm(env, s, modrm);
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_fxrstor(cpu_env, cpu_A0, tcg_const_i32(dflag == MO_64));
break;
case 2: /* ldmxcsr */
offsetof(CPUX86State, regs[i]),
reg_names[i]);
}
+
+ helper_lock_init();
}
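/* helper_lock_init presumably initializes the global lock used by
   the LOCK-prefix helpers (gen_helper_lock/unlock); it is called
   once here alongside the other translator-wide initialization. */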
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
CPUX86State *env = &cpu->env;
DisasContext dc1, *dc = &dc1;
target_ulong pc_ptr;
- uint16_t *gen_opc_end;
CPUBreakpoint *bp;
int j, lj;
uint64_t flags;
/* select memory access functions */
dc->mem_index = 0;
if (flags & HF_SOFTMMU_MASK) {
- dc->mem_index = cpu_mmu_index(env);
+ dc->mem_index = cpu_mmu_index(env, false);
}
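/* cpu_mmu_index now takes an extra "ifetch" flag; false is passed
   here because mem_index is used for data accesses. */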
dc->cpuid_features = env->features[FEAT_1_EDX];
dc->cpuid_ext_features = env->features[FEAT_1_ECX];
|| (flags & HF_SOFTMMU_MASK)
#endif
);
+ /* Do not optimize repz jumps at all in icount mode, because
+    rep movsS instructions are executed along different paths
+    in the repz_opt and !repz_opt cases. Previously the optimized
+    path was always taken except in single-step mode, where the
+    jump optimization was disabled so that the control paths of
+    normal and single-step execution stayed equivalent.
+    Now the repz jump optimization is also disabled in
+    record/replay mode, and there is always an additional step
+    for ECX=0 when icount is enabled.
+ */
+ dc->repz_opt = !dc->jmp_opt && !(tb->cflags & CF_USE_ICOUNT);
#if 0
/* check addseg logic */
if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
cpu_ptr1 = tcg_temp_new_ptr();
cpu_cc_srcT = tcg_temp_local_new();
- gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
-
dc->is_jmp = DISAS_NEXT;
pc_ptr = pc_start;
lj = -1;
if (max_insns == 0)
max_insns = CF_COUNT_MASK;
- gen_tb_start();
+ gen_tb_start(tb);
for(;;) {
if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == pc_ptr &&
!((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
gen_debug(dc, pc_ptr - dc->cs_base);
- break;
+ goto done_generating;
}
}
}
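/* A plain "break" here only left the QTAILQ_FOREACH loop, so code
   generation kept going after the breakpoint; jumping to
   done_generating ends the block right away. */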
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
if (lj < j) {
lj++;
while (lj < j)
gen_eob(dc);
break;
}
+ /* Do not cross a page boundary in icount mode, since doing so
+    can raise an exception. Break the block only when the boundary
+    would be crossed by its first instruction. If the current
+    instruction has already crossed the boundary, that is fine,
+    because no exception stopped this code.
+ */
+ if ((tb->cflags & CF_USE_ICOUNT)
+ && ((pc_ptr & TARGET_PAGE_MASK)
+ != ((pc_ptr + TARGET_MAX_INSN_SIZE - 1) & TARGET_PAGE_MASK)
+ || (pc_ptr & ~TARGET_PAGE_MASK) == 0)) {
+ gen_jmp_im(pc_ptr - dc->cs_base);
+ gen_eob(dc);
+ break;
+ }
/* if too long translation, stop generation too */
- if (tcg_ctx.gen_opc_ptr >= gen_opc_end ||
+ if (tcg_op_buf_full() ||
(pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
num_insns >= max_insns) {
gen_jmp_im(pc_ptr - dc->cs_base);
}
if (tb->cflags & CF_LAST_IO)
gen_io_end();
+done_generating:
gen_tb_end(tb, num_insns);
- *tcg_ctx.gen_opc_ptr = INDEX_op_end;
+
/* don't forget to fill in the last values */
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
lj++;
while (lj <= j)
tcg_ctx.gen_opc_instr_start[lj++] = 0;
else
#endif
disas_flags = !dc->code32;
- log_target_disas(env, pc_start, pc_ptr - pc_start, disas_flags);
+ log_target_disas(cs, pc_start, pc_ptr - pc_start, disas_flags);
qemu_log("\n");
}
#endif