static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
static TCGv_i64 cpu_tmp1_i64;
-static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
-
#include "exec/gen-icount.h"
#ifdef TARGET_X86_64
static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip,
uint32_t svm_flags)
{
- int state_saved;
target_ulong next_eip;
- state_saved = 0;
if (s->pe && (s->cpl > s->iopl || s->vm86)) {
- gen_update_cc_op(s);
- gen_jmp_im(cur_eip);
- state_saved = 1;
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
switch (ot) {
case MO_8:
}
}
if(s->flags & HF_SVMI_MASK) {
- if (!state_saved) {
- gen_update_cc_op(s);
- gen_jmp_im(cur_eip);
- }
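+ /* The SVM intercept helper receives next_eip as an addend relative to
+ cur_eip, so eip (and cc_op) must still be synced before the call. */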
+ gen_update_cc_op(s);
+ gen_jmp_im(cur_eip);
svm_flags |= (1 << (4 + ot));
next_eip = s->pc - s->cs_base;
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
/* move T0 to seg_reg and compute if the CPU state may change. Never
call this function with seg_reg == R_CS */
-static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
+static void gen_movl_seg_T0(DisasContext *s, int seg_reg)
{
if (s->pe && !s->vm86) {
- /* XXX: optimize by finding processor state dynamically */
- gen_update_cc_op(s);
- gen_jmp_im(cur_eip);
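+ /* load_seg can raise an exception; cc_op and eip are now recovered
+ from the insn_start data on unwind, so no explicit sync is needed. */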
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32);
/* abort translation because the addseg value may change or
target_ulong next_eip, tval;
int rex_w, rex_r;
- if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
- tcg_gen_debug_insn_start(pc_start);
- }
s->pc = pc_start;
prefixes = 0;
s->override = -1;
gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
do_lcall:
if (s->pe && !s->vm86) {
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
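+ /* next_eip is now passed to the helper as a full offset from cs_base
+ instead of an addend to a previously synced eip. */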
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
tcg_const_i32(dflag - 1),
- tcg_const_i32(s->pc - pc_start));
+ tcg_const_tl(s->pc - s->cs_base));
} else {
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T[1],
gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
do_ljmp:
if (s->pe && !s->vm86) {
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
- tcg_const_i32(s->pc - pc_start));
+ tcg_const_tl(s->pc - s->cs_base));
} else {
gen_op_movl_seg_T0_vm(R_CS);
gen_op_jmp_v(cpu_T[1]);
if (dflag == MO_64) {
if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
goto illegal_op;
- gen_jmp_im(pc_start - s->cs_base);
- gen_update_cc_op(s);
gen_lea_modrm(env, s, modrm);
gen_helper_cmpxchg16b(cpu_env, cpu_A0);
} else
{
if (!(s->cpuid_features & CPUID_CX8))
goto illegal_op;
- gen_jmp_im(pc_start - s->cs_base);
- gen_update_cc_op(s);
gen_lea_modrm(env, s, modrm);
gen_helper_cmpxchg8b(cpu_env, cpu_A0);
}
goto illegal_op;
reg = b >> 3;
ot = gen_pop_T0(s);
- gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
+ gen_movl_seg_T0(s, reg);
gen_pop_update(s, ot);
if (reg == R_SS) {
/* if reg == SS, inhibit interrupts/trace. */
case 0x1a1: /* pop fs */
case 0x1a9: /* pop gs */
ot = gen_pop_T0(s);
- gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base);
+ gen_movl_seg_T0(s, (b >> 3) & 7);
gen_pop_update(s, ot);
if (s->is_jmp) {
gen_jmp_im(s->pc - s->cs_base);
if (reg >= 6 || reg == R_CS)
goto illegal_op;
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
- gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
+ gen_movl_seg_T0(s, reg);
if (reg == R_SS) {
/* if reg == SS, inhibit interrupts/trace */
/* If several instructions disable interrupts, only the
gen_add_A0_im(s, 1 << ot);
/* load the segment first to handle exceptions properly */
gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
- gen_movl_seg_T0(s, op, pc_start - s->cs_base);
+ gen_movl_seg_T0(s, op);
/* then put the data */
gen_op_mov_reg_v(ot, reg, cpu_T[1]);
if (s->is_jmp) {
set_cc_op(s, CC_OP_EFLAGS);
}
} else {
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_iret_protected(cpu_env, tcg_const_i32(dflag - 1),
tcg_const_i32(s->pc - s->cs_base));
set_cc_op(s, CC_OP_EFLAGS);
goto illegal_op;
gen_op_mov_v_reg(ot, cpu_T[0], reg);
gen_lea_modrm(env, s, modrm);
- gen_jmp_im(pc_start - s->cs_base);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
if (ot == MO_16) {
gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32);
if (!s->pe) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_sysenter(cpu_env);
gen_eob(s);
}
if (!s->pe) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_sysexit(cpu_env, tcg_const_i32(dflag - 1));
gen_eob(s);
}
if (!s->pe) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
gen_helper_sysret(cpu_env, tcg_const_i32(dflag - 1));
/* condition codes are modified only in long mode */
if (s->lma) {
} else {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
- gen_jmp_im(pc_start - s->cs_base);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_lldt(cpu_env, cpu_tmp2_i32);
}
} else {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
- gen_jmp_im(pc_start - s->cs_base);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_ltr(cpu_env, cpu_tmp2_i32);
}
}
-/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
- basic block 'tb'. If search_pc is TRUE, also generate PC
- information for each intermediate instruction. */
-static inline void gen_intermediate_code_internal(X86CPU *cpu,
- TranslationBlock *tb,
- bool search_pc)
+/* generate intermediate code for basic block 'tb'. */
+void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
{
+ X86CPU *cpu = x86_env_get_cpu(env);
CPUState *cs = CPU(cpu);
- CPUX86State *env = &cpu->env;
DisasContext dc1, *dc = &dc1;
target_ulong pc_ptr;
- CPUBreakpoint *bp;
- int j, lj;
uint64_t flags;
target_ulong pc_start;
target_ulong cs_base;
dc->is_jmp = DISAS_NEXT;
pc_ptr = pc_start;
- lj = -1;
num_insns = 0;
max_insns = tb->cflags & CF_COUNT_MASK;
- if (max_insns == 0)
+ if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
+ }
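+ /* Per-insn start data is only recorded for up to TCG_MAX_INSNS
+ instructions, so clamp the budget accordingly. */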
+ if (max_insns > TCG_MAX_INSNS) {
+ max_insns = TCG_MAX_INSNS;
+ }
gen_tb_start(tb);
for(;;) {
- if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
- QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
- if (bp->pc == pc_ptr &&
- !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
- gen_debug(dc, pc_ptr - dc->cs_base);
- goto done_generating;
- }
- }
- }
- if (search_pc) {
- j = tcg_op_buf_count();
- if (lj < j) {
- lj++;
- while (lj < j)
- tcg_ctx.gen_opc_instr_start[lj++] = 0;
- }
- tcg_ctx.gen_opc_pc[lj] = pc_ptr;
- gen_opc_cc_op[lj] = dc->cc_op;
- tcg_ctx.gen_opc_instr_start[lj] = 1;
- tcg_ctx.gen_opc_icount[lj] = num_insns;
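+ /* Record eip and cc_op for this insn; restore_state_to_opc() reads
+ them back as data[0] and data[1] when unwinding. */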
+ tcg_gen_insn_start(pc_ptr, dc->cc_op);
+ num_insns++;
+
+ /* If RF is set, suppress an internally generated breakpoint. */
+ if (unlikely(cpu_breakpoint_test(cs, pc_ptr,
+ tb->flags & HF_RF_MASK
+ ? BP_GDB : BP_ANY))) {
+ gen_debug(dc, pc_ptr - dc->cs_base);
+ goto done_generating;
}
- if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
+ if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
gen_io_start();
+ }
pc_ptr = disas_insn(env, dc, pc_ptr);
- num_insns++;
/* stop translation if indicated */
if (dc->is_jmp)
break;
done_generating:
gen_tb_end(tb, num_insns);
- /* we don't forget to fill the last values */
- if (search_pc) {
- j = tcg_op_buf_count();
- lj++;
- while (lj <= j)
- tcg_ctx.gen_opc_instr_start[lj++] = 0;
- }
-
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
int disas_flags;
}
#endif
- if (!search_pc) {
- tb->size = pc_ptr - pc_start;
- tb->icount = num_insns;
- }
+ tb->size = pc_ptr - pc_start;
+ tb->icount = num_insns;
}
-void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
+void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb,
+ target_ulong *data)
{
- gen_intermediate_code_internal(x86_env_get_cpu(env), tb, false);
-}
-
-void gen_intermediate_code_pc(CPUX86State *env, TranslationBlock *tb)
-{
- gen_intermediate_code_internal(x86_env_get_cpu(env), tb, true);
-}
-
-void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, int pc_pos)
-{
- int cc_op;
-#ifdef DEBUG_DISAS
- if (qemu_loglevel_mask(CPU_LOG_TB_OP)) {
- int i;
- qemu_log("RESTORE:\n");
- for(i = 0;i <= pc_pos; i++) {
- if (tcg_ctx.gen_opc_instr_start[i]) {
- qemu_log("0x%04x: " TARGET_FMT_lx "\n", i,
- tcg_ctx.gen_opc_pc[i]);
- }
- }
- qemu_log("pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
- pc_pos, tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base,
- (uint32_t)tb->cs_base);
- }
-#endif
- env->eip = tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base;
- cc_op = gen_opc_cc_op[pc_pos];
- if (cc_op != CC_OP_DYNAMIC)
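+ /* data[] holds the values recorded by tcg_gen_insn_start():
+ data[0] is eip + cs_base, data[1] is cc_op. */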
+ int cc_op = data[1];
+ env->eip = data[0] - tb->cs_base;
+ if (cc_op != CC_OP_DYNAMIC) {
env->cc_op = cc_op;
+ }
}