int cpuid_features;
int cpuid_ext_features;
int cpuid_ext2_features;
+ int cpuid_ext3_features;
} DisasContext;
static void gen_eob(DisasContext *s);
OP_SAR = 7,
};
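+/* jump condition encoding, matching the x86 opcodes: for a Jcc/SETcc
+   opcode byte b, (b >> 1) & 7 selects one of the conditions below and
+   b & 1 inverts it (e.g. JNZ is JCC_Z with the invert bit set) */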
+enum {
+ JCC_O,
+ JCC_B,
+ JCC_Z,
+ JCC_BE,
+ JCC_S,
+ JCC_P,
+ JCC_L,
+ JCC_LE,
+};
+
/* operand size */
enum {
OT_BYTE = 0,
#define NB_OP_SIZES 4
-#define DEF_REGS(prefix, suffix) \
- prefix ## EAX ## suffix,\
- prefix ## ECX ## suffix,\
- prefix ## EDX ## suffix,\
- prefix ## EBX ## suffix,\
- prefix ## ESP ## suffix,\
- prefix ## EBP ## suffix,\
- prefix ## ESI ## suffix,\
- prefix ## EDI ## suffix,\
- prefix ## R8 ## suffix,\
- prefix ## R9 ## suffix,\
- prefix ## R10 ## suffix,\
- prefix ## R11 ## suffix,\
- prefix ## R12 ## suffix,\
- prefix ## R13 ## suffix,\
- prefix ## R14 ## suffix,\
- prefix ## R15 ## suffix,
-
#else /* !TARGET_X86_64 */
#define NB_OP_SIZES 3
-#define DEF_REGS(prefix, suffix) \
- prefix ## EAX ## suffix,\
- prefix ## ECX ## suffix,\
- prefix ## EDX ## suffix,\
- prefix ## EBX ## suffix,\
- prefix ## ESP ## suffix,\
- prefix ## EBP ## suffix,\
- prefix ## ESI ## suffix,\
- prefix ## EDI ## suffix,
-
#endif /* !TARGET_X86_64 */
#if defined(WORDS_BIGENDIAN)
tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUState, eip));
}
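+/* the 'size' argument follows the aflag/dflag encoding: 0 = 16 bit,
+   1 = 32 bit (with the high 32 bits cleared on x86_64), 2 = 64 bit.
+   The same convention is used by gen_op_add_reg_T0 below. */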
-static inline void gen_op_addw_ESP_im(int32_t val)
+static inline void gen_op_add_reg_im(int size, int reg, int32_t val)
{
- tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[R_ESP]));
- tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val);
- tcg_gen_st16_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[R_ESP]) + REG_W_OFFSET);
+ switch(size) {
+ case 0:
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
+ tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val);
+ tcg_gen_st16_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET);
+ break;
+ case 1:
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
+ tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val);
+#ifdef TARGET_X86_64
+ tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xffffffff);
+#endif
+ tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
+ break;
+#ifdef TARGET_X86_64
+ case 2:
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
+ tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val);
+ tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
+ break;
+#endif
+ }
}
-static inline void gen_op_addl_ESP_im(int32_t val)
+static inline void gen_op_add_reg_T0(int size, int reg)
{
- tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[R_ESP]));
- tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val);
+ switch(size) {
+ case 0:
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
+ tcg_gen_add_tl(cpu_tmp0, cpu_tmp0, cpu_T[0]);
+ tcg_gen_st16_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET);
+ break;
+ case 1:
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
+ tcg_gen_add_tl(cpu_tmp0, cpu_tmp0, cpu_T[0]);
#ifdef TARGET_X86_64
- tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xffffffff);
+ tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xffffffff);
#endif
- tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[R_ESP]));
-}
-
+ tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
+ break;
#ifdef TARGET_X86_64
-static inline void gen_op_addq_ESP_im(int32_t val)
-{
- tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[R_ESP]));
- tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val);
- tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[R_ESP]));
-}
+ case 2:
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
+ tcg_gen_add_tl(cpu_tmp0, cpu_tmp0, cpu_T[0]);
+ tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
+ break;
#endif
+ }
+}
static inline void gen_op_set_cc_op(int32_t val)
{
}
#endif
-static GenOpFunc *gen_op_cmov_reg_T1_T0[NB_OP_SIZES - 1][CPU_NB_REGS] = {
- [0] = {
- DEF_REGS(gen_op_cmovw_, _T1_T0)
- },
- [1] = {
- DEF_REGS(gen_op_cmovl_, _T1_T0)
- },
-#ifdef TARGET_X86_64
- [2] = {
- DEF_REGS(gen_op_cmovq_, _T1_T0)
- },
-#endif
-};
-
static inline void gen_op_lds_T0_A0(int idx)
{
int mem_index = (idx >> 2) - 1;
}
}
-static GenOpFunc *gen_op_movl_T0_Dshift[4] = {
- gen_op_movl_T0_Dshiftb,
- gen_op_movl_T0_Dshiftw,
- gen_op_movl_T0_Dshiftl,
- X86_64_ONLY(gen_op_movl_T0_Dshiftq),
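+/* T0 = direction flag (+1/-1) shifted left by 'ot', i.e. the signed
+   per-element increment used by the string operations */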
+static inline void gen_op_movl_T0_Dshift(int ot)
+{
+ tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUState, df));
+ tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
};
-static GenOpFunc1 *gen_op_jnz_ecx[3] = {
- gen_op_jnz_ecxw,
- gen_op_jnz_ecxl,
- X86_64_ONLY(gen_op_jnz_ecxq),
-};
+static void gen_extu(int ot, TCGv reg)
+{
+ switch(ot) {
+ case OT_BYTE:
+ tcg_gen_ext8u_tl(reg, reg);
+ break;
+ case OT_WORD:
+ tcg_gen_ext16u_tl(reg, reg);
+ break;
+ case OT_LONG:
+ tcg_gen_ext32u_tl(reg, reg);
+ break;
+ default:
+ break;
+ }
+}
-static GenOpFunc1 *gen_op_jz_ecx[3] = {
- gen_op_jz_ecxw,
- gen_op_jz_ecxl,
- X86_64_ONLY(gen_op_jz_ecxq),
-};
+static void gen_exts(int ot, TCGv reg)
+{
+ switch(ot) {
+ case OT_BYTE:
+ tcg_gen_ext8s_tl(reg, reg);
+ break;
+ case OT_WORD:
+ tcg_gen_ext16s_tl(reg, reg);
+ break;
+ case OT_LONG:
+ tcg_gen_ext32s_tl(reg, reg);
+ break;
+ default:
+ break;
+ }
+}
-static GenOpFunc *gen_op_dec_ECX[3] = {
- gen_op_decw_ECX,
- gen_op_decl_ECX,
- X86_64_ONLY(gen_op_decq_ECX),
-};
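+/* 'size' is s->aflag, so 'size + 1' maps 0/1/2 to OT_WORD/OT_LONG/OT_QUAD
+   for the zero extension of CX/ECX/RCX */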
+static inline void gen_op_jnz_ecx(int size, int label1)
+{
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[R_ECX]));
+ gen_extu(size + 1, cpu_tmp0);
+ tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
+}
-static GenOpFunc1 *gen_op_string_jnz_sub[2][4] = {
- {
- gen_op_jnz_subb,
- gen_op_jnz_subw,
- gen_op_jnz_subl,
- X86_64_ONLY(gen_op_jnz_subq),
- },
- {
- gen_op_jz_subb,
- gen_op_jz_subw,
- gen_op_jz_subl,
- X86_64_ONLY(gen_op_jz_subq),
- },
-};
+static inline void gen_op_jz_ecx(int size, int label1)
+{
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[R_ECX]));
+ gen_extu(size + 1, cpu_tmp0);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
+}
static void *helper_in_func[3] = {
helper_inb,
gen_op_ld_T0_A0(ot + s->mem_index);
gen_string_movl_A0_EDI(s);
gen_op_st_T0_A0(ot + s->mem_index);
- gen_op_movl_T0_Dshift[ot]();
-#ifdef TARGET_X86_64
- if (s->aflag == 2) {
- gen_op_addq_ESI_T0();
- gen_op_addq_EDI_T0();
- } else
-#endif
- if (s->aflag) {
- gen_op_addl_ESI_T0();
- gen_op_addl_EDI_T0();
- } else {
- gen_op_addw_ESI_T0();
- gen_op_addw_EDI_T0();
- }
+ gen_op_movl_T0_Dshift(ot);
+ gen_op_add_reg_T0(s->aflag, R_ESI);
+ gen_op_add_reg_T0(s->aflag, R_EDI);
}
static inline void gen_update_cc_op(DisasContext *s)
tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}
+/* compute eflags.C to reg */
+static void gen_compute_eflags_c(TCGv reg)
+{
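+    /* dynamically index cc_table by the current cc_op and call the
+       compute_c handler; a CCTable entry holds two function pointers,
+       hence the shift by 3 (32 bit host) or 4 (64 bit host) */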
+#if TCG_TARGET_REG_BITS == 32
+ tcg_gen_shli_i32(cpu_tmp2_i32, cpu_cc_op, 3);
+ tcg_gen_addi_i32(cpu_tmp2_i32, cpu_tmp2_i32,
+ (long)cc_table + offsetof(CCTable, compute_c));
+ tcg_gen_ld_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0);
+ tcg_gen_call(&tcg_ctx, cpu_tmp2_i32, TCG_CALL_PURE,
+ 1, &cpu_tmp2_i32, 0, NULL);
+#else
+ tcg_gen_extu_i32_tl(cpu_tmp1_i64, cpu_cc_op);
+ tcg_gen_shli_i64(cpu_tmp1_i64, cpu_tmp1_i64, 4);
+ tcg_gen_addi_i64(cpu_tmp1_i64, cpu_tmp1_i64,
+ (long)cc_table + offsetof(CCTable, compute_c));
+ tcg_gen_ld_i64(cpu_tmp1_i64, cpu_tmp1_i64, 0);
+ tcg_gen_call(&tcg_ctx, cpu_tmp1_i64, TCG_CALL_PURE,
+ 1, &cpu_tmp2_i32, 0, NULL);
+#endif
+ tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
+}
+
+/* compute all eflags to reg */
+static void gen_compute_eflags(TCGv reg)
+{
+#if TCG_TARGET_REG_BITS == 32
+ tcg_gen_shli_i32(cpu_tmp2_i32, cpu_cc_op, 3);
+ tcg_gen_addi_i32(cpu_tmp2_i32, cpu_tmp2_i32,
+ (long)cc_table + offsetof(CCTable, compute_all));
+ tcg_gen_ld_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0);
+ tcg_gen_call(&tcg_ctx, cpu_tmp2_i32, TCG_CALL_PURE,
+ 1, &cpu_tmp2_i32, 0, NULL);
+#else
+ tcg_gen_extu_i32_tl(cpu_tmp1_i64, cpu_cc_op);
+ tcg_gen_shli_i64(cpu_tmp1_i64, cpu_tmp1_i64, 4);
+ tcg_gen_addi_i64(cpu_tmp1_i64, cpu_tmp1_i64,
+ (long)cc_table + offsetof(CCTable, compute_all));
+ tcg_gen_ld_i64(cpu_tmp1_i64, cpu_tmp1_i64, 0);
+ tcg_gen_call(&tcg_ctx, cpu_tmp1_i64, TCG_CALL_PURE,
+ 1, &cpu_tmp2_i32, 0, NULL);
+#endif
+ tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
+}
+
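+/* compute one condition into T0 from the full EFLAGS value; the shift
+   counts below are the EFLAGS bit positions: CF = 0, PF = 2, ZF = 6,
+   SF = 7, OF = 11 */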
+static inline void gen_setcc_slow_T0(int op)
+{
+ switch(op) {
+ case JCC_O:
+ gen_compute_eflags(cpu_T[0]);
+ tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 11);
+ tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
+ break;
+ case JCC_B:
+ gen_compute_eflags_c(cpu_T[0]);
+ break;
+ case JCC_Z:
+ gen_compute_eflags(cpu_T[0]);
+ tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 6);
+ tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
+ break;
+ case JCC_BE:
+ gen_compute_eflags(cpu_tmp0);
+ tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 6);
+ tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
+ tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
+ break;
+ case JCC_S:
+ gen_compute_eflags(cpu_T[0]);
+ tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 7);
+ tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
+ break;
+ case JCC_P:
+ gen_compute_eflags(cpu_T[0]);
+ tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 2);
+ tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
+ break;
+ case JCC_L:
+ gen_compute_eflags(cpu_tmp0);
+ tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */
+ tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 7); /* CC_S */
+ tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
+ tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
+ break;
+ default:
+ case JCC_LE:
+ gen_compute_eflags(cpu_tmp0);
+ tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */
+ tcg_gen_shri_tl(cpu_tmp4, cpu_tmp0, 7); /* CC_S */
+ tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 6); /* CC_Z */
+ tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
+ tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
+ tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
+ break;
+ }
+}
+
+/* return true if setcc_slow is not needed (WARNING: must be kept in
+ sync with gen_jcc1) */
+static int is_fast_jcc_case(DisasContext *s, int b)
+{
+ int jcc_op;
+ jcc_op = (b >> 1) & 7;
+ switch(s->cc_op) {
+ /* we optimize the cmp/jcc case */
+ case CC_OP_SUBB:
+ case CC_OP_SUBW:
+ case CC_OP_SUBL:
+ case CC_OP_SUBQ:
+ if (jcc_op == JCC_O || jcc_op == JCC_P)
+ goto slow_jcc;
+ break;
+
+ /* some jumps are easy to compute */
+ case CC_OP_ADDB:
+ case CC_OP_ADDW:
+ case CC_OP_ADDL:
+ case CC_OP_ADDQ:
+
+ case CC_OP_LOGICB:
+ case CC_OP_LOGICW:
+ case CC_OP_LOGICL:
+ case CC_OP_LOGICQ:
+
+ case CC_OP_INCB:
+ case CC_OP_INCW:
+ case CC_OP_INCL:
+ case CC_OP_INCQ:
+
+ case CC_OP_DECB:
+ case CC_OP_DECW:
+ case CC_OP_DECL:
+ case CC_OP_DECQ:
+
+ case CC_OP_SHLB:
+ case CC_OP_SHLW:
+ case CC_OP_SHLL:
+ case CC_OP_SHLQ:
+ if (jcc_op != JCC_Z && jcc_op != JCC_S)
+ goto slow_jcc;
+ break;
+ default:
+ slow_jcc:
+ return 0;
+ }
+ return 1;
+}
+
+/* generate a conditional jump to label 'l1' according to jump opcode
+   value 'b'. In the fast case, T0 is guaranteed not to be used. */
+static inline void gen_jcc1(DisasContext *s, int cc_op, int b, int l1)
+{
+ int inv, jcc_op, size, cond;
+ TCGv t0;
+
+ inv = b & 1;
+ jcc_op = (b >> 1) & 7;
+
+ switch(cc_op) {
+ /* we optimize the cmp/jcc case */
+ case CC_OP_SUBB:
+ case CC_OP_SUBW:
+ case CC_OP_SUBL:
+ case CC_OP_SUBQ:
+
+ size = cc_op - CC_OP_SUBB;
+ switch(jcc_op) {
+ case JCC_Z:
+ fast_jcc_z:
+ switch(size) {
+ case 0:
+ tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xff);
+ t0 = cpu_tmp0;
+ break;
+ case 1:
+ tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xffff);
+ t0 = cpu_tmp0;
+ break;
+#ifdef TARGET_X86_64
+ case 2:
+ tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xffffffff);
+ t0 = cpu_tmp0;
+ break;
+#endif
+ default:
+ t0 = cpu_cc_dst;
+ break;
+ }
+ tcg_gen_brcondi_tl(inv ? TCG_COND_NE : TCG_COND_EQ, t0, 0, l1);
+ break;
+ case JCC_S:
+ fast_jcc_s:
+ switch(size) {
+ case 0:
+ tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80);
+ tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
+ 0, l1);
+ break;
+ case 1:
+ tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x8000);
+ tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
+ 0, l1);
+ break;
+#ifdef TARGET_X86_64
+ case 2:
+ tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80000000);
+ tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
+ 0, l1);
+ break;
+#endif
+ default:
+ tcg_gen_brcondi_tl(inv ? TCG_COND_GE : TCG_COND_LT, cpu_cc_dst,
+ 0, l1);
+ break;
+ }
+ break;
+
+ case JCC_B:
+ cond = inv ? TCG_COND_GEU : TCG_COND_LTU;
+ goto fast_jcc_b;
+ case JCC_BE:
+ cond = inv ? TCG_COND_GTU : TCG_COND_LEU;
+ fast_jcc_b:
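+            /* for CC_OP_SUBx, cc_dst is the result and cc_src the
+               subtrahend, so cc_dst + cc_src recovers the first operand
+               and the borrow test becomes a direct unsigned compare */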
+ tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src);
+ switch(size) {
+ case 0:
+ t0 = cpu_tmp0;
+ tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xff);
+ tcg_gen_andi_tl(t0, cpu_cc_src, 0xff);
+ break;
+ case 1:
+ t0 = cpu_tmp0;
+ tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xffff);
+ tcg_gen_andi_tl(t0, cpu_cc_src, 0xffff);
+ break;
+#ifdef TARGET_X86_64
+ case 2:
+ t0 = cpu_tmp0;
+ tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xffffffff);
+ tcg_gen_andi_tl(t0, cpu_cc_src, 0xffffffff);
+ break;
+#endif
+ default:
+ t0 = cpu_cc_src;
+ break;
+ }
+ tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
+ break;
+
+ case JCC_L:
+ cond = inv ? TCG_COND_GE : TCG_COND_LT;
+ goto fast_jcc_l;
+ case JCC_LE:
+ cond = inv ? TCG_COND_GT : TCG_COND_LE;
+ fast_jcc_l:
+ tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src);
+ switch(size) {
+ case 0:
+ t0 = cpu_tmp0;
+ tcg_gen_ext8s_tl(cpu_tmp4, cpu_tmp4);
+ tcg_gen_ext8s_tl(t0, cpu_cc_src);
+ break;
+ case 1:
+ t0 = cpu_tmp0;
+ tcg_gen_ext16s_tl(cpu_tmp4, cpu_tmp4);
+ tcg_gen_ext16s_tl(t0, cpu_cc_src);
+ break;
+#ifdef TARGET_X86_64
+ case 2:
+ t0 = cpu_tmp0;
+ tcg_gen_ext32s_tl(cpu_tmp4, cpu_tmp4);
+ tcg_gen_ext32s_tl(t0, cpu_cc_src);
+ break;
+#endif
+ default:
+ t0 = cpu_cc_src;
+ break;
+ }
+ tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
+ break;
+
+ default:
+ goto slow_jcc;
+ }
+ break;
+
+ /* some jumps are easy to compute */
+ case CC_OP_ADDB:
+ case CC_OP_ADDW:
+ case CC_OP_ADDL:
+ case CC_OP_ADDQ:
+
+ case CC_OP_ADCB:
+ case CC_OP_ADCW:
+ case CC_OP_ADCL:
+ case CC_OP_ADCQ:
+
+ case CC_OP_SBBB:
+ case CC_OP_SBBW:
+ case CC_OP_SBBL:
+ case CC_OP_SBBQ:
+
+ case CC_OP_LOGICB:
+ case CC_OP_LOGICW:
+ case CC_OP_LOGICL:
+ case CC_OP_LOGICQ:
+
+ case CC_OP_INCB:
+ case CC_OP_INCW:
+ case CC_OP_INCL:
+ case CC_OP_INCQ:
+
+ case CC_OP_DECB:
+ case CC_OP_DECW:
+ case CC_OP_DECL:
+ case CC_OP_DECQ:
+
+ case CC_OP_SHLB:
+ case CC_OP_SHLW:
+ case CC_OP_SHLL:
+ case CC_OP_SHLQ:
+
+ case CC_OP_SARB:
+ case CC_OP_SARW:
+ case CC_OP_SARL:
+ case CC_OP_SARQ:
+ switch(jcc_op) {
+ case JCC_Z:
+ size = (cc_op - CC_OP_ADDB) & 3;
+ goto fast_jcc_z;
+ case JCC_S:
+ size = (cc_op - CC_OP_ADDB) & 3;
+ goto fast_jcc_s;
+ default:
+ goto slow_jcc;
+ }
+ break;
+ default:
+ slow_jcc:
+ gen_setcc_slow_T0(jcc_op);
+ tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE,
+ cpu_T[0], 0, l1);
+ break;
+ }
+}
+
/* XXX: does not work with gdbstub "ice" single step - not a
serious problem */
static int gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
l1 = gen_new_label();
l2 = gen_new_label();
- gen_op_jnz_ecx[s->aflag](l1);
+ gen_op_jnz_ecx(s->aflag, l1);
gen_set_label(l2);
gen_jmp_tb(s, next_eip, 1);
gen_set_label(l1);
gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
gen_string_movl_A0_EDI(s);
gen_op_st_T0_A0(ot + s->mem_index);
- gen_op_movl_T0_Dshift[ot]();
-#ifdef TARGET_X86_64
- if (s->aflag == 2) {
- gen_op_addq_EDI_T0();
- } else
-#endif
- if (s->aflag) {
- gen_op_addl_EDI_T0();
- } else {
- gen_op_addw_EDI_T0();
- }
+ gen_op_movl_T0_Dshift(ot);
+ gen_op_add_reg_T0(s->aflag, R_EDI);
}
static inline void gen_lods(DisasContext *s, int ot)
gen_string_movl_A0_ESI(s);
gen_op_ld_T0_A0(ot + s->mem_index);
gen_op_mov_reg_T0(ot, R_EAX);
- gen_op_movl_T0_Dshift[ot]();
-#ifdef TARGET_X86_64
- if (s->aflag == 2) {
- gen_op_addq_ESI_T0();
- } else
-#endif
- if (s->aflag) {
- gen_op_addl_ESI_T0();
- } else {
- gen_op_addw_ESI_T0();
- }
+ gen_op_movl_T0_Dshift(ot);
+ gen_op_add_reg_T0(s->aflag, R_ESI);
}
static inline void gen_scas(DisasContext *s, int ot)
gen_string_movl_A0_EDI(s);
gen_op_ld_T1_A0(ot + s->mem_index);
gen_op_cmpl_T0_T1_cc();
- gen_op_movl_T0_Dshift[ot]();
-#ifdef TARGET_X86_64
- if (s->aflag == 2) {
- gen_op_addq_EDI_T0();
- } else
-#endif
- if (s->aflag) {
- gen_op_addl_EDI_T0();
- } else {
- gen_op_addw_EDI_T0();
- }
+ gen_op_movl_T0_Dshift(ot);
+ gen_op_add_reg_T0(s->aflag, R_EDI);
}
static inline void gen_cmps(DisasContext *s, int ot)
gen_string_movl_A0_EDI(s);
gen_op_ld_T1_A0(ot + s->mem_index);
gen_op_cmpl_T0_T1_cc();
- gen_op_movl_T0_Dshift[ot]();
-#ifdef TARGET_X86_64
- if (s->aflag == 2) {
- gen_op_addq_ESI_T0();
- gen_op_addq_EDI_T0();
- } else
-#endif
- if (s->aflag) {
- gen_op_addl_ESI_T0();
- gen_op_addl_EDI_T0();
- } else {
- gen_op_addw_ESI_T0();
- gen_op_addw_EDI_T0();
- }
+ gen_op_movl_T0_Dshift(ot);
+ gen_op_add_reg_T0(s->aflag, R_ESI);
+ gen_op_add_reg_T0(s->aflag, R_EDI);
}
static inline void gen_ins(DisasContext *s, int ot)
{
gen_string_movl_A0_EDI(s);
+ /* Note: we must do this dummy write first to be restartable in
+ case of page fault. */
gen_op_movl_T0_0();
gen_op_st_T0_A0(ot + s->mem_index);
gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
tcg_gen_helper_1_1(helper_in_func[ot], cpu_T[0], cpu_tmp2_i32);
gen_op_st_T0_A0(ot + s->mem_index);
- gen_op_movl_T0_Dshift[ot]();
-#ifdef TARGET_X86_64
- if (s->aflag == 2) {
- gen_op_addq_EDI_T0();
- } else
-#endif
- if (s->aflag) {
- gen_op_addl_EDI_T0();
- } else {
- gen_op_addw_EDI_T0();
- }
+ gen_op_movl_T0_Dshift(ot);
+ gen_op_add_reg_T0(s->aflag, R_EDI);
}
static inline void gen_outs(DisasContext *s, int ot)
tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
tcg_gen_helper_0_2(helper_out_func[ot], cpu_tmp2_i32, cpu_tmp3_i32);
- gen_op_movl_T0_Dshift[ot]();
-#ifdef TARGET_X86_64
- if (s->aflag == 2) {
- gen_op_addq_ESI_T0();
- } else
-#endif
- if (s->aflag) {
- gen_op_addl_ESI_T0();
- } else {
- gen_op_addw_ESI_T0();
- }
+ gen_op_movl_T0_Dshift(ot);
+ gen_op_add_reg_T0(s->aflag, R_ESI);
}
/* same method as Valgrind : we generate jumps to current or next
gen_update_cc_op(s); \
l2 = gen_jz_ecx_string(s, next_eip); \
gen_ ## op(s, ot); \
- gen_op_dec_ECX[s->aflag](); \
+ gen_op_add_reg_im(s->aflag, R_ECX, -1); \
/* a loop would cause two single step exceptions if ECX = 1 \
before rep string_insn */ \
if (!s->jmp_opt) \
- gen_op_jz_ecx[s->aflag](l2); \
+ gen_op_jz_ecx(s->aflag, l2); \
gen_jmp(s, cur_eip); \
}
gen_update_cc_op(s); \
l2 = gen_jz_ecx_string(s, next_eip); \
gen_ ## op(s, ot); \
- gen_op_dec_ECX[s->aflag](); \
+ gen_op_add_reg_im(s->aflag, R_ECX, -1); \
gen_op_set_cc_op(CC_OP_SUBB + ot); \
- gen_op_string_jnz_sub[nz][ot](l2);\
+ gen_jcc1(s, CC_OP_SUBB + ot, (JCC_Z << 1) | (nz ^ 1), l2); \
if (!s->jmp_opt) \
- gen_op_jz_ecx[s->aflag](l2); \
+ gen_op_jz_ecx(s->aflag, l2); \
gen_jmp(s, cur_eip); \
}
GEN_REPZ2(scas)
GEN_REPZ2(cmps)
-enum {
- JCC_O,
- JCC_B,
- JCC_Z,
- JCC_BE,
- JCC_S,
- JCC_P,
- JCC_L,
- JCC_LE,
-};
-
-static GenOpFunc1 *gen_jcc_sub[4][8] = {
- [OT_BYTE] = {
- NULL,
- gen_op_jb_subb,
- gen_op_jz_subb,
- gen_op_jbe_subb,
- gen_op_js_subb,
- NULL,
- gen_op_jl_subb,
- gen_op_jle_subb,
- },
- [OT_WORD] = {
- NULL,
- gen_op_jb_subw,
- gen_op_jz_subw,
- gen_op_jbe_subw,
- gen_op_js_subw,
- NULL,
- gen_op_jl_subw,
- gen_op_jle_subw,
- },
- [OT_LONG] = {
- NULL,
- gen_op_jb_subl,
- gen_op_jz_subl,
- gen_op_jbe_subl,
- gen_op_js_subl,
- NULL,
- gen_op_jl_subl,
- gen_op_jle_subl,
- },
-#ifdef TARGET_X86_64
- [OT_QUAD] = {
- NULL,
- BUGGY_64(gen_op_jb_subq),
- gen_op_jz_subq,
- BUGGY_64(gen_op_jbe_subq),
- gen_op_js_subq,
- NULL,
- BUGGY_64(gen_op_jl_subq),
- BUGGY_64(gen_op_jle_subq),
- },
-#endif
-};
-static GenOpFunc1 *gen_op_loop[3][4] = {
- [0] = {
- gen_op_loopnzw,
- gen_op_loopzw,
- gen_op_jnz_ecxw,
- },
- [1] = {
- gen_op_loopnzl,
- gen_op_loopzl,
- gen_op_jnz_ecxl,
- },
-#ifdef TARGET_X86_64
- [2] = {
- gen_op_loopnzq,
- gen_op_loopzq,
- gen_op_jnz_ecxq,
- },
-#endif
-};
-
-static GenOpFunc *gen_setcc_slow[8] = {
- gen_op_seto_T0_cc,
- gen_op_setb_T0_cc,
- gen_op_setz_T0_cc,
- gen_op_setbe_T0_cc,
- gen_op_sets_T0_cc,
- gen_op_setp_T0_cc,
- gen_op_setl_T0_cc,
- gen_op_setle_T0_cc,
-};
-
-static GenOpFunc *gen_setcc_sub[4][8] = {
- [OT_BYTE] = {
- NULL,
- gen_op_setb_T0_subb,
- gen_op_setz_T0_subb,
- gen_op_setbe_T0_subb,
- gen_op_sets_T0_subb,
- NULL,
- gen_op_setl_T0_subb,
- gen_op_setle_T0_subb,
- },
- [OT_WORD] = {
- NULL,
- gen_op_setb_T0_subw,
- gen_op_setz_T0_subw,
- gen_op_setbe_T0_subw,
- gen_op_sets_T0_subw,
- NULL,
- gen_op_setl_T0_subw,
- gen_op_setle_T0_subw,
- },
- [OT_LONG] = {
- NULL,
- gen_op_setb_T0_subl,
- gen_op_setz_T0_subl,
- gen_op_setbe_T0_subl,
- gen_op_sets_T0_subl,
- NULL,
- gen_op_setl_T0_subl,
- gen_op_setle_T0_subl,
- },
-#ifdef TARGET_X86_64
- [OT_QUAD] = {
- NULL,
- gen_op_setb_T0_subq,
- gen_op_setz_T0_subq,
- gen_op_setbe_T0_subq,
- gen_op_sets_T0_subq,
- NULL,
- gen_op_setl_T0_subq,
- gen_op_setle_T0_subq,
- },
-#endif
-};
-
static void *helper_fp_arith_ST0_FT0[8] = {
helper_fadd_ST0_FT0,
helper_fmul_ST0_FT0,
helper_fdiv_STN_ST0,
};
-/* compute eflags.C to reg */
-static void gen_compute_eflags_c(TCGv reg)
-{
-#if TCG_TARGET_REG_BITS == 32
- tcg_gen_shli_i32(cpu_tmp2_i32, cpu_cc_op, 3);
- tcg_gen_addi_i32(cpu_tmp2_i32, cpu_tmp2_i32,
- (long)cc_table + offsetof(CCTable, compute_c));
- tcg_gen_ld_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0);
- tcg_gen_call(&tcg_ctx, cpu_tmp2_i32, TCG_CALL_PURE,
- 1, &cpu_tmp2_i32, 0, NULL);
-#else
- tcg_gen_extu_i32_tl(cpu_tmp1_i64, cpu_cc_op);
- tcg_gen_shli_i64(cpu_tmp1_i64, cpu_tmp1_i64, 4);
- tcg_gen_addi_i64(cpu_tmp1_i64, cpu_tmp1_i64,
- (long)cc_table + offsetof(CCTable, compute_c));
- tcg_gen_ld_i64(cpu_tmp1_i64, cpu_tmp1_i64, 0);
- tcg_gen_call(&tcg_ctx, cpu_tmp1_i64, TCG_CALL_PURE,
- 1, &cpu_tmp2_i32, 0, NULL);
-#endif
- tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
-}
-
-/* compute all eflags to cc_src */
-static void gen_compute_eflags(TCGv reg)
-{
-#if TCG_TARGET_REG_BITS == 32
- tcg_gen_shli_i32(cpu_tmp2_i32, cpu_cc_op, 3);
- tcg_gen_addi_i32(cpu_tmp2_i32, cpu_tmp2_i32,
- (long)cc_table + offsetof(CCTable, compute_all));
- tcg_gen_ld_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0);
- tcg_gen_call(&tcg_ctx, cpu_tmp2_i32, TCG_CALL_PURE,
- 1, &cpu_tmp2_i32, 0, NULL);
-#else
- tcg_gen_extu_i32_tl(cpu_tmp1_i64, cpu_cc_op);
- tcg_gen_shli_i64(cpu_tmp1_i64, cpu_tmp1_i64, 4);
- tcg_gen_addi_i64(cpu_tmp1_i64, cpu_tmp1_i64,
- (long)cc_table + offsetof(CCTable, compute_all));
- tcg_gen_ld_i64(cpu_tmp1_i64, cpu_tmp1_i64, 0);
- tcg_gen_call(&tcg_ctx, cpu_tmp1_i64, TCG_CALL_PURE,
- 1, &cpu_tmp2_i32, 0, NULL);
-#endif
- tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
-}
-
/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_op(DisasContext *s1, int op, int ot, int d)
{
tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}
-static void gen_extu(int ot, TCGv reg)
-{
- switch(ot) {
- case OT_BYTE:
- tcg_gen_ext8u_tl(reg, reg);
- break;
- case OT_WORD:
- tcg_gen_ext16u_tl(reg, reg);
- break;
- case OT_LONG:
- tcg_gen_ext32u_tl(reg, reg);
- break;
- default:
- break;
- }
-}
-
-static void gen_exts(int ot, TCGv reg)
-{
- switch(ot) {
- case OT_BYTE:
- tcg_gen_ext8s_tl(reg, reg);
- break;
- case OT_WORD:
- tcg_gen_ext16s_tl(reg, reg);
- break;
- case OT_LONG:
- tcg_gen_ext32s_tl(reg, reg);
- break;
- default:
- break;
- }
-}
-
-/* XXX: add faster immediate case */
static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
int is_right, int is_arith)
{
gen_op_set_cc_op(s->cc_op);
shift_label = gen_new_label();
- tcg_gen_brcond_tl(TCG_COND_EQ, cpu_T[1], tcg_const_tl(0), shift_label);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_T[1], 0, shift_label);
tcg_gen_mov_tl(cpu_cc_src, cpu_T3);
tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
if (is_right)
tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
else
- tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);
+
+ gen_set_label(shift_label);
+ s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
+}
+
+static void gen_shift_rm_im(DisasContext *s, int ot, int op1, int op2,
+ int is_right, int is_arith)
+{
+ int mask;
+
+ if (ot == OT_QUAD)
+ mask = 0x3f;
+ else
+ mask = 0x1f;
+
+ /* load */
+ if (op1 == OR_TMP0)
+ gen_op_ld_T0_A0(ot + s->mem_index);
+ else
+ gen_op_mov_TN_reg(ot, 0, op1);
+
+ op2 &= mask;
+ if (op2 != 0) {
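+        /* keep the value shifted by op2 - 1 in cpu_tmp0: its top (SHL)
+           or bottom (SHR/SAR) bit is the last bit shifted out, which the
+           deferred flag computation turns into CF */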
+ if (is_right) {
+ if (is_arith) {
+ gen_exts(ot, cpu_T[0]);
+ tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], op2 - 1);
+ tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2);
+ } else {
+ gen_extu(ot, cpu_T[0]);
+ tcg_gen_shri_tl(cpu_tmp0, cpu_T[0], op2 - 1);
+ tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2);
+ }
+ } else {
+ tcg_gen_shli_tl(cpu_tmp0, cpu_T[0], op2 - 1);
+ tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2);
+ }
+ }
+
+ /* store */
+ if (op1 == OR_TMP0)
+ gen_op_st_T0_A0(ot + s->mem_index);
+ else
+ gen_op_mov_reg_T0(ot, op1);
- gen_set_label(shift_label);
- s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
+    /* update eflags if non-zero shift */
+ if (op2 != 0) {
+ tcg_gen_mov_tl(cpu_cc_src, cpu_tmp0);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
+ if (is_right)
+ s->cc_op = CC_OP_SARB + ot;
+ else
+ s->cc_op = CC_OP_SHLB + ot;
+ }
}
static inline void tcg_gen_lshift(TCGv ret, TCGv arg1, target_long arg2)
/* Must test zero case to avoid using undefined behaviour in TCG
shifts. */
label1 = gen_new_label();
- tcg_gen_brcond_tl(TCG_COND_EQ, cpu_T[1], tcg_const_tl(0), label1);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_T[1], 0, label1);
if (ot <= OT_WORD)
tcg_gen_andi_tl(cpu_tmp0, cpu_T[1], (1 << (3 + ot)) - 1);
gen_op_set_cc_op(s->cc_op);
label2 = gen_new_label();
- tcg_gen_brcond_tl(TCG_COND_EQ, cpu_T[1], tcg_const_tl(0), label2);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_T[1], 0, label2);
gen_compute_eflags(cpu_cc_src);
tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C));
/* update eflags */
label1 = gen_new_label();
- tcg_gen_brcond_tl(TCG_COND_EQ, cpu_T3, tcg_const_tl(-1), label1);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_T3, -1, label1);
tcg_gen_mov_tl(cpu_cc_src, cpu_T3);
tcg_gen_discard_tl(cpu_cc_dst);
/* Must test zero case to avoid using undefined behaviour in TCG
shifts. */
label1 = gen_new_label();
- tcg_gen_brcond_tl(TCG_COND_EQ, cpu_T3, tcg_const_tl(0), label1);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_T3, 0, label1);
tcg_gen_addi_tl(cpu_tmp5, cpu_T3, -1);
if (ot == OT_WORD) {
gen_op_set_cc_op(s->cc_op);
label2 = gen_new_label();
- tcg_gen_brcond_tl(TCG_COND_EQ, cpu_T3, tcg_const_tl(0), label2);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_T3, 0, label2);
tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
static void gen_shifti(DisasContext *s1, int op, int ot, int d, int c)
{
- /* currently not optimized */
- gen_op_movl_T1_im(c);
- gen_shift(s1, op, ot, d, OR_TMP1);
+ switch(op) {
+ case OP_SHL:
+ case OP_SHL1:
+ gen_shift_rm_im(s1, ot, d, c, 0, 0);
+ break;
+ case OP_SHR:
+ gen_shift_rm_im(s1, ot, d, c, 1, 0);
+ break;
+ case OP_SAR:
+ gen_shift_rm_im(s1, ot, d, c, 1, 1);
+ break;
+ default:
+ /* currently not optimized */
+ gen_op_movl_T1_im(c);
+ gen_shift(s1, op, ot, d, OR_TMP1);
+ break;
+ }
}
static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_ptr)
static inline void gen_jcc(DisasContext *s, int b,
target_ulong val, target_ulong next_eip)
{
- TranslationBlock *tb;
- int inv, jcc_op;
- GenOpFunc1 *func;
- target_ulong tmp;
- int l1, l2;
-
- inv = b & 1;
- jcc_op = (b >> 1) & 7;
+ int l1, l2, cc_op;
+ cc_op = s->cc_op;
+ if (s->cc_op != CC_OP_DYNAMIC) {
+ gen_op_set_cc_op(s->cc_op);
+ s->cc_op = CC_OP_DYNAMIC;
+ }
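+    /* the pre-flush cc_op was saved above so that gen_jcc1 can still
+       take its fast path even though s->cc_op is now dynamic */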
if (s->jmp_opt) {
- switch(s->cc_op) {
- /* we optimize the cmp/jcc case */
- case CC_OP_SUBB:
- case CC_OP_SUBW:
- case CC_OP_SUBL:
- case CC_OP_SUBQ:
- func = gen_jcc_sub[s->cc_op - CC_OP_SUBB][jcc_op];
- break;
-
- /* some jumps are easy to compute */
- case CC_OP_ADDB:
- case CC_OP_ADDW:
- case CC_OP_ADDL:
- case CC_OP_ADDQ:
-
- case CC_OP_ADCB:
- case CC_OP_ADCW:
- case CC_OP_ADCL:
- case CC_OP_ADCQ:
-
- case CC_OP_SBBB:
- case CC_OP_SBBW:
- case CC_OP_SBBL:
- case CC_OP_SBBQ:
-
- case CC_OP_LOGICB:
- case CC_OP_LOGICW:
- case CC_OP_LOGICL:
- case CC_OP_LOGICQ:
-
- case CC_OP_INCB:
- case CC_OP_INCW:
- case CC_OP_INCL:
- case CC_OP_INCQ:
-
- case CC_OP_DECB:
- case CC_OP_DECW:
- case CC_OP_DECL:
- case CC_OP_DECQ:
-
- case CC_OP_SHLB:
- case CC_OP_SHLW:
- case CC_OP_SHLL:
- case CC_OP_SHLQ:
-
- case CC_OP_SARB:
- case CC_OP_SARW:
- case CC_OP_SARL:
- case CC_OP_SARQ:
- switch(jcc_op) {
- case JCC_Z:
- func = gen_jcc_sub[(s->cc_op - CC_OP_ADDB) % 4][jcc_op];
- break;
- case JCC_S:
- func = gen_jcc_sub[(s->cc_op - CC_OP_ADDB) % 4][jcc_op];
- break;
- default:
- func = NULL;
- break;
- }
- break;
- default:
- func = NULL;
- break;
- }
-
- if (s->cc_op != CC_OP_DYNAMIC) {
- gen_op_set_cc_op(s->cc_op);
- s->cc_op = CC_OP_DYNAMIC;
- }
-
- if (!func) {
- gen_setcc_slow[jcc_op]();
- func = gen_op_jnz_T0_label;
- }
-
- if (inv) {
- tmp = val;
- val = next_eip;
- next_eip = tmp;
- }
- tb = s->tb;
-
l1 = gen_new_label();
- func(l1);
-
+ gen_jcc1(s, cc_op, b, l1);
+
gen_goto_tb(s, 0, next_eip);
gen_set_label(l1);
gen_goto_tb(s, 1, val);
-
s->is_jmp = 3;
} else {
- if (s->cc_op != CC_OP_DYNAMIC) {
- gen_op_set_cc_op(s->cc_op);
- s->cc_op = CC_OP_DYNAMIC;
- }
- gen_setcc_slow[jcc_op]();
- if (inv) {
- tmp = val;
- val = next_eip;
- next_eip = tmp;
- }
l1 = gen_new_label();
l2 = gen_new_label();
- gen_op_jnz_T0_label(l1);
+ gen_jcc1(s, cc_op, b, l1);
+
gen_jmp_im(next_eip);
- gen_op_jmp_label(l2);
+ tcg_gen_br(l2);
+
gen_set_label(l1);
gen_jmp_im(val);
gen_set_label(l2);
static void gen_setcc(DisasContext *s, int b)
{
- int inv, jcc_op;
- GenOpFunc *func;
-
- inv = b & 1;
- jcc_op = (b >> 1) & 7;
- switch(s->cc_op) {
- /* we optimize the cmp/jcc case */
- case CC_OP_SUBB:
- case CC_OP_SUBW:
- case CC_OP_SUBL:
- case CC_OP_SUBQ:
- func = gen_setcc_sub[s->cc_op - CC_OP_SUBB][jcc_op];
- if (!func)
- goto slow_jcc;
- break;
-
- /* some jumps are easy to compute */
- case CC_OP_ADDB:
- case CC_OP_ADDW:
- case CC_OP_ADDL:
- case CC_OP_ADDQ:
-
- case CC_OP_LOGICB:
- case CC_OP_LOGICW:
- case CC_OP_LOGICL:
- case CC_OP_LOGICQ:
-
- case CC_OP_INCB:
- case CC_OP_INCW:
- case CC_OP_INCL:
- case CC_OP_INCQ:
-
- case CC_OP_DECB:
- case CC_OP_DECW:
- case CC_OP_DECL:
- case CC_OP_DECQ:
+ int inv, jcc_op, l1;
- case CC_OP_SHLB:
- case CC_OP_SHLW:
- case CC_OP_SHLL:
- case CC_OP_SHLQ:
- switch(jcc_op) {
- case JCC_Z:
- func = gen_setcc_sub[(s->cc_op - CC_OP_ADDB) % 4][jcc_op];
- break;
- case JCC_S:
- func = gen_setcc_sub[(s->cc_op - CC_OP_ADDB) % 4][jcc_op];
- break;
- default:
- goto slow_jcc;
- }
- break;
- default:
- slow_jcc:
+ if (is_fast_jcc_case(s, b)) {
+ /* nominal case: we use a jump */
+ tcg_gen_movi_tl(cpu_T[0], 0);
+ l1 = gen_new_label();
+ gen_jcc1(s, s->cc_op, b ^ 1, l1);
+ tcg_gen_movi_tl(cpu_T[0], 1);
+ gen_set_label(l1);
+ } else {
+        /* slow case: it is more efficient not to generate a jump,
+           although it is questionable whether this optimization is
+           worthwhile */
+ inv = b & 1;
+ jcc_op = (b >> 1) & 7;
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
- func = gen_setcc_slow[jcc_op];
- break;
- }
- func();
- if (inv) {
- gen_op_xor_T0_1();
+ gen_setcc_slow_T0(jcc_op);
+ if (inv) {
+ tcg_gen_xori_tl(cpu_T[0], cpu_T[0], 1);
+ }
}
}
+static inline void gen_op_movl_T0_seg(int seg_reg)
+{
+ tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
+ offsetof(CPUX86State,segs[seg_reg].selector));
+}
+
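+/* real mode / vm86 segment load: the selector is stored as is and the
+   base is simply selector << 4, without any protection checks */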
+static inline void gen_op_movl_seg_T0_vm(int seg_reg)
+{
+ tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
+ tcg_gen_st32_tl(cpu_T[0], cpu_env,
+ offsetof(CPUX86State,segs[seg_reg].selector));
+ tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
+ tcg_gen_st_tl(cpu_T[0], cpu_env,
+ offsetof(CPUX86State,segs[seg_reg].base));
+}
+
/* move T0 to seg_reg and compute if the CPU state may change. Never
call this function with seg_reg == R_CS */
static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
s->is_jmp = 3;
} else {
- gen_op_movl_seg_T0_vm(offsetof(CPUX86State,segs[seg_reg]));
+ gen_op_movl_seg_T0_vm(seg_reg);
if (seg_reg == R_SS)
s->is_jmp = 3;
}
{
#ifdef TARGET_X86_64
if (CODE64(s)) {
- gen_op_addq_ESP_im(addend);
+ gen_op_add_reg_im(2, R_ESP, addend);
} else
#endif
if (s->ss32) {
- gen_op_addl_ESP_im(addend);
+ gen_op_add_reg_im(1, R_ESP, addend);
} else {
- gen_op_addw_ESP_im(addend);
+ gen_op_add_reg_im(0, R_ESP, addend);
}
}
if (mod != 3) {
gen_lea_modrm(s, modrm, ®_addr, &offset_addr);
gen_op_ld_T0_A0(OT_LONG + s->mem_index);
- gen_op_movl_env_T0(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
+ tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
gen_op_movl_T0_0();
- gen_op_movl_env_T0(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
- gen_op_movl_env_T0(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
- gen_op_movl_env_T0(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
+ tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
+ tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
+ tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
} else {
rm = (modrm & 7) | REX_B(s);
gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
gen_lea_modrm(s, modrm, ®_addr, &offset_addr);
gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
gen_op_movl_T0_0();
- gen_op_movl_env_T0(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
- gen_op_movl_env_T0(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
+ tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
+ tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
} else {
rm = (modrm & 7) | REX_B(s);
gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
case 0x211: /* movss ea, xmm */
if (mod != 3) {
gen_lea_modrm(s, modrm, ®_addr, &offset_addr);
- gen_op_movl_T0_env(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
+ tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
gen_op_st_T0_A0(OT_LONG + s->mem_index);
} else {
rm = (modrm & 7) | REX_B(s);
val = ldub_code(s->pc++);
if (is_xmm) {
gen_op_movl_T0_im(val);
- gen_op_movl_env_T0(offsetof(CPUX86State,xmm_t0.XMM_L(0)));
+ tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
gen_op_movl_T0_0();
- gen_op_movl_env_T0(offsetof(CPUX86State,xmm_t0.XMM_L(1)));
+ tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1)));
op1_offset = offsetof(CPUX86State,xmm_t0);
} else {
gen_op_movl_T0_im(val);
- gen_op_movl_env_T0(offsetof(CPUX86State,mmx_t0.MMX_L(0)));
+ tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
gen_op_movl_T0_0();
- gen_op_movl_env_T0(offsetof(CPUX86State,mmx_t0.MMX_L(1)));
+ tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
op1_offset = offsetof(CPUX86State,mmx_t0);
}
sse_op2 = sse_op_table2[((b - 1) & 3) * 8 + (((modrm >> 3)) & 7)][b1];
gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_Q(0)));
} else {
gen_op_ld_T0_A0(OT_LONG + s->mem_index);
- gen_op_movl_env_T0(offsetof(CPUX86State,xmm_t0.XMM_L(0)));
+ tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
}
op2_offset = offsetof(CPUX86State,xmm_t0);
} else {
if (b1 == 2) {
/* 32 bit access */
gen_op_ld_T0_A0(OT_LONG + s->mem_index);
- gen_op_movl_env_T0(offsetof(CPUX86State,xmm_t0.XMM_L(0)));
+ tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
} else {
/* 64 bit access */
gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_D(0)));
break;
}
if (b == 0x2e || b == 0x2f) {
- /* just to keep the EFLAGS optimization correct */
- gen_op_com_dummy();
s->cc_op = CC_OP_EFLAGS;
}
}
target_ulong next_eip, tval;
int rex_w, rex_r;
+ if (unlikely(loglevel & CPU_LOG_TB_OP))
+ tcg_gen_debug_insn_start(pc_start);
s->pc = pc_start;
prefixes = 0;
aflag = s->code32;
case 4: /* mul */
switch(ot) {
case OT_BYTE:
- gen_op_mulb_AL_T0();
+ gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
+ tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
+ /* XXX: use 32 bit mul which could be faster */
+ tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ gen_op_mov_reg_T0(OT_WORD, R_EAX);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
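+            /* cc_src keeps the high byte of the product: non-zero
+               means CF = OF = 1 */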
+ tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
s->cc_op = CC_OP_MULB;
break;
case OT_WORD:
- gen_op_mulw_AX_T0();
+ gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
+ tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
+ /* XXX: use 32 bit mul which could be faster */
+ tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ gen_op_mov_reg_T0(OT_WORD, R_EAX);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
+ tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
+ gen_op_mov_reg_T0(OT_WORD, R_EDX);
+ tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
s->cc_op = CC_OP_MULW;
break;
default:
case OT_LONG:
- gen_op_mull_EAX_T0();
+#ifdef TARGET_X86_64
+ gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
+ tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext32u_tl(cpu_T[1], cpu_T[1]);
+ tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ gen_op_mov_reg_T0(OT_LONG, R_EAX);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
+ tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
+ gen_op_mov_reg_T0(OT_LONG, R_EDX);
+ tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
+#else
+ {
+ TCGv t0, t1;
+ t0 = tcg_temp_new(TCG_TYPE_I64);
+ t1 = tcg_temp_new(TCG_TYPE_I64);
+ gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
+ tcg_gen_extu_i32_i64(t0, cpu_T[0]);
+ tcg_gen_extu_i32_i64(t1, cpu_T[1]);
+ tcg_gen_mul_i64(t0, t0, t1);
+ tcg_gen_trunc_i64_i32(cpu_T[0], t0);
+ gen_op_mov_reg_T0(OT_LONG, R_EAX);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
+ tcg_gen_shri_i64(t0, t0, 32);
+ tcg_gen_trunc_i64_i32(cpu_T[0], t0);
+ gen_op_mov_reg_T0(OT_LONG, R_EDX);
+ tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
+ }
+#endif
s->cc_op = CC_OP_MULL;
break;
#ifdef TARGET_X86_64
case OT_QUAD:
- gen_op_mulq_EAX_T0();
+ tcg_gen_helper_0_1(helper_mulq_EAX_T0, cpu_T[0]);
s->cc_op = CC_OP_MULQ;
break;
#endif
case 5: /* imul */
switch(ot) {
case OT_BYTE:
- gen_op_imulb_AL_T0();
+ gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
+ tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
+ /* XXX: use 32 bit mul which could be faster */
+ tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ gen_op_mov_reg_T0(OT_WORD, R_EAX);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
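+            /* cc_src = result minus its sign-extended low byte: non-zero
+               iff the signed result does not fit in 8 bits (CF = OF = 1) */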
+ tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
+ tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
s->cc_op = CC_OP_MULB;
break;
case OT_WORD:
- gen_op_imulw_AX_T0();
+ gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
+ tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
+ /* XXX: use 32 bit mul which could be faster */
+ tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ gen_op_mov_reg_T0(OT_WORD, R_EAX);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
+ tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
+ tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
+ tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
+ gen_op_mov_reg_T0(OT_WORD, R_EDX);
s->cc_op = CC_OP_MULW;
break;
default:
case OT_LONG:
- gen_op_imull_EAX_T0();
+#ifdef TARGET_X86_64
+ gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
+ tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
+ tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ gen_op_mov_reg_T0(OT_LONG, R_EAX);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
+ tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
+ tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
+ tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
+ gen_op_mov_reg_T0(OT_LONG, R_EDX);
+#else
+ {
+ TCGv t0, t1;
+ t0 = tcg_temp_new(TCG_TYPE_I64);
+ t1 = tcg_temp_new(TCG_TYPE_I64);
+ gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
+ tcg_gen_ext_i32_i64(t0, cpu_T[0]);
+ tcg_gen_ext_i32_i64(t1, cpu_T[1]);
+ tcg_gen_mul_i64(t0, t0, t1);
+ tcg_gen_trunc_i64_i32(cpu_T[0], t0);
+ gen_op_mov_reg_T0(OT_LONG, R_EAX);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
+ tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
+ tcg_gen_shri_i64(t0, t0, 32);
+ tcg_gen_trunc_i64_i32(cpu_T[0], t0);
+ gen_op_mov_reg_T0(OT_LONG, R_EDX);
+ tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
+ }
+#endif
s->cc_op = CC_OP_MULL;
break;
#ifdef TARGET_X86_64
case OT_QUAD:
- gen_op_imulq_EAX_T0();
+ tcg_gen_helper_0_1(helper_imulq_EAX_T0, cpu_T[0]);
s->cc_op = CC_OP_MULQ;
break;
#endif
cpu_T[1],
tcg_const_i32(s->pc - pc_start));
} else {
- gen_op_movl_seg_T0_vm(offsetof(CPUX86State,segs[R_CS]));
+ gen_op_movl_seg_T0_vm(R_CS);
gen_op_movl_T0_T1();
gen_op_jmp_T0();
}
#ifdef TARGET_X86_64
if (ot == OT_QUAD) {
- gen_op_imulq_T0_T1();
+ tcg_gen_helper_1_2(helper_imulq_T0_T1, cpu_T[0], cpu_T[0], cpu_T[1]);
} else
#endif
if (ot == OT_LONG) {
- gen_op_imull_T0_T1();
+#ifdef TARGET_X86_64
+ tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
+ tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
+ tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
+ tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
+#else
+ {
+ TCGv t0, t1;
+ t0 = tcg_temp_new(TCG_TYPE_I64);
+ t1 = tcg_temp_new(TCG_TYPE_I64);
+ tcg_gen_ext_i32_i64(t0, cpu_T[0]);
+ tcg_gen_ext_i32_i64(t1, cpu_T[1]);
+ tcg_gen_mul_i64(t0, t0, t1);
+ tcg_gen_trunc_i64_i32(cpu_T[0], t0);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
+ tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
+ tcg_gen_shri_i64(t0, t0, 32);
+ tcg_gen_trunc_i64_i32(cpu_T[1], t0);
+ tcg_gen_sub_tl(cpu_cc_src, cpu_T[1], cpu_tmp0);
+ }
+#endif
} else {
- gen_op_imulw_T0_T1();
+ tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
+ /* XXX: use 32 bit mul which could be faster */
+ tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
+ tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
+ tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
}
gen_op_mov_reg_T0(ot, reg);
s->cc_op = CC_OP_MULB + ot;
case 0x1b0:
case 0x1b1: /* cmpxchg Ev, Gv */
{
- int label1;
+ int label1, label2;
if ((b & 1) == 0)
ot = OT_BYTE;
tcg_gen_ld_tl(cpu_T3, cpu_env, offsetof(CPUState, regs[R_EAX]));
tcg_gen_sub_tl(cpu_T3, cpu_T3, cpu_T[0]);
gen_extu(ot, cpu_T3);
- tcg_gen_brcond_tl(TCG_COND_EQ, cpu_T3, tcg_const_tl(0), label1);
- tcg_gen_mov_tl(cpu_T[1], cpu_T[0]);
- gen_op_mov_reg_T0(ot, R_EAX);
- gen_set_label(label1);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_T3, 0, label1);
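+            /* register destination: only the selected register is written;
+               the memory form below always performs the store */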
if (mod == 3) {
+ label2 = gen_new_label();
+ gen_op_mov_reg_T0(ot, R_EAX);
+ tcg_gen_br(label2);
+ gen_set_label(label1);
gen_op_mov_reg_T1(ot, rm);
+ gen_set_label(label2);
} else {
+ tcg_gen_mov_tl(cpu_T[1], cpu_T[0]);
+ gen_op_mov_reg_T0(ot, R_EAX);
+ gen_set_label(label1);
+ /* always store */
gen_op_st_T1_A0(ot + s->mem_index);
}
tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
mod = (modrm >> 6) & 3;
if ((mod == 3) || ((modrm & 0x38) != 0x8))
goto illegal_op;
- gen_jmp_im(pc_start - s->cs_base);
- if (s->cc_op != CC_OP_DYNAMIC)
- gen_op_set_cc_op(s->cc_op);
- gen_lea_modrm(s, modrm, ®_addr, &offset_addr);
- gen_op_cmpxchg8b();
+#ifdef TARGET_X86_64
+ if (dflag == 2) {
+ if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
+ goto illegal_op;
+ gen_jmp_im(pc_start - s->cs_base);
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+ gen_lea_modrm(s, modrm, ®_addr, &offset_addr);
+ tcg_gen_helper_0_1(helper_cmpxchg16b, cpu_A0);
+ } else
+#endif
+ {
+ if (!(s->cpuid_features & CPUID_CX8))
+ goto illegal_op;
+ gen_jmp_im(pc_start - s->cs_base);
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+ gen_lea_modrm(s, modrm, ®_addr, &offset_addr);
+ tcg_gen_helper_0_1(helper_cmpxchg8b, cpu_A0);
+ }
s->cc_op = CC_OP_EFLAGS;
break;
gen_op_set_cc_op(s->cc_op);
tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(opreg));
tcg_gen_helper_0_0(helper_fucomi_ST0_FT0);
- gen_op_fcomi_dummy();
s->cc_op = CC_OP_EFLAGS;
break;
case 0x1e: /* fcomi */
gen_op_set_cc_op(s->cc_op);
tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(opreg));
tcg_gen_helper_0_0(helper_fcomi_ST0_FT0);
- gen_op_fcomi_dummy();
s->cc_op = CC_OP_EFLAGS;
break;
case 0x28: /* ffree sti */
tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(opreg));
tcg_gen_helper_0_0(helper_fucomi_ST0_FT0);
tcg_gen_helper_0_0(helper_fpop);
- gen_op_fcomi_dummy();
s->cc_op = CC_OP_EFLAGS;
break;
case 0x3e: /* fcomip */
tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(opreg));
tcg_gen_helper_0_0(helper_fcomi_ST0_FT0);
tcg_gen_helper_0_0(helper_fpop);
- gen_op_fcomi_dummy();
s->cc_op = CC_OP_EFLAGS;
break;
case 0x10 ... 0x13: /* fcmovxx */
op1 = fcmov_cc[op & 3] | ((op >> 3) & 1);
gen_setcc(s, op1);
l1 = gen_new_label();
- tcg_gen_brcond_tl(TCG_COND_EQ, cpu_T[0], tcg_const_tl(0), l1);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_T[0], 0, l1);
tcg_gen_helper_0_1(helper_fmov_ST0_STN, tcg_const_i32(opreg));
gen_set_label(l1);
}
/* pop selector */
gen_op_addl_A0_im(2 << s->dflag);
gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
- gen_op_movl_seg_T0_vm(offsetof(CPUX86State,segs[R_CS]));
+ gen_op_movl_seg_T0_vm(R_CS);
/* add stack offset */
gen_stack_update(s, val + (4 << s->dflag));
}
gen_ldst_modrm(s, modrm, OT_BYTE, OR_TMP0, 1);
break;
case 0x140 ... 0x14f: /* cmov Gv, Ev */
- ot = dflag + OT_WORD;
- modrm = ldub_code(s->pc++);
- reg = ((modrm >> 3) & 7) | rex_r;
- mod = (modrm >> 6) & 3;
- gen_setcc(s, b);
- if (mod != 3) {
- gen_lea_modrm(s, modrm, ®_addr, &offset_addr);
- gen_op_ld_T1_A0(ot + s->mem_index);
- } else {
- rm = (modrm & 7) | REX_B(s);
- gen_op_mov_TN_reg(ot, 1, rm);
+ {
+ int l1;
+ ot = dflag + OT_WORD;
+ modrm = ldub_code(s->pc++);
+ reg = ((modrm >> 3) & 7) | rex_r;
+ mod = (modrm >> 6) & 3;
+ if (mod != 3) {
+ gen_lea_modrm(s, modrm, ®_addr, &offset_addr);
+ gen_op_ld_T1_A0(ot + s->mem_index);
+ } else {
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_mov_TN_reg(ot, 1, rm);
+ }
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+#ifdef TARGET_X86_64
+ if (ot == OT_LONG) {
+                /* XXX: specific Intel behaviour? */
+ l1 = gen_new_label();
+ gen_jcc1(s, s->cc_op, b ^ 1, l1);
+ tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
+ gen_set_label(l1);
+ tcg_gen_movi_tl(cpu_tmp0, 0);
+ tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_LH_OFFSET);
+ } else
+#endif
+ {
+ l1 = gen_new_label();
+ gen_jcc1(s, s->cc_op, b ^ 1, l1);
+ gen_op_mov_reg_T1(ot, reg);
+ gen_set_label(l1);
+ }
}
- gen_op_cmov_reg_T1_T0[ot - OT_WORD][reg]();
break;
/************************/
} else {
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
- gen_op_movl_T0_eflags();
+ tcg_gen_helper_1_0(helper_read_eflags, cpu_T[0]);
gen_push_T0(s);
}
break;
gen_pop_T0(s);
if (s->cpl == 0) {
if (s->dflag) {
- gen_op_movl_eflags_T0_cpl0();
+ tcg_gen_helper_0_2(helper_write_eflags, cpu_T[0],
+ tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK)));
} else {
- gen_op_movw_eflags_T0_cpl0();
+ tcg_gen_helper_0_2(helper_write_eflags, cpu_T[0],
+ tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK) & 0xffff));
}
} else {
if (s->cpl <= s->iopl) {
if (s->dflag) {
- gen_op_movl_eflags_T0_io();
+ tcg_gen_helper_0_2(helper_write_eflags, cpu_T[0],
+ tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK)));
} else {
- gen_op_movw_eflags_T0_io();
+ tcg_gen_helper_0_2(helper_write_eflags, cpu_T[0],
+ tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK) & 0xffff));
}
} else {
if (s->dflag) {
- gen_op_movl_eflags_T0();
+ tcg_gen_helper_0_2(helper_write_eflags, cpu_T[0],
+ tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK)));
} else {
- gen_op_movw_eflags_T0();
+ tcg_gen_helper_0_2(helper_write_eflags, cpu_T[0],
+ tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff));
}
}
}
}
break;
case 0x9e: /* sahf */
- if (CODE64(s))
+ if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
goto illegal_op;
gen_op_mov_TN_reg(OT_BYTE, 0, R_AH);
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
- gen_op_movb_eflags_T0();
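+    /* SAHF: OF is preserved from the current flags while SF, ZF, AF, PF
+       and CF are loaded from AH */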
+ gen_compute_eflags(cpu_cc_src);
+ tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
+ tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
+ tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
s->cc_op = CC_OP_EFLAGS;
break;
case 0x9f: /* lahf */
- if (CODE64(s))
+ if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
goto illegal_op;
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
- gen_op_movl_T0_eflags();
+ gen_compute_eflags(cpu_T[0]);
+ /* Note: gen_compute_eflags() only gives the condition codes */
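+    /* bit 1 of EFLAGS is architecturally always set */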
+ tcg_gen_ori_tl(cpu_T[0], cpu_T[0], 0x02);
gen_op_mov_reg_T0(OT_BYTE, R_AH);
break;
case 0xf5: /* cmc */
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
- gen_op_cmc();
+ gen_compute_eflags(cpu_cc_src);
+ tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
s->cc_op = CC_OP_EFLAGS;
break;
case 0xf8: /* clc */
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
- gen_op_clc();
+ gen_compute_eflags(cpu_cc_src);
+ tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
s->cc_op = CC_OP_EFLAGS;
break;
case 0xf9: /* stc */
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
- gen_op_stc();
+ gen_compute_eflags(cpu_cc_src);
+ tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
s->cc_op = CC_OP_EFLAGS;
break;
case 0xfc: /* cld */
gen_extu(ot, cpu_T[0]);
label1 = gen_new_label();
tcg_gen_movi_tl(cpu_cc_dst, 0);
- tcg_gen_brcond_tl(TCG_COND_EQ, cpu_T[0], tcg_const_tl(0), label1);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_T[0], 0, label1);
if (b & 1) {
tcg_gen_helper_1_1(helper_bsr, cpu_T[0], cpu_T[0]);
} else {
goto illegal_op;
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
- gen_op_daa();
+ tcg_gen_helper_0_0(helper_daa);
s->cc_op = CC_OP_EFLAGS;
break;
case 0x2f: /* das */
goto illegal_op;
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
- gen_op_das();
+ tcg_gen_helper_0_0(helper_das);
s->cc_op = CC_OP_EFLAGS;
break;
case 0x37: /* aaa */
goto illegal_op;
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
- gen_op_aaa();
+ tcg_gen_helper_0_0(helper_aaa);
s->cc_op = CC_OP_EFLAGS;
break;
case 0x3f: /* aas */
goto illegal_op;
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
- gen_op_aas();
+ tcg_gen_helper_0_0(helper_aas);
s->cc_op = CC_OP_EFLAGS;
break;
case 0xd4: /* aam */
if (val == 0) {
gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
} else {
- gen_op_aam(val);
+ tcg_gen_helper_0_1(helper_aam, tcg_const_i32(val));
s->cc_op = CC_OP_LOGICB;
}
break;
if (CODE64(s))
goto illegal_op;
val = ldub_code(s->pc++);
- gen_op_aad(val);
+ tcg_gen_helper_0_1(helper_aad, tcg_const_i32(val));
s->cc_op = CC_OP_LOGICB;
break;
/************************/
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
gen_jmp_im(pc_start - s->cs_base);
- gen_op_into(s->pc - pc_start);
+ tcg_gen_helper_0_1(helper_into, tcg_const_i32(s->pc - pc_start));
break;
case 0xf1: /* icebp (undocumented, exits to external debugger) */
if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP))
goto illegal_op;
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
- gen_op_salc();
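+        /* SALC: set AL to 0xff if CF is set, 0 otherwise (AL = -CF) */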
+ gen_compute_eflags_c(cpu_T[0]);
+ tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
+ gen_op_mov_reg_T0(OT_BYTE, R_EAX);
break;
case 0xe0: /* loopnz */
case 0xe1: /* loopz */
- if (s->cc_op != CC_OP_DYNAMIC)
- gen_op_set_cc_op(s->cc_op);
- /* FALL THRU */
case 0xe2: /* loop */
case 0xe3: /* jecxz */
{
- int l1, l2;
+ int l1, l2, l3;
tval = (int8_t)insn_get(s, OT_BYTE);
next_eip = s->pc - s->cs_base;
l1 = gen_new_label();
l2 = gen_new_label();
+ l3 = gen_new_label();
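+            /* l1: branch taken, l2: fall through to next_eip,
+               l3: early exit for loopnz/loopz when ECX reaches zero */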
b &= 3;
- if (b == 3) {
- gen_op_jz_ecx[s->aflag](l1);
- } else {
- gen_op_dec_ECX[s->aflag]();
- if (b <= 1)
- gen_op_mov_T0_cc();
- gen_op_loop[s->aflag][b](l1);
+ switch(b) {
+ case 0: /* loopnz */
+ case 1: /* loopz */
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+ gen_op_add_reg_im(s->aflag, R_ECX, -1);
+ gen_op_jz_ecx(s->aflag, l3);
+ gen_compute_eflags(cpu_tmp0);
+ tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_Z);
+ if (b == 0) {
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, l1);
+ } else {
+ tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, l1);
+ }
+ break;
+ case 2: /* loop */
+ gen_op_add_reg_im(s->aflag, R_ECX, -1);
+ gen_op_jnz_ecx(s->aflag, l1);
+ break;
+ default:
+ case 3: /* jcxz */
+ gen_op_jz_ecx(s->aflag, l1);
+ break;
}
+ gen_set_label(l3);
gen_jmp_im(next_eip);
- gen_op_jmp_label(l2);
+ tcg_gen_br(l2);
+
gen_set_label(l1);
gen_jmp_im(tval);
gen_set_label(l2);
goto illegal_op;
if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ))
break;
- gen_op_movl_T0_env(offsetof(CPUX86State,ldt.selector));
+ tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector));
ot = OT_WORD;
if (mod == 3)
ot += s->dflag;
goto illegal_op;
if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ))
break;
- gen_op_movl_T0_env(offsetof(CPUX86State,tr.selector));
+ tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector));
ot = OT_WORD;
if (mod == 3)
ot += s->dflag;
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
if (op == 4)
- gen_op_verr();
+ tcg_gen_helper_0_1(helper_verr, cpu_T[0]);
else
- gen_op_verw();
+ tcg_gen_helper_0_1(helper_verw, cpu_T[0]);
s->cc_op = CC_OP_EFLAGS;
break;
default:
if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ))
break;
gen_lea_modrm(s, modrm, ®_addr, &offset_addr);
- gen_op_movl_T0_env(offsetof(CPUX86State, gdt.limit));
+ tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
gen_op_st_T0_A0(OT_WORD + s->mem_index);
gen_add_A0_im(s, 2);
- gen_op_movtl_T0_env(offsetof(CPUX86State, gdt.base));
+ tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
if (!s->dflag)
gen_op_andl_T0_im(0xffffff);
gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ))
break;
gen_lea_modrm(s, modrm, ®_addr, &offset_addr);
- gen_op_movl_T0_env(offsetof(CPUX86State, idt.limit));
+ tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
gen_op_st_T0_A0(OT_WORD + s->mem_index);
gen_add_A0_im(s, 2);
- gen_op_movtl_T0_env(offsetof(CPUX86State, idt.base));
+ tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
if (!s->dflag)
gen_op_andl_T0_im(0xffffff);
gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
if (!s->dflag)
gen_op_andl_T0_im(0xffffff);
if (op == 2) {
- gen_op_movtl_env_T0(offsetof(CPUX86State,gdt.base));
- gen_op_movl_env_T1(offsetof(CPUX86State,gdt.limit));
+ tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base));
+ tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit));
} else {
- gen_op_movtl_env_T0(offsetof(CPUX86State,idt.base));
- gen_op_movl_env_T1(offsetof(CPUX86State,idt.limit));
+ tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base));
+ tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit));
}
}
break;
case 4: /* smsw */
if (gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0))
break;
- gen_op_movl_T0_env(offsetof(CPUX86State,cr[0]));
+ tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 1);
break;
case 6: /* lmsw */
#ifdef TARGET_X86_64
if (CODE64(s) && rm == 0) {
/* swapgs */
- gen_op_movtl_T0_env(offsetof(CPUX86State,segs[R_GS].base));
- gen_op_movtl_T1_env(offsetof(CPUX86State,kernelgsbase));
- gen_op_movtl_env_T1(offsetof(CPUX86State,segs[R_GS].base));
- gen_op_movtl_env_T0(offsetof(CPUX86State,kernelgsbase));
+ tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,segs[R_GS].base));
+ tcg_gen_ld_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,kernelgsbase));
+ tcg_gen_st_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,segs[R_GS].base));
+ tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,kernelgsbase));
} else
#endif
{
} else
#endif
{
+ int label1;
if (!s->pe || s->vm86)
goto illegal_op;
- ot = dflag ? OT_LONG : OT_WORD;
+ ot = OT_WORD;
modrm = ldub_code(s->pc++);
reg = (modrm >> 3) & 7;
mod = (modrm >> 6) & 3;
gen_op_mov_TN_reg(ot, 0, rm);
}
gen_op_mov_TN_reg(ot, 1, reg);
- if (s->cc_op != CC_OP_DYNAMIC)
- gen_op_set_cc_op(s->cc_op);
- gen_op_arpl();
- s->cc_op = CC_OP_EFLAGS;
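+            /* ARPL: if the destination RPL is below the source RPL, copy
+               the source RPL into the destination and set ZF (kept as a
+               CC_Z mask in cpu_T3), otherwise clear ZF */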
+ tcg_gen_andi_tl(cpu_tmp0, cpu_T[0], 3);
+ tcg_gen_andi_tl(cpu_T[1], cpu_T[1], 3);
+ tcg_gen_movi_tl(cpu_T3, 0);
+ label1 = gen_new_label();
+ tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, cpu_T[1], label1);
+ tcg_gen_andi_tl(cpu_T[0], cpu_T[0], ~3);
+ tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ tcg_gen_movi_tl(cpu_T3, CC_Z);
+ gen_set_label(label1);
if (mod != 3) {
gen_op_st_T0_A0(ot + s->mem_index);
} else {
gen_op_mov_reg_T0(ot, rm);
}
- gen_op_arpl_update();
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+ gen_compute_eflags(cpu_cc_src);
+ tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
+ tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T3);
+ s->cc_op = CC_OP_EFLAGS;
}
break;
case 0x102: /* lar */
case 0x103: /* lsl */
- if (!s->pe || s->vm86)
- goto illegal_op;
- ot = dflag ? OT_LONG : OT_WORD;
- modrm = ldub_code(s->pc++);
- reg = ((modrm >> 3) & 7) | rex_r;
- gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
- gen_op_mov_TN_reg(ot, 1, reg);
- if (s->cc_op != CC_OP_DYNAMIC)
- gen_op_set_cc_op(s->cc_op);
- if (b == 0x102)
- gen_op_lar();
- else
- gen_op_lsl();
- s->cc_op = CC_OP_EFLAGS;
- gen_op_mov_reg_T1(ot, reg);
+ {
+ int label1;
+ if (!s->pe || s->vm86)
+ goto illegal_op;
+ ot = dflag ? OT_LONG : OT_WORD;
+ modrm = ldub_code(s->pc++);
+ reg = ((modrm >> 3) & 7) | rex_r;
+ gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+ if (b == 0x102)
+ tcg_gen_helper_1_1(helper_lar, cpu_T[0], cpu_T[0]);
+ else
+ tcg_gen_helper_1_1(helper_lsl, cpu_T[0], cpu_T[0]);
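+                /* the helpers report success through ZF in cc_src: the
+                   destination register is only written when ZF is set */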
+ tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
+ label1 = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
+ gen_op_mov_reg_T0(ot, reg);
+ gen_set_label(label1);
+ s->cc_op = CC_OP_EFLAGS;
+ }
break;
case 0x118:
modrm = ldub_code(s->pc++);
tcg_gen_helper_1_0(helper_movtl_T0_cr8, cpu_T[0]);
else
#endif
- gen_op_movtl_T0_env(offsetof(CPUX86State,cr[reg]));
+ tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[reg]));
gen_op_mov_reg_T0(ot, rm);
}
break;
gen_eob(s);
} else {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
- gen_op_movtl_T0_env(offsetof(CPUX86State,dr[reg]));
+ tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,dr[reg]));
gen_op_mov_reg_T0(ot, rm);
}
}
gen_lea_modrm(s, modrm, ®_addr, &offset_addr);
if (op == 2) {
gen_op_ld_T0_A0(OT_LONG + s->mem_index);
- gen_op_movl_env_T0(offsetof(CPUX86State, mxcsr));
+ tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
} else {
- gen_op_movl_T0_env(offsetof(CPUX86State, mxcsr));
+ tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
gen_op_st_T0_A0(OT_LONG + s->mem_index);
}
break;
TCG_AREG0, offsetof(CPUState, cc_src), "cc_src");
cpu_cc_dst = tcg_global_mem_new(TCG_TYPE_TL,
TCG_AREG0, offsetof(CPUState, cc_dst), "cc_dst");
+
+ /* register helpers */
+
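+    /* helper.h declares every helper through DEF_HELPER; redefining the
+       macro turns that list into one tcg_register_helper() call per
+       helper */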
+#define DEF_HELPER(ret, name, params) tcg_register_helper(name, #name);
+#include "helper.h"
}
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
dc->cpuid_features = env->cpuid_features;
dc->cpuid_ext_features = env->cpuid_ext_features;
dc->cpuid_ext2_features = env->cpuid_ext2_features;
+ dc->cpuid_ext3_features = env->cpuid_ext3_features;
#ifdef TARGET_X86_64
dc->lma = (flags >> HF_LMA_SHIFT) & 1;
dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
disas_flags = !dc->code32;
target_disas(logfile, pc_start, pc_ptr - pc_start, disas_flags);
fprintf(logfile, "\n");
- if (loglevel & CPU_LOG_TB_OP_OPT) {
- fprintf(logfile, "OP before opt:\n");
- tcg_dump_ops(&tcg_ctx, logfile);
- fprintf(logfile, "\n");
- }
}
#endif