4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
21 #include "qemu/host-utils.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/translator.h"
29 #include "exec/helper-proto.h"
30 #include "exec/helper-gen.h"
32 #include "trace-tcg.h"
35 #define PREFIX_REPZ 0x01
36 #define PREFIX_REPNZ 0x02
37 #define PREFIX_LOCK 0x04
38 #define PREFIX_DATA 0x08
39 #define PREFIX_ADR 0x10
40 #define PREFIX_VEX 0x20
43 #define CODE64(s) ((s)->code64)
44 #define REX_X(s) ((s)->rex_x)
45 #define REX_B(s) ((s)->rex_b)
60 /* For a switch indexed by MODRM, match all memory operands for a given OP. */
61 #define CASE_MODRM_MEM_OP(OP) \
62 case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
63 case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
64 case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7
66 #define CASE_MODRM_OP(OP) \
67 case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
68 case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
69 case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
70 case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
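/* Illustration (standalone sketch, not part of this file): the ModRM
   byte is laid out as mod[7:6] | op[5:3] | rm[2:0].  For a given OP,
   CASE_MODRM_MEM_OP matches the 24 values with mod != 3 (memory
   operands); CASE_MODRM_OP adds the mod == 3 row (register operands). */
static void demo_modrm_fields(int modrm, int *mod, int *op, int *rm)
{
    *mod = (modrm >> 6) & 3;   /* addressing mode, 3 = register direct */
    *op = (modrm >> 3) & 7;    /* opcode extension or register field */
    *rm = modrm & 7;           /* base register / addressing form */
}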
72 //#define MACRO_TEST 1
74 /* global register indexes */
75 static TCGv_env cpu_env;
77 static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2, cpu_cc_srcT;
78 static TCGv_i32 cpu_cc_op;
79 static TCGv cpu_regs[CPU_NB_REGS];
80 static TCGv cpu_seg_base[6];
81 static TCGv_i64 cpu_bndl[4];
82 static TCGv_i64 cpu_bndu[4];
84 static TCGv cpu_T0, cpu_T1;
85 /* local register indexes (only used inside old micro ops) */
86 static TCGv cpu_tmp0, cpu_tmp4;
87 static TCGv_ptr cpu_ptr0, cpu_ptr1;
88 static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
89 static TCGv_i64 cpu_tmp1_i64;
91 #include "exec/gen-icount.h"
94 static int x86_64_hregs;
97 typedef struct DisasContext {
98 DisasContextBase base;
100 /* current insn context */
101 int override; /* -1 if no override */
105 target_ulong pc_start;
106 target_ulong pc; /* pc = eip + cs_base */
107 /* current block context */
108 target_ulong cs_base; /* base of CS segment */
109 int pe; /* protected mode */
110 int code32; /* 32 bit code segment */
112 int lma; /* long mode active */
113 int code64; /* 64 bit code segment */
116 int vex_l; /* vex vector length */
117 int vex_v; /* vex vvvv register, without 1's complement. */
118 int ss32; /* 32 bit stack segment */
119 CCOp cc_op; /* current CC operation */
121 int addseg; /* non-zero if any of DS, ES or SS has a non-zero base */
122 int f_st; /* currently unused */
123 int vm86; /* vm86 mode */
126 int tf; /* TF cpu flag */
127 int jmp_opt; /* use direct block chaining for direct jumps */
128 int repz_opt; /* optimize jumps within repz instructions */
129 int mem_index; /* select memory access functions */
130 uint64_t flags; /* all execution flags */
131 int popl_esp_hack; /* for correct popl with esp base handling */
132 int rip_offset; /* only used in x86_64, but left for simplicity */
134 int cpuid_ext_features;
135 int cpuid_ext2_features;
136 int cpuid_ext3_features;
137 int cpuid_7_0_ebx_features;
138 int cpuid_xsave_features;
141 static void gen_eob(DisasContext *s);
142 static void gen_jr(DisasContext *s, TCGv dest);
143 static void gen_jmp(DisasContext *s, target_ulong eip);
144 static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
145 static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d);
147 /* i386 arith/logic operations */
167 OP_SHL1, /* undocumented */
183 /* I386 int registers */
184 OR_EAX, /* MUST be even numbered */
193 OR_TMP0 = 16, /* temporary operand register */
195 OR_A0, /* temporary register used when doing address evaluation */
205 /* Bit set if the global variable is live after setting CC_OP to X. */
206 static const uint8_t cc_op_live[CC_OP_NB] = {
207 [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
208 [CC_OP_EFLAGS] = USES_CC_SRC,
209 [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
210 [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
211 [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
212 [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
213 [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
214 [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
215 [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
216 [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
217 [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
218 [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
219 [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
220 [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
221 [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
222 [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
224 [CC_OP_POPCNT] = USES_CC_SRC,
227 static void set_cc_op(DisasContext *s, CCOp op)
231 if (s->cc_op == op) {
235 /* Discard CC computation that will no longer be used. */
236 dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
237 if (dead & USES_CC_DST) {
238 tcg_gen_discard_tl(cpu_cc_dst);
240 if (dead & USES_CC_SRC) {
241 tcg_gen_discard_tl(cpu_cc_src);
243 if (dead & USES_CC_SRC2) {
244 tcg_gen_discard_tl(cpu_cc_src2);
246 if (dead & USES_CC_SRCT) {
247 tcg_gen_discard_tl(cpu_cc_srcT);
250 if (op == CC_OP_DYNAMIC) {
251 /* The DYNAMIC setting is translator only, and should never be
252 stored. Thus we always consider it clean. */
253 s->cc_op_dirty = false;
255 /* Discard any computed CC_OP value (see shifts). */
256 if (s->cc_op == CC_OP_DYNAMIC) {
257 tcg_gen_discard_i32(cpu_cc_op);
259 s->cc_op_dirty = true;
264 static void gen_update_cc_op(DisasContext *s)
266 if (s->cc_op_dirty) {
267 tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
268 s->cc_op_dirty = false;
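/* Standalone sketch of the lazy-flags idea (simplified types, not the
   QEMU ones): each op records its result/operands plus a CC_OP code,
   and a flag is computed only when something finally reads it. */
#include <stdbool.h>
#include <stdint.h>
typedef enum { DEMO_CC_LOGIC, DEMO_CC_SUB } demo_cc_op;
typedef struct { uint32_t dst, src; demo_cc_op op; } demo_cc_state;

static bool demo_get_zf(const demo_cc_state *cc)
{
    return cc->dst == 0;                    /* ZF needs only the result */
}
static bool demo_get_cf(const demo_cc_state *cc)
{
    switch (cc->op) {
    case DEMO_CC_LOGIC:
        return false;                       /* AND/OR/XOR clear CF */
    case DEMO_CC_SUB:
        /* dst = src1 - src2, src = src2: borrow iff src1 < src2 */
        return (uint32_t)(cc->dst + cc->src) < cc->src;
    }
    return false;
}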
274 #define NB_OP_SIZES 4
276 #else /* !TARGET_X86_64 */
278 #define NB_OP_SIZES 3
280 #endif /* !TARGET_X86_64 */
282 #if defined(HOST_WORDS_BIGENDIAN)
283 #define REG_B_OFFSET (sizeof(target_ulong) - 1)
284 #define REG_H_OFFSET (sizeof(target_ulong) - 2)
285 #define REG_W_OFFSET (sizeof(target_ulong) - 2)
286 #define REG_L_OFFSET (sizeof(target_ulong) - 4)
287 #define REG_LH_OFFSET (sizeof(target_ulong) - 8)
289 #define REG_B_OFFSET 0
290 #define REG_H_OFFSET 1
291 #define REG_W_OFFSET 0
292 #define REG_L_OFFSET 0
293 #define REG_LH_OFFSET 4
296 /* In instruction encodings for byte register accesses the
297 * register number usually indicates "low 8 bits of register N";
298 * however there are some special cases where N 4..7 indicates
299 * [AH, CH, DH, BH], i.e. "bits 15..8 of register N-4". Return
300 * true for this special case, false otherwise.
301 */
302 static inline bool byte_reg_is_xH(int reg)
308 if (reg >= 8 || x86_64_hregs) {
315 /* Select the size of a push/pop operation. */
316 static inline TCGMemOp mo_pushpop(DisasContext *s, TCGMemOp ot)
319 return ot == MO_16 ? MO_16 : MO_64;
325 /* Select the size of the stack pointer. */
326 static inline TCGMemOp mo_stacksize(DisasContext *s)
328 return CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16;
331 /* Select only size 64 else 32. Used for SSE operand sizes. */
332 static inline TCGMemOp mo_64_32(TCGMemOp ot)
335 return ot == MO_64 ? MO_64 : MO_32;
341 /* Select size 8 if lsb of B is clear, else OT. Used for decoding
342 byte vs word opcodes. */
343 static inline TCGMemOp mo_b_d(int b, TCGMemOp ot)
345 return b & 1 ? ot : MO_8;
348 /* Select size 8 if lsb of B is clear, else OT capped at 32.
349 Used for decoding operand size of port opcodes. */
350 static inline TCGMemOp mo_b_d32(int b, TCGMemOp ot)
352 return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
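/* Illustration (standalone): opcode bit 0 selects byte vs. full
   operand size, mirroring mo_b_d above with MO_8..MO_64 as 0..3. */
#include <assert.h>
static void demo_mo_b_d(void)
{
    int ot = 3;                          /* current operand size: MO_64 */
    assert(((0x88 & 1) ? ot : 0) == 0);  /* 0x88 MOV r/m8,r8: MO_8 */
    assert(((0x89 & 1) ? ot : 0) == 3);  /* 0x89 MOV r/m64,r64: MO_64 */
}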
355 static void gen_op_mov_reg_v(TCGMemOp ot, int reg, TCGv t0)
359 if (!byte_reg_is_xH(reg)) {
360 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
362 tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
366 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
369 /* For x86_64, this sets the higher half of the register to zero.
370 For i386, this is equivalent to a mov. */
371 tcg_gen_ext32u_tl(cpu_regs[reg], t0);
375 tcg_gen_mov_tl(cpu_regs[reg], t0);
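/* Sketch of what tcg_gen_deposit_tl(reg, reg, t0, ofs, len) does in
   the partial-register writes above: insert the low LEN bits of VAL
   into REG at bit OFS, preserving everything else.  Writing AL is
   ofs=0/len=8, AH is ofs=8/len=8, AX is ofs=0/len=16. */
#include <stdint.h>
static uint64_t demo_deposit(uint64_t reg, uint64_t val, int ofs, int len)
{
    uint64_t mask = (len == 64 ? ~0ULL : (1ULL << len) - 1) << ofs;
    return (reg & ~mask) | ((val << ofs) & mask);
}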
383 static inline void gen_op_mov_v_reg(TCGMemOp ot, TCGv t0, int reg)
385 if (ot == MO_8 && byte_reg_is_xH(reg)) {
386 tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8);
388 tcg_gen_mov_tl(t0, cpu_regs[reg]);
392 static void gen_add_A0_im(DisasContext *s, int val)
394 tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
396 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
400 static inline void gen_op_jmp_v(TCGv dest)
402 tcg_gen_st_tl(dest, cpu_env, offsetof(CPUX86State, eip));
405 static inline void gen_op_add_reg_im(TCGMemOp size, int reg, int32_t val)
407 tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
408 gen_op_mov_reg_v(size, reg, cpu_tmp0);
411 static inline void gen_op_add_reg_T0(TCGMemOp size, int reg)
413 tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T0);
414 gen_op_mov_reg_v(size, reg, cpu_tmp0);
417 static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
419 tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
422 static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
424 tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
427 static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
430 gen_op_st_v(s, idx, cpu_T0, cpu_A0);
432 gen_op_mov_reg_v(idx, d, cpu_T0);
436 static inline void gen_jmp_im(target_ulong pc)
438 tcg_gen_movi_tl(cpu_tmp0, pc);
439 gen_op_jmp_v(cpu_tmp0);
442 /* Compute SEG:REG into A0. SEG is selected from the override segment
443 (OVR_SEG) and the default segment (DEF_SEG). OVR_SEG may be -1 to
444 indicate no override. */
445 static void gen_lea_v_seg(DisasContext *s, TCGMemOp aflag, TCGv a0,
446 int def_seg, int ovr_seg)
452 tcg_gen_mov_tl(cpu_A0, a0);
459 if (ovr_seg < 0 && s->addseg) {
463 tcg_gen_ext32u_tl(cpu_A0, a0);
469 tcg_gen_ext16u_tl(cpu_A0, a0);
484 TCGv seg = cpu_seg_base[ovr_seg];
486 if (aflag == MO_64) {
487 tcg_gen_add_tl(cpu_A0, a0, seg);
488 } else if (CODE64(s)) {
489 tcg_gen_ext32u_tl(cpu_A0, a0);
490 tcg_gen_add_tl(cpu_A0, cpu_A0, seg);
492 tcg_gen_add_tl(cpu_A0, a0, seg);
493 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
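/* Standalone sketch of the linear-address formation above, for code
   running outside 64-bit mode: truncate the offset to the address
   size, add the segment base, and truncate the sum again. */
#include <stdint.h>
static uint64_t demo_linear_addr(uint64_t seg_base, uint64_t ofs,
                                 int aflag_is_64)
{
    if (aflag_is_64) {
        return ofs + seg_base;              /* no truncation */
    }
    return (uint32_t)(ofs + seg_base);      /* 32-bit wraparound */
}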
498 static inline void gen_string_movl_A0_ESI(DisasContext *s)
500 gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override);
503 static inline void gen_string_movl_A0_EDI(DisasContext *s)
505 gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1);
508 static inline void gen_op_movl_T0_Dshift(TCGMemOp ot)
510 tcg_gen_ld32s_tl(cpu_T0, cpu_env, offsetof(CPUX86State, df));
511 tcg_gen_shli_tl(cpu_T0, cpu_T0, ot);
514 static TCGv gen_ext_tl(TCGv dst, TCGv src, TCGMemOp size, bool sign)
519 tcg_gen_ext8s_tl(dst, src);
521 tcg_gen_ext8u_tl(dst, src);
526 tcg_gen_ext16s_tl(dst, src);
528 tcg_gen_ext16u_tl(dst, src);
534 tcg_gen_ext32s_tl(dst, src);
536 tcg_gen_ext32u_tl(dst, src);
545 static void gen_extu(TCGMemOp ot, TCGv reg)
547 gen_ext_tl(reg, reg, ot, false);
550 static void gen_exts(TCGMemOp ot, TCGv reg)
552 gen_ext_tl(reg, reg, ot, true);
555 static inline void gen_op_jnz_ecx(TCGMemOp size, TCGLabel *label1)
557 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
558 gen_extu(size, cpu_tmp0);
559 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
562 static inline void gen_op_jz_ecx(TCGMemOp size, TCGLabel *label1)
564 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
565 gen_extu(size, cpu_tmp0);
566 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
569 static void gen_helper_in_func(TCGMemOp ot, TCGv v, TCGv_i32 n)
573 gen_helper_inb(v, cpu_env, n);
576 gen_helper_inw(v, cpu_env, n);
579 gen_helper_inl(v, cpu_env, n);
586 static void gen_helper_out_func(TCGMemOp ot, TCGv_i32 v, TCGv_i32 n)
590 gen_helper_outb(cpu_env, v, n);
593 gen_helper_outw(cpu_env, v, n);
596 gen_helper_outl(cpu_env, v, n);
603 static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip,
606 target_ulong next_eip;
608 if (s->pe && (s->cpl > s->iopl || s->vm86)) {
609 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
612 gen_helper_check_iob(cpu_env, cpu_tmp2_i32);
615 gen_helper_check_iow(cpu_env, cpu_tmp2_i32);
618 gen_helper_check_iol(cpu_env, cpu_tmp2_i32);
624 if (s->flags & HF_SVMI_MASK) {
627 svm_flags |= (1 << (4 + ot));
628 next_eip = s->pc - s->cs_base;
629 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
630 gen_helper_svm_check_io(cpu_env, cpu_tmp2_i32,
631 tcg_const_i32(svm_flags),
632 tcg_const_i32(next_eip - cur_eip));
636 static inline void gen_movs(DisasContext *s, TCGMemOp ot)
638 gen_string_movl_A0_ESI(s);
639 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
640 gen_string_movl_A0_EDI(s);
641 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
642 gen_op_movl_T0_Dshift(ot);
643 gen_op_add_reg_T0(s->aflag, R_ESI);
644 gen_op_add_reg_T0(s->aflag, R_EDI);
647 static void gen_op_update1_cc(void)
649 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
652 static void gen_op_update2_cc(void)
654 tcg_gen_mov_tl(cpu_cc_src, cpu_T1);
655 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
658 static void gen_op_update3_cc(TCGv reg)
660 tcg_gen_mov_tl(cpu_cc_src2, reg);
661 tcg_gen_mov_tl(cpu_cc_src, cpu_T1);
662 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
665 static inline void gen_op_testl_T0_T1_cc(void)
667 tcg_gen_and_tl(cpu_cc_dst, cpu_T0, cpu_T1);
670 static void gen_op_update_neg_cc(void)
672 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
673 tcg_gen_neg_tl(cpu_cc_src, cpu_T0);
674 tcg_gen_movi_tl(cpu_cc_srcT, 0);
677 /* compute all eflags to cc_src */
678 static void gen_compute_eflags(DisasContext *s)
680 TCGv zero, dst, src1, src2;
683 if (s->cc_op == CC_OP_EFLAGS) {
686 if (s->cc_op == CC_OP_CLR) {
687 tcg_gen_movi_tl(cpu_cc_src, CC_Z | CC_P);
688 set_cc_op(s, CC_OP_EFLAGS);
697 /* Take care not to read values that are not live. */
698 live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
699 dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
701 zero = tcg_const_tl(0);
702 if (dead & USES_CC_DST) {
705 if (dead & USES_CC_SRC) {
708 if (dead & USES_CC_SRC2) {
714 gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op);
715 set_cc_op(s, CC_OP_EFLAGS);
722 typedef struct CCPrepare {
732 /* compute eflags.C to reg */
733 static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
739 case CC_OP_SUBB ... CC_OP_SUBQ:
740 /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
741 size = s->cc_op - CC_OP_SUBB;
742 t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
743 /* If no temporary was used, be careful not to alias t1 and t0. */
744 t0 = TCGV_EQUAL(t1, cpu_cc_src) ? cpu_tmp0 : reg;
745 tcg_gen_mov_tl(t0, cpu_cc_srcT);
749 case CC_OP_ADDB ... CC_OP_ADDQ:
750 /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
751 size = s->cc_op - CC_OP_ADDB;
752 t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
753 t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
755 return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
756 .reg2 = t1, .mask = -1, .use_reg2 = true };
758 case CC_OP_LOGICB ... CC_OP_LOGICQ:
761 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
763 case CC_OP_INCB ... CC_OP_INCQ:
764 case CC_OP_DECB ... CC_OP_DECQ:
765 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
766 .mask = -1, .no_setcond = true };
768 case CC_OP_SHLB ... CC_OP_SHLQ:
769 /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
770 size = s->cc_op - CC_OP_SHLB;
771 shift = (8 << size) - 1;
772 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
773 .mask = (target_ulong)1 << shift };
775 case CC_OP_MULB ... CC_OP_MULQ:
776 return (CCPrepare) { .cond = TCG_COND_NE,
777 .reg = cpu_cc_src, .mask = -1 };
779 case CC_OP_BMILGB ... CC_OP_BMILGQ:
780 size = s->cc_op - CC_OP_BMILGB;
781 t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
782 return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
786 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
787 .mask = -1, .no_setcond = true };
790 case CC_OP_SARB ... CC_OP_SARQ:
792 return (CCPrepare) { .cond = TCG_COND_NE,
793 .reg = cpu_cc_src, .mask = CC_C };
796 /* The need to compute only C from CC_OP_DYNAMIC is important
797 in efficiently implementing e.g. INC at the start of a TB. */
799 gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
800 cpu_cc_src2, cpu_cc_op);
801 return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
802 .mask = -1, .no_setcond = true };
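/* Worked example (standalone) of the carry recovery used above: after
   ADD, CC_DST holds the result and CC_SRC one operand, so CF is
   (uintN)dst < (uintN)src; after SUB, CC_SRCT holds the minuend and
   CC_SRC the subtrahend, so CF is srcT < src. */
#include <assert.h>
#include <stdint.h>
static void demo_carry(void)
{
    uint8_t dst = (uint8_t)(0xF0 + 0x20);   /* ADDB: 0xF0+0x20 = 0x10 */
    assert(dst < (uint8_t)0x20);            /* carry out occurred */
    uint8_t srcT = 0x10, src = 0x20;        /* SUBB: 0x10 - 0x20 */
    assert(srcT < src);                     /* borrow, so CF is set */
}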
806 /* compute eflags.P to reg */
807 static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
809 gen_compute_eflags(s);
810 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
814 /* compute eflags.S to reg */
815 static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
819 gen_compute_eflags(s);
825 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
829 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
832 TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3;
833 TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
834 return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
839 /* compute eflags.O to reg */
840 static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
845 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
846 .mask = -1, .no_setcond = true };
849 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
851 gen_compute_eflags(s);
852 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
857 /* compute eflags.Z to reg */
858 static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
862 gen_compute_eflags(s);
868 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
871 return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
873 return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_src,
877 TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3;
878 TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
879 return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
884 /* perform a conditional store into register 'reg' according to jump opcode
885 value 'b'. In the fast case, T0 is guaranteed not to be used. */
886 static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
888 int inv, jcc_op, cond;
894 jcc_op = (b >> 1) & 7;
897 case CC_OP_SUBB ... CC_OP_SUBQ:
898 /* We optimize relational operators for the cmp/jcc case. */
899 size = s->cc_op - CC_OP_SUBB;
902 tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
903 gen_extu(size, cpu_tmp4);
904 t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
905 cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = cpu_tmp4,
906 .reg2 = t0, .mask = -1, .use_reg2 = true };
915 tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
916 gen_exts(size, cpu_tmp4);
917 t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, true);
918 cc = (CCPrepare) { .cond = cond, .reg = cpu_tmp4,
919 .reg2 = t0, .mask = -1, .use_reg2 = true };
929 /* This actually generates good code for JC, JZ and JS. */
932 cc = gen_prepare_eflags_o(s, reg);
935 cc = gen_prepare_eflags_c(s, reg);
938 cc = gen_prepare_eflags_z(s, reg);
941 gen_compute_eflags(s);
942 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
943 .mask = CC_Z | CC_C };
946 cc = gen_prepare_eflags_s(s, reg);
949 cc = gen_prepare_eflags_p(s, reg);
952 gen_compute_eflags(s);
953 if (TCGV_EQUAL(reg, cpu_cc_src)) {
956 tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
957 tcg_gen_xor_tl(reg, reg, cpu_cc_src);
958 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
963 gen_compute_eflags(s);
964 if (TCGV_EQUAL(reg, cpu_cc_src)) {
967 tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
968 tcg_gen_xor_tl(reg, reg, cpu_cc_src);
969 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
970 .mask = CC_S | CC_Z };
977 cc.cond = tcg_invert_cond(cc.cond);
982 static void gen_setcc1(DisasContext *s, int b, TCGv reg)
984 CCPrepare cc = gen_prepare_cc(s, b, reg);
987 if (cc.cond == TCG_COND_EQ) {
988 tcg_gen_xori_tl(reg, cc.reg, 1);
990 tcg_gen_mov_tl(reg, cc.reg);
995 if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
996 cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
997 tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
998 tcg_gen_andi_tl(reg, reg, 1);
1001 if (cc.mask != -1) {
1002 tcg_gen_andi_tl(reg, cc.reg, cc.mask);
1006 tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
1008 tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
1012 static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
1014 gen_setcc1(s, JCC_B << 1, reg);
1017 /* generate a conditional jump to label 'l1' according to jump opcode
1018 value 'b'. In the fast case, T0 is guaranteed not to be used. */
1019 static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
1021 CCPrepare cc = gen_prepare_cc(s, b, cpu_T0);
1023 if (cc.mask != -1) {
1024 tcg_gen_andi_tl(cpu_T0, cc.reg, cc.mask);
1028 tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1030 tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1034 /* Generate a conditional jump to label 'l1' according to jump opcode
1035 value 'b'. In the fast case, T0 is guaranteed not to be used.
1036 A translation block must end soon. */
1037 static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
1039 CCPrepare cc = gen_prepare_cc(s, b, cpu_T0);
1041 gen_update_cc_op(s);
1042 if (cc.mask != -1) {
1043 tcg_gen_andi_tl(cpu_T0, cc.reg, cc.mask);
1046 set_cc_op(s, CC_OP_DYNAMIC);
1048 tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1050 tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1054 /* XXX: does not work with gdbstub "ice" single step - not a
1055 serious problem */
1056 static TCGLabel *gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
1058 TCGLabel *l1 = gen_new_label();
1059 TCGLabel *l2 = gen_new_label();
1060 gen_op_jnz_ecx(s->aflag, l1);
1062 gen_jmp_tb(s, next_eip, 1);
1067 static inline void gen_stos(DisasContext *s, TCGMemOp ot)
1069 gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
1070 gen_string_movl_A0_EDI(s);
1071 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
1072 gen_op_movl_T0_Dshift(ot);
1073 gen_op_add_reg_T0(s->aflag, R_EDI);
1076 static inline void gen_lods(DisasContext *s, TCGMemOp ot)
1078 gen_string_movl_A0_ESI(s);
1079 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
1080 gen_op_mov_reg_v(ot, R_EAX, cpu_T0);
1081 gen_op_movl_T0_Dshift(ot);
1082 gen_op_add_reg_T0(s->aflag, R_ESI);
1085 static inline void gen_scas(DisasContext *s, TCGMemOp ot)
1087 gen_string_movl_A0_EDI(s);
1088 gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
1089 gen_op(s, OP_CMPL, ot, R_EAX);
1090 gen_op_movl_T0_Dshift(ot);
1091 gen_op_add_reg_T0(s->aflag, R_EDI);
1094 static inline void gen_cmps(DisasContext *s, TCGMemOp ot)
1096 gen_string_movl_A0_EDI(s);
1097 gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
1098 gen_string_movl_A0_ESI(s);
1099 gen_op(s, OP_CMPL, ot, OR_TMP0);
1100 gen_op_movl_T0_Dshift(ot);
1101 gen_op_add_reg_T0(s->aflag, R_ESI);
1102 gen_op_add_reg_T0(s->aflag, R_EDI);
1105 static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
1107 if (s->flags & HF_IOBPT_MASK) {
1108 TCGv_i32 t_size = tcg_const_i32(1 << ot);
1109 TCGv t_next = tcg_const_tl(s->pc - s->cs_base);
1111 gen_helper_bpt_io(cpu_env, t_port, t_size, t_next);
1112 tcg_temp_free_i32(t_size);
1113 tcg_temp_free(t_next);
1118 static inline void gen_ins(DisasContext *s, TCGMemOp ot)
1120 if (s->base.tb->cflags & CF_USE_ICOUNT) {
1123 gen_string_movl_A0_EDI(s);
1124 /* Note: we must do this dummy write first to be restartable in
1125 case of page fault. */
1126 tcg_gen_movi_tl(cpu_T0, 0);
1127 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
1128 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
1129 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
1130 gen_helper_in_func(ot, cpu_T0, cpu_tmp2_i32);
1131 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
1132 gen_op_movl_T0_Dshift(ot);
1133 gen_op_add_reg_T0(s->aflag, R_EDI);
1134 gen_bpt_io(s, cpu_tmp2_i32, ot);
1135 if (s->base.tb->cflags & CF_USE_ICOUNT) {
1140 static inline void gen_outs(DisasContext *s, TCGMemOp ot)
1142 if (s->base.tb->cflags & CF_USE_ICOUNT) {
1145 gen_string_movl_A0_ESI(s);
1146 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
1148 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
1149 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
1150 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T0);
1151 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
1152 gen_op_movl_T0_Dshift(ot);
1153 gen_op_add_reg_T0(s->aflag, R_ESI);
1154 gen_bpt_io(s, cpu_tmp2_i32, ot);
1155 if (s->base.tb->cflags & CF_USE_ICOUNT) {
1160 /* same method as Valgrind: we generate jumps to current or next
1161 instruction */
1162 #define GEN_REPZ(op) \
1163 static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \
1164 target_ulong cur_eip, target_ulong next_eip) \
1167 gen_update_cc_op(s); \
1168 l2 = gen_jz_ecx_string(s, next_eip); \
1169 gen_ ## op(s, ot); \
1170 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1171 /* a loop would cause two single step exceptions if ECX = 1 \
1172 before rep string_insn */ \
1174 gen_op_jz_ecx(s->aflag, l2); \
1175 gen_jmp(s, cur_eip); \
1178 #define GEN_REPZ2(op) \
1179 static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \
1180 target_ulong cur_eip, \
1181 target_ulong next_eip, \
1185 gen_update_cc_op(s); \
1186 l2 = gen_jz_ecx_string(s, next_eip); \
1187 gen_ ## op(s, ot); \
1188 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1189 gen_update_cc_op(s); \
1190 gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2); \
1192 gen_op_jz_ecx(s->aflag, l2); \
1193 gen_jmp(s, cur_eip); \
1204 static void gen_helper_fp_arith_ST0_FT0(int op)
1208 gen_helper_fadd_ST0_FT0(cpu_env);
1211 gen_helper_fmul_ST0_FT0(cpu_env);
1214 gen_helper_fcom_ST0_FT0(cpu_env);
1217 gen_helper_fcom_ST0_FT0(cpu_env);
1220 gen_helper_fsub_ST0_FT0(cpu_env);
1223 gen_helper_fsubr_ST0_FT0(cpu_env);
1226 gen_helper_fdiv_ST0_FT0(cpu_env);
1229 gen_helper_fdivr_ST0_FT0(cpu_env);
1234 /* NOTE the exception in "r" op ordering */
1235 static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
1237 TCGv_i32 tmp = tcg_const_i32(opreg);
1240 gen_helper_fadd_STN_ST0(cpu_env, tmp);
1243 gen_helper_fmul_STN_ST0(cpu_env, tmp);
1246 gen_helper_fsubr_STN_ST0(cpu_env, tmp);
1249 gen_helper_fsub_STN_ST0(cpu_env, tmp);
1252 gen_helper_fdivr_STN_ST0(cpu_env, tmp);
1255 gen_helper_fdiv_STN_ST0(cpu_env, tmp);
1260 /* if d == OR_TMP0, it means memory operand (address in A0) */
1261 static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d)
1264 gen_op_mov_v_reg(ot, cpu_T0, d);
1265 } else if (!(s1->prefix & PREFIX_LOCK)) {
1266 gen_op_ld_v(s1, ot, cpu_T0, cpu_A0);
1270 gen_compute_eflags_c(s1, cpu_tmp4);
1271 if (s1->prefix & PREFIX_LOCK) {
1272 tcg_gen_add_tl(cpu_T0, cpu_tmp4, cpu_T1);
1273 tcg_gen_atomic_add_fetch_tl(cpu_T0, cpu_A0, cpu_T0,
1274 s1->mem_index, ot | MO_LE);
1276 tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
1277 tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_tmp4);
1278 gen_op_st_rm_T0_A0(s1, ot, d);
1280 gen_op_update3_cc(cpu_tmp4);
1281 set_cc_op(s1, CC_OP_ADCB + ot);
1284 gen_compute_eflags_c(s1, cpu_tmp4);
1285 if (s1->prefix & PREFIX_LOCK) {
1286 tcg_gen_add_tl(cpu_T0, cpu_T1, cpu_tmp4);
1287 tcg_gen_neg_tl(cpu_T0, cpu_T0);
1288 tcg_gen_atomic_add_fetch_tl(cpu_T0, cpu_A0, cpu_T0,
1289 s1->mem_index, ot | MO_LE);
1291 tcg_gen_sub_tl(cpu_T0, cpu_T0, cpu_T1);
1292 tcg_gen_sub_tl(cpu_T0, cpu_T0, cpu_tmp4);
1293 gen_op_st_rm_T0_A0(s1, ot, d);
1295 gen_op_update3_cc(cpu_tmp4);
1296 set_cc_op(s1, CC_OP_SBBB + ot);
1299 if (s1->prefix & PREFIX_LOCK) {
1300 tcg_gen_atomic_add_fetch_tl(cpu_T0, cpu_A0, cpu_T1,
1301 s1->mem_index, ot | MO_LE);
1303 tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
1304 gen_op_st_rm_T0_A0(s1, ot, d);
1306 gen_op_update2_cc();
1307 set_cc_op(s1, CC_OP_ADDB + ot);
1310 if (s1->prefix & PREFIX_LOCK) {
1311 tcg_gen_neg_tl(cpu_T0, cpu_T1);
1312 tcg_gen_atomic_fetch_add_tl(cpu_cc_srcT, cpu_A0, cpu_T0,
1313 s1->mem_index, ot | MO_LE);
1314 tcg_gen_sub_tl(cpu_T0, cpu_cc_srcT, cpu_T1);
1316 tcg_gen_mov_tl(cpu_cc_srcT, cpu_T0);
1317 tcg_gen_sub_tl(cpu_T0, cpu_T0, cpu_T1);
1318 gen_op_st_rm_T0_A0(s1, ot, d);
1320 gen_op_update2_cc();
1321 set_cc_op(s1, CC_OP_SUBB + ot);
1325 if (s1->prefix & PREFIX_LOCK) {
1326 tcg_gen_atomic_and_fetch_tl(cpu_T0, cpu_A0, cpu_T1,
1327 s1->mem_index, ot | MO_LE);
1329 tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);
1330 gen_op_st_rm_T0_A0(s1, ot, d);
1332 gen_op_update1_cc();
1333 set_cc_op(s1, CC_OP_LOGICB + ot);
1336 if (s1->prefix & PREFIX_LOCK) {
1337 tcg_gen_atomic_or_fetch_tl(cpu_T0, cpu_A0, cpu_T1,
1338 s1->mem_index, ot | MO_LE);
1340 tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_T1);
1341 gen_op_st_rm_T0_A0(s1, ot, d);
1343 gen_op_update1_cc();
1344 set_cc_op(s1, CC_OP_LOGICB + ot);
1347 if (s1->prefix & PREFIX_LOCK) {
1348 tcg_gen_atomic_xor_fetch_tl(cpu_T0, cpu_A0, cpu_T1,
1349 s1->mem_index, ot | MO_LE);
1351 tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_T1);
1352 gen_op_st_rm_T0_A0(s1, ot, d);
1354 gen_op_update1_cc();
1355 set_cc_op(s1, CC_OP_LOGICB + ot);
1358 tcg_gen_mov_tl(cpu_cc_src, cpu_T1);
1359 tcg_gen_mov_tl(cpu_cc_srcT, cpu_T0);
1360 tcg_gen_sub_tl(cpu_cc_dst, cpu_T0, cpu_T1);
1361 set_cc_op(s1, CC_OP_SUBB + ot);
1366 /* if d == OR_TMP0, it means memory operand (address in A0) */
1367 static void gen_inc(DisasContext *s1, TCGMemOp ot, int d, int c)
1369 if (s1->prefix & PREFIX_LOCK) {
1370 tcg_gen_movi_tl(cpu_T0, c > 0 ? 1 : -1);
1371 tcg_gen_atomic_add_fetch_tl(cpu_T0, cpu_A0, cpu_T0,
1372 s1->mem_index, ot | MO_LE);
1375 gen_op_mov_v_reg(ot, cpu_T0, d);
1377 gen_op_ld_v(s1, ot, cpu_T0, cpu_A0);
1379 tcg_gen_addi_tl(cpu_T0, cpu_T0, (c > 0 ? 1 : -1));
1380 gen_op_st_rm_T0_A0(s1, ot, d);
1383 gen_compute_eflags_c(s1, cpu_cc_src);
1384 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
1385 set_cc_op(s1, (c > 0 ? CC_OP_INCB : CC_OP_DECB) + ot);
1388 static void gen_shift_flags(DisasContext *s, TCGMemOp ot, TCGv result,
1389 TCGv shm1, TCGv count, bool is_right)
1391 TCGv_i32 z32, s32, oldop;
1394 /* Store the results into the CC variables. If we know that the
1395 variable must be dead, store unconditionally. Otherwise we must
1396 take care not to disrupt the current contents. */
1397 z_tl = tcg_const_tl(0);
1398 if (cc_op_live[s->cc_op] & USES_CC_DST) {
1399 tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
1400 result, cpu_cc_dst);
1402 tcg_gen_mov_tl(cpu_cc_dst, result);
1404 if (cc_op_live[s->cc_op] & USES_CC_SRC) {
1405 tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
1408 tcg_gen_mov_tl(cpu_cc_src, shm1);
1410 tcg_temp_free(z_tl);
1412 /* Get the two potential CC_OP values into temporaries. */
1413 tcg_gen_movi_i32(cpu_tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
1414 if (s->cc_op == CC_OP_DYNAMIC) {
1417 tcg_gen_movi_i32(cpu_tmp3_i32, s->cc_op);
1418 oldop = cpu_tmp3_i32;
1421 /* Conditionally store the CC_OP value. */
1422 z32 = tcg_const_i32(0);
1423 s32 = tcg_temp_new_i32();
1424 tcg_gen_trunc_tl_i32(s32, count);
1425 tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, cpu_tmp2_i32, oldop);
1426 tcg_temp_free_i32(z32);
1427 tcg_temp_free_i32(s32);
1429 /* The CC_OP value is no longer predictable. */
1430 set_cc_op(s, CC_OP_DYNAMIC);
1433 static void gen_shift_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
1434 int is_right, int is_arith)
1436 target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
1439 if (op1 == OR_TMP0) {
1440 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
1442 gen_op_mov_v_reg(ot, cpu_T0, op1);
1445 tcg_gen_andi_tl(cpu_T1, cpu_T1, mask);
1446 tcg_gen_subi_tl(cpu_tmp0, cpu_T1, 1);
1450 gen_exts(ot, cpu_T0);
1451 tcg_gen_sar_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
1452 tcg_gen_sar_tl(cpu_T0, cpu_T0, cpu_T1);
1454 gen_extu(ot, cpu_T0);
1455 tcg_gen_shr_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
1456 tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_T1);
1459 tcg_gen_shl_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
1460 tcg_gen_shl_tl(cpu_T0, cpu_T0, cpu_T1);
1464 gen_op_st_rm_T0_A0(s, ot, op1);
1466 gen_shift_flags(s, ot, cpu_T0, cpu_tmp0, cpu_T1, is_right);
1469 static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
1470 int is_right, int is_arith)
1472 int mask = (ot == MO_64 ? 0x3f : 0x1f);
1476 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
1478 gen_op_mov_v_reg(ot, cpu_T0, op1);
1484 gen_exts(ot, cpu_T0);
1485 tcg_gen_sari_tl(cpu_tmp4, cpu_T0, op2 - 1);
1486 tcg_gen_sari_tl(cpu_T0, cpu_T0, op2);
1488 gen_extu(ot, cpu_T0);
1489 tcg_gen_shri_tl(cpu_tmp4, cpu_T0, op2 - 1);
1490 tcg_gen_shri_tl(cpu_T0, cpu_T0, op2);
1493 tcg_gen_shli_tl(cpu_tmp4, cpu_T0, op2 - 1);
1494 tcg_gen_shli_tl(cpu_T0, cpu_T0, op2);
1499 gen_op_st_rm_T0_A0(s, ot, op1);
1501 /* update eflags if the shift count is non-zero */
1503 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
1504 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
1505 set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
1509 static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right)
1511 target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
1515 if (op1 == OR_TMP0) {
1516 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
1518 gen_op_mov_v_reg(ot, cpu_T0, op1);
1521 tcg_gen_andi_tl(cpu_T1, cpu_T1, mask);
1525 /* Replicate the 8-bit input so that a 32-bit rotate works. */
1526 tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
1527 tcg_gen_muli_tl(cpu_T0, cpu_T0, 0x01010101);
1530 /* Replicate the 16-bit input so that a 32-bit rotate works. */
1531 tcg_gen_deposit_tl(cpu_T0, cpu_T0, cpu_T0, 16, 16);
1534 #ifdef TARGET_X86_64
1536 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
1537 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
1539 tcg_gen_rotr_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
1541 tcg_gen_rotl_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
1543 tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
1548 tcg_gen_rotr_tl(cpu_T0, cpu_T0, cpu_T1);
1550 tcg_gen_rotl_tl(cpu_T0, cpu_T0, cpu_T1);
1556 gen_op_st_rm_T0_A0(s, ot, op1);
1558 /* We'll need the flags computed into CC_SRC. */
1559 gen_compute_eflags(s);
1561 /* The value that was "rotated out" is now present at the other end
1562 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1563 since we've computed the flags into CC_SRC, these variables are
1564 currently dead. */
1566 tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask - 1);
1567 tcg_gen_shri_tl(cpu_cc_dst, cpu_T0, mask);
1568 tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
1570 tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask);
1571 tcg_gen_andi_tl(cpu_cc_dst, cpu_T0, 1);
1573 tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1574 tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1576 /* Now conditionally store the new CC_OP value. If the shift count
1577 is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
1578 Otherwise reuse CC_OP_ADCOX, which has the C and O flags split out
1579 exactly as we computed above. */
1580 t0 = tcg_const_i32(0);
1581 t1 = tcg_temp_new_i32();
1582 tcg_gen_trunc_tl_i32(t1, cpu_T1);
1583 tcg_gen_movi_i32(cpu_tmp2_i32, CC_OP_ADCOX);
1584 tcg_gen_movi_i32(cpu_tmp3_i32, CC_OP_EFLAGS);
1585 tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
1586 cpu_tmp2_i32, cpu_tmp3_i32);
1587 tcg_temp_free_i32(t0);
1588 tcg_temp_free_i32(t1);
1590 /* The CC_OP value is no longer predictable. */
1591 set_cc_op(s, CC_OP_DYNAMIC);
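/* Illustration of the byte-replication trick above: with the 8-bit
   input copied into all four byte lanes, any 32-bit rotate leaves each
   lane holding the correctly rotated 8-bit value. */
#include <assert.h>
#include <stdint.h>
static uint32_t demo_rotr32(uint32_t x, unsigned n)
{
    n &= 31;
    return n ? (x >> n) | (x << (32 - n)) : x;
}
static void demo_rot8_via_rot32(void)
{
    uint32_t rep = 0xB1u * 0x01010101u;           /* 0xB1B1B1B1 */
    assert((uint8_t)demo_rotr32(rep, 4) == 0x1B); /* ror8(0xB1, 4) */
}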
1594 static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
1597 int mask = (ot == MO_64 ? 0x3f : 0x1f);
1601 if (op1 == OR_TMP0) {
1602 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
1604 gen_op_mov_v_reg(ot, cpu_T0, op1);
1610 #ifdef TARGET_X86_64
1612 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
1614 tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
1616 tcg_gen_rotli_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
1618 tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
1623 tcg_gen_rotri_tl(cpu_T0, cpu_T0, op2);
1625 tcg_gen_rotli_tl(cpu_T0, cpu_T0, op2);
1636 shift = mask + 1 - shift;
1638 gen_extu(ot, cpu_T0);
1639 tcg_gen_shli_tl(cpu_tmp0, cpu_T0, shift);
1640 tcg_gen_shri_tl(cpu_T0, cpu_T0, mask + 1 - shift);
1641 tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_tmp0);
1647 gen_op_st_rm_T0_A0(s, ot, op1);
1650 /* Compute the flags into CC_SRC. */
1651 gen_compute_eflags(s);
1653 /* The value that was "rotated out" is now present at the other end
1654 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1655 since we've computed the flags into CC_SRC, these variables are
1656 currently dead. */
1658 tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask - 1);
1659 tcg_gen_shri_tl(cpu_cc_dst, cpu_T0, mask);
1660 tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
1662 tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask);
1663 tcg_gen_andi_tl(cpu_cc_dst, cpu_T0, 1);
1665 tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1666 tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1667 set_cc_op(s, CC_OP_ADCOX);
1671 /* XXX: add faster immediate = 1 case */
1672 static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
1675 gen_compute_eflags(s);
1676 assert(s->cc_op == CC_OP_EFLAGS);
1680 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
1682 gen_op_mov_v_reg(ot, cpu_T0, op1);
1687 gen_helper_rcrb(cpu_T0, cpu_env, cpu_T0, cpu_T1);
1690 gen_helper_rcrw(cpu_T0, cpu_env, cpu_T0, cpu_T1);
1693 gen_helper_rcrl(cpu_T0, cpu_env, cpu_T0, cpu_T1);
1695 #ifdef TARGET_X86_64
1697 gen_helper_rcrq(cpu_T0, cpu_env, cpu_T0, cpu_T1);
1706 gen_helper_rclb(cpu_T0, cpu_env, cpu_T0, cpu_T1);
1709 gen_helper_rclw(cpu_T0, cpu_env, cpu_T0, cpu_T1);
1712 gen_helper_rcll(cpu_T0, cpu_env, cpu_T0, cpu_T1);
1714 #ifdef TARGET_X86_64
1716 gen_helper_rclq(cpu_T0, cpu_env, cpu_T0, cpu_T1);
1724 gen_op_st_rm_T0_A0(s, ot, op1);
1727 /* XXX: add faster immediate case */
1728 static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
1729 bool is_right, TCGv count_in)
1731 target_ulong mask = (ot == MO_64 ? 63 : 31);
1735 if (op1 == OR_TMP0) {
1736 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
1738 gen_op_mov_v_reg(ot, cpu_T0, op1);
1741 count = tcg_temp_new();
1742 tcg_gen_andi_tl(count, count_in, mask);
1746 /* Note: we implement the Intel behaviour for shift count > 16.
1747 This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A
1748 portion by constructing it as a 32-bit value. */
1750 tcg_gen_deposit_tl(cpu_tmp0, cpu_T0, cpu_T1, 16, 16);
1751 tcg_gen_mov_tl(cpu_T1, cpu_T0);
1752 tcg_gen_mov_tl(cpu_T0, cpu_tmp0);
1754 tcg_gen_deposit_tl(cpu_T1, cpu_T0, cpu_T1, 16, 16);
1757 #ifdef TARGET_X86_64
1759 /* Concatenate the two 32-bit values and use a 64-bit shift. */
1760 tcg_gen_subi_tl(cpu_tmp0, count, 1);
1762 tcg_gen_concat_tl_i64(cpu_T0, cpu_T0, cpu_T1);
1763 tcg_gen_shr_i64(cpu_tmp0, cpu_T0, cpu_tmp0);
1764 tcg_gen_shr_i64(cpu_T0, cpu_T0, count);
1766 tcg_gen_concat_tl_i64(cpu_T0, cpu_T1, cpu_T0);
1767 tcg_gen_shl_i64(cpu_tmp0, cpu_T0, cpu_tmp0);
1768 tcg_gen_shl_i64(cpu_T0, cpu_T0, count);
1769 tcg_gen_shri_i64(cpu_tmp0, cpu_tmp0, 32);
1770 tcg_gen_shri_i64(cpu_T0, cpu_T0, 32);
1775 tcg_gen_subi_tl(cpu_tmp0, count, 1);
1777 tcg_gen_shr_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
1779 tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
1780 tcg_gen_shr_tl(cpu_T0, cpu_T0, count);
1781 tcg_gen_shl_tl(cpu_T1, cpu_T1, cpu_tmp4);
1783 tcg_gen_shl_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
1785 /* Only needed if count > 16, for Intel behaviour. */
1786 tcg_gen_subfi_tl(cpu_tmp4, 33, count);
1787 tcg_gen_shr_tl(cpu_tmp4, cpu_T1, cpu_tmp4);
1788 tcg_gen_or_tl(cpu_tmp0, cpu_tmp0, cpu_tmp4);
1791 tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
1792 tcg_gen_shl_tl(cpu_T0, cpu_T0, count);
1793 tcg_gen_shr_tl(cpu_T1, cpu_T1, cpu_tmp4);
1795 tcg_gen_movi_tl(cpu_tmp4, 0);
1796 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T1, count, cpu_tmp4,
1798 tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_T1);
1803 gen_op_st_rm_T0_A0(s, ot, op1);
1805 gen_shift_flags(s, ot, cpu_T0, cpu_tmp0, count, is_right);
1806 tcg_temp_free(count);
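/* Standalone sketch of the 16-bit SHRD composition above: build the
   value as A:B:A so one wide shift handles counts 0..31, with counts
   above 16 pulling in the extra copy of A (the Intel behaviour). */
#include <stdint.h>
static uint16_t demo_shrd16(uint16_t a, uint16_t b, unsigned count)
{
    uint64_t v = ((uint64_t)a << 32) | ((uint32_t)b << 16) | a;
    return (uint16_t)(v >> (count & 31));
}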
1809 static void gen_shift(DisasContext *s1, int op, TCGMemOp ot, int d, int s)
1812 gen_op_mov_v_reg(ot, cpu_T1, s);
1815 gen_rot_rm_T1(s1, ot, d, 0);
1818 gen_rot_rm_T1(s1, ot, d, 1);
1822 gen_shift_rm_T1(s1, ot, d, 0, 0);
1825 gen_shift_rm_T1(s1, ot, d, 1, 0);
1828 gen_shift_rm_T1(s1, ot, d, 1, 1);
1831 gen_rotc_rm_T1(s1, ot, d, 0);
1834 gen_rotc_rm_T1(s1, ot, d, 1);
1839 static void gen_shifti(DisasContext *s1, int op, TCGMemOp ot, int d, int c)
1843 gen_rot_rm_im(s1, ot, d, c, 0);
1846 gen_rot_rm_im(s1, ot, d, c, 1);
1850 gen_shift_rm_im(s1, ot, d, c, 0, 0);
1853 gen_shift_rm_im(s1, ot, d, c, 1, 0);
1856 gen_shift_rm_im(s1, ot, d, c, 1, 1);
1859 /* currently not optimized */
1860 tcg_gen_movi_tl(cpu_T1, c);
1861 gen_shift(s1, op, ot, d, OR_TMP1);
1866 /* Decompose an address. */
1868 typedef struct AddressParts {
1876 static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
1879 int def_seg, base, index, scale, mod, rm;
1888 mod = (modrm >> 6) & 3;
1890 base = rm | REX_B(s);
1893 /* Normally filtered out earlier, but including this path
1894 simplifies multi-byte nop, as well as bndcl, bndcu, bndcn. */
1903 int code = cpu_ldub_code(env, s->pc++);
1904 scale = (code >> 6) & 3;
1905 index = ((code >> 3) & 7) | REX_X(s);
1907 index = -1; /* no index */
1909 base = (code & 7) | REX_B(s);
1915 if ((base & 7) == 5) {
1917 disp = (int32_t)cpu_ldl_code(env, s->pc);
1919 if (CODE64(s) && !havesib) {
1921 disp += s->pc + s->rip_offset;
1926 disp = (int8_t)cpu_ldub_code(env, s->pc++);
1930 disp = (int32_t)cpu_ldl_code(env, s->pc);
1935 /* For correct popl handling with esp. */
1936 if (base == R_ESP && s->popl_esp_hack) {
1937 disp += s->popl_esp_hack;
1939 if (base == R_EBP || base == R_ESP) {
1948 disp = cpu_lduw_code(env, s->pc);
1952 } else if (mod == 1) {
1953 disp = (int8_t)cpu_ldub_code(env, s->pc++);
1955 disp = (int16_t)cpu_lduw_code(env, s->pc);
2000 return (AddressParts){ def_seg, base, index, scale, disp };
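/* Standalone sketch: an AddressParts describes base + (index << scale)
   + disp relative to a default segment; evaluating it looks like this
   (register numbers replaced by register values for illustration). */
#include <stdint.h>
static uint64_t demo_eval_ea(int64_t disp, uint64_t base, int have_base,
                             uint64_t index, int have_index, int scale)
{
    uint64_t ea = disp;
    if (have_base) {
        ea += base;
    }
    if (have_index) {
        ea += index << scale;       /* scale is 0..3, i.e. *1/2/4/8 */
    }
    return ea;
}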
2003 /* Compute the address, with a minimum number of TCG ops. */
2004 static TCGv gen_lea_modrm_1(AddressParts a)
2011 ea = cpu_regs[a.index];
2013 tcg_gen_shli_tl(cpu_A0, cpu_regs[a.index], a.scale);
2017 tcg_gen_add_tl(cpu_A0, ea, cpu_regs[a.base]);
2020 } else if (a.base >= 0) {
2021 ea = cpu_regs[a.base];
2023 if (TCGV_IS_UNUSED(ea)) {
2024 tcg_gen_movi_tl(cpu_A0, a.disp);
2026 } else if (a.disp != 0) {
2027 tcg_gen_addi_tl(cpu_A0, ea, a.disp);
2034 static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
2036 AddressParts a = gen_lea_modrm_0(env, s, modrm);
2037 TCGv ea = gen_lea_modrm_1(a);
2038 gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);
2041 static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
2043 (void)gen_lea_modrm_0(env, s, modrm);
2046 /* Used for BNDCL, BNDCU, BNDCN. */
2047 static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm,
2048 TCGCond cond, TCGv_i64 bndv)
2050 TCGv ea = gen_lea_modrm_1(gen_lea_modrm_0(env, s, modrm));
2052 tcg_gen_extu_tl_i64(cpu_tmp1_i64, ea);
2054 tcg_gen_ext32u_i64(cpu_tmp1_i64, cpu_tmp1_i64);
2056 tcg_gen_setcond_i64(cond, cpu_tmp1_i64, cpu_tmp1_i64, bndv);
2057 tcg_gen_extrl_i64_i32(cpu_tmp2_i32, cpu_tmp1_i64);
2058 gen_helper_bndck(cpu_env, cpu_tmp2_i32);
2061 /* used for LEA and MOV AX, mem */
2062 static void gen_add_A0_ds_seg(DisasContext *s)
2064 gen_lea_v_seg(s, s->aflag, cpu_A0, R_DS, s->override);
2067 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2068 OR_TMP0 */
2069 static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
2070 TCGMemOp ot, int reg, int is_store)
2074 mod = (modrm >> 6) & 3;
2075 rm = (modrm & 7) | REX_B(s);
2079 gen_op_mov_v_reg(ot, cpu_T0, reg);
2080 gen_op_mov_reg_v(ot, rm, cpu_T0);
2082 gen_op_mov_v_reg(ot, cpu_T0, rm);
2084 gen_op_mov_reg_v(ot, reg, cpu_T0);
2087 gen_lea_modrm(env, s, modrm);
2090 gen_op_mov_v_reg(ot, cpu_T0, reg);
2091 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
2093 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
2095 gen_op_mov_reg_v(ot, reg, cpu_T0);
2100 static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, TCGMemOp ot)
2106 ret = cpu_ldub_code(env, s->pc);
2110 ret = cpu_lduw_code(env, s->pc);
2114 #ifdef TARGET_X86_64
2117 ret = cpu_ldl_code(env, s->pc);
2126 static inline int insn_const_size(TCGMemOp ot)
2135 static inline bool use_goto_tb(DisasContext *s, target_ulong pc)
2137 #ifndef CONFIG_USER_ONLY
2138 return (pc & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK) ||
2139 (pc & TARGET_PAGE_MASK) == (s->pc_start & TARGET_PAGE_MASK);
2145 static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
2147 target_ulong pc = s->cs_base + eip;
2149 if (use_goto_tb(s, pc)) {
2150 /* jump to same page: we can use a direct jump */
2151 tcg_gen_goto_tb(tb_num);
2153 tcg_gen_exit_tb((uintptr_t)s->base.tb + tb_num);
2154 s->base.is_jmp = DISAS_NORETURN;
2156 /* jump to another page */
2158 gen_jr(s, cpu_tmp0);
2162 static inline void gen_jcc(DisasContext *s, int b,
2163 target_ulong val, target_ulong next_eip)
2168 l1 = gen_new_label();
2171 gen_goto_tb(s, 0, next_eip);
2174 gen_goto_tb(s, 1, val);
2176 l1 = gen_new_label();
2177 l2 = gen_new_label();
2180 gen_jmp_im(next_eip);
2190 static void gen_cmovcc1(CPUX86State *env, DisasContext *s, TCGMemOp ot, int b,
2195 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
2197 cc = gen_prepare_cc(s, b, cpu_T1);
2198 if (cc.mask != -1) {
2199 TCGv t0 = tcg_temp_new();
2200 tcg_gen_andi_tl(t0, cc.reg, cc.mask);
2204 cc.reg2 = tcg_const_tl(cc.imm);
2207 tcg_gen_movcond_tl(cc.cond, cpu_T0, cc.reg, cc.reg2,
2208 cpu_T0, cpu_regs[reg]);
2209 gen_op_mov_reg_v(ot, reg, cpu_T0);
2211 if (cc.mask != -1) {
2212 tcg_temp_free(cc.reg);
2215 tcg_temp_free(cc.reg2);
2219 static inline void gen_op_movl_T0_seg(int seg_reg)
2221 tcg_gen_ld32u_tl(cpu_T0, cpu_env,
2222 offsetof(CPUX86State,segs[seg_reg].selector));
2225 static inline void gen_op_movl_seg_T0_vm(int seg_reg)
2227 tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
2228 tcg_gen_st32_tl(cpu_T0, cpu_env,
2229 offsetof(CPUX86State,segs[seg_reg].selector));
2230 tcg_gen_shli_tl(cpu_seg_base[seg_reg], cpu_T0, 4);
2233 /* move T0 to seg_reg and compute if the CPU state may change. Never
2234 call this function with seg_reg == R_CS */
2235 static void gen_movl_seg_T0(DisasContext *s, int seg_reg)
2237 if (s->pe && !s->vm86) {
2238 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
2239 gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32);
2240 /* abort translation because the addseg value may change or
2241 because ss32 may change. For R_SS, translation must always
2242 stop as special handling must be done to disable hardware
2243 interrupts for the next instruction */
2244 if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS)) {
2245 s->base.is_jmp = DISAS_TOO_MANY;
2248 gen_op_movl_seg_T0_vm(seg_reg);
2249 if (seg_reg == R_SS) {
2250 s->base.is_jmp = DISAS_TOO_MANY;
2255 static inline int svm_is_rep(int prefixes)
2257 return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
2261 gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
2262 uint32_t type, uint64_t param)
2264 /* no SVM activated; fast case */
2265 if (likely(!(s->flags & HF_SVMI_MASK)))
2266 return;
2267 gen_update_cc_op(s);
2268 gen_jmp_im(pc_start - s->cs_base);
2269 gen_helper_svm_check_intercept_param(cpu_env, tcg_const_i32(type),
2270 tcg_const_i64(param));
2274 gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
2276 gen_svm_check_intercept_param(s, pc_start, type, 0);
2279 static inline void gen_stack_update(DisasContext *s, int addend)
2281 gen_op_add_reg_im(mo_stacksize(s), R_ESP, addend);
2284 /* Generate a push. It depends on ss32, addseg and dflag. */
2285 static void gen_push_v(DisasContext *s, TCGv val)
2287 TCGMemOp d_ot = mo_pushpop(s, s->dflag);
2288 TCGMemOp a_ot = mo_stacksize(s);
2289 int size = 1 << d_ot;
2290 TCGv new_esp = cpu_A0;
2292 tcg_gen_subi_tl(cpu_A0, cpu_regs[R_ESP], size);
2297 tcg_gen_mov_tl(new_esp, cpu_A0);
2299 gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
2302 gen_op_st_v(s, d_ot, val, cpu_A0);
2303 gen_op_mov_reg_v(a_ot, R_ESP, new_esp);
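/* Sketch of the push ordering above (standalone, simplified guest
   state): the store goes through the decremented address first and
   ESP is committed only afterwards, so a faulting store leaves the
   guest ESP unchanged and the instruction can be restarted. */
#include <stdint.h>
#include <string.h>
static int demo_push32(uint8_t *mem, uint32_t mem_size,
                       uint32_t *esp, uint32_t val)
{
    uint32_t new_esp = *esp - 4;
    if (new_esp >= mem_size || mem_size - new_esp < 4) {
        return -1;                  /* "fault": ESP left unchanged */
    }
    memcpy(mem + new_esp, &val, 4); /* store through the new address */
    *esp = new_esp;                 /* commit ESP only after success */
    return 0;
}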
2306 /* a two-step pop is necessary for precise exceptions */
2307 static TCGMemOp gen_pop_T0(DisasContext *s)
2309 TCGMemOp d_ot = mo_pushpop(s, s->dflag);
2311 gen_lea_v_seg(s, mo_stacksize(s), cpu_regs[R_ESP], R_SS, -1);
2312 gen_op_ld_v(s, d_ot, cpu_T0, cpu_A0);
2317 static inline void gen_pop_update(DisasContext *s, TCGMemOp ot)
2319 gen_stack_update(s, 1 << ot);
2322 static inline void gen_stack_A0(DisasContext *s)
2324 gen_lea_v_seg(s, s->ss32 ? MO_32 : MO_16, cpu_regs[R_ESP], R_SS, -1);
2327 static void gen_pusha(DisasContext *s)
2329 TCGMemOp s_ot = s->ss32 ? MO_32 : MO_16;
2330 TCGMemOp d_ot = s->dflag;
2331 int size = 1 << d_ot;
2334 for (i = 0; i < 8; i++) {
2335 tcg_gen_addi_tl(cpu_A0, cpu_regs[R_ESP], (i - 8) * size);
2336 gen_lea_v_seg(s, s_ot, cpu_A0, R_SS, -1);
2337 gen_op_st_v(s, d_ot, cpu_regs[7 - i], cpu_A0);
2340 gen_stack_update(s, -8 * size);
2343 static void gen_popa(DisasContext *s)
2345 TCGMemOp s_ot = s->ss32 ? MO_32 : MO_16;
2346 TCGMemOp d_ot = s->dflag;
2347 int size = 1 << d_ot;
2350 for (i = 0; i < 8; i++) {
2351 /* ESP is not reloaded */
2352 if (7 - i == R_ESP) {
2355 tcg_gen_addi_tl(cpu_A0, cpu_regs[R_ESP], i * size);
2356 gen_lea_v_seg(s, s_ot, cpu_A0, R_SS, -1);
2357 gen_op_ld_v(s, d_ot, cpu_T0, cpu_A0);
2358 gen_op_mov_reg_v(d_ot, 7 - i, cpu_T0);
2361 gen_stack_update(s, 8 * size);
2364 static void gen_enter(DisasContext *s, int esp_addend, int level)
2366 TCGMemOp d_ot = mo_pushpop(s, s->dflag);
2367 TCGMemOp a_ot = CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16;
2368 int size = 1 << d_ot;
2370 /* Push BP; compute FrameTemp into T1. */
2371 tcg_gen_subi_tl(cpu_T1, cpu_regs[R_ESP], size);
2372 gen_lea_v_seg(s, a_ot, cpu_T1, R_SS, -1);
2373 gen_op_st_v(s, d_ot, cpu_regs[R_EBP], cpu_A0);
2379 /* Copy level-1 pointers from the previous frame. */
2380 for (i = 1; i < level; ++i) {
2381 tcg_gen_subi_tl(cpu_A0, cpu_regs[R_EBP], size * i);
2382 gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
2383 gen_op_ld_v(s, d_ot, cpu_tmp0, cpu_A0);
2385 tcg_gen_subi_tl(cpu_A0, cpu_T1, size * i);
2386 gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
2387 gen_op_st_v(s, d_ot, cpu_tmp0, cpu_A0);
2390 /* Push the current FrameTemp as the last level. */
2391 tcg_gen_subi_tl(cpu_A0, cpu_T1, size * level);
2392 gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
2393 gen_op_st_v(s, d_ot, cpu_T1, cpu_A0);
2396 /* Copy the FrameTemp value to EBP. */
2397 gen_op_mov_reg_v(a_ot, R_EBP, cpu_T1);
2399 /* Compute the final value of ESP. */
2400 tcg_gen_subi_tl(cpu_T1, cpu_T1, esp_addend + size * level);
2401 gen_op_mov_reg_v(a_ot, R_ESP, cpu_T1);
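/* Standalone sketch of ENTER on a 32-bit stack, mirroring the sequence
   above (mem/esp/ebp stand in for guest state; no fault checks). */
#include <stdint.h>
#include <string.h>
static void demo_enter32(uint8_t *mem, uint32_t *esp, uint32_t *ebp,
                         uint16_t alloc, int level)
{
    uint32_t frame_temp;
    level &= 31;
    *esp -= 4;
    memcpy(mem + *esp, ebp, 4);              /* push caller's EBP */
    frame_temp = *esp;
    for (int i = 1; i < level; i++) {        /* copy level-1 pointers */
        uint32_t p;
        memcpy(&p, mem + *ebp - 4 * i, 4);
        *esp -= 4;
        memcpy(mem + *esp, &p, 4);
    }
    if (level) {                             /* frame temp is last level */
        *esp -= 4;
        memcpy(mem + *esp, &frame_temp, 4);
    }
    *ebp = frame_temp;                       /* new frame pointer */
    *esp = frame_temp - alloc - 4 * level;   /* allocate the locals */
}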
2404 static void gen_leave(DisasContext *s)
2406 TCGMemOp d_ot = mo_pushpop(s, s->dflag);
2407 TCGMemOp a_ot = mo_stacksize(s);
2409 gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1);
2410 gen_op_ld_v(s, d_ot, cpu_T0, cpu_A0);
2412 tcg_gen_addi_tl(cpu_T1, cpu_regs[R_EBP], 1 << d_ot);
2414 gen_op_mov_reg_v(d_ot, R_EBP, cpu_T0);
2415 gen_op_mov_reg_v(a_ot, R_ESP, cpu_T1);
2418 static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
2420 gen_update_cc_op(s);
2421 gen_jmp_im(cur_eip);
2422 gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
2423 s->base.is_jmp = DISAS_NORETURN;
2426 /* Generate #UD for the current instruction. The assumption here is that
2427 the instruction is known, but it isn't allowed in the current cpu mode. */
2428 static void gen_illegal_opcode(DisasContext *s)
2430 gen_exception(s, EXCP06_ILLOP, s->pc_start - s->cs_base);
2433 /* Similarly, except that the assumption here is that we don't decode
2434 the instruction at all -- either a missing opcode, an unimplemented
2435 feature, or just a bogus instruction stream. */
2436 static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
2438 gen_illegal_opcode(s);
2440 if (qemu_loglevel_mask(LOG_UNIMP)) {
2441 target_ulong pc = s->pc_start, end = s->pc;
2443 qemu_log("ILLOPC: " TARGET_FMT_lx ":", pc);
2444 for (; pc < end; ++pc) {
2445 qemu_log(" %02x", cpu_ldub_code(env, pc));
2452 /* an interrupt is different from an exception because of the
2453 privilege checks */
2454 static void gen_interrupt(DisasContext *s, int intno,
2455 target_ulong cur_eip, target_ulong next_eip)
2457 gen_update_cc_op(s);
2458 gen_jmp_im(cur_eip);
2459 gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno),
2460 tcg_const_i32(next_eip - cur_eip));
2461 s->base.is_jmp = DISAS_NORETURN;
2464 static void gen_debug(DisasContext *s, target_ulong cur_eip)
2466 gen_update_cc_op(s);
2467 gen_jmp_im(cur_eip);
2468 gen_helper_debug(cpu_env);
2469 s->base.is_jmp = DISAS_NORETURN;
2472 static void gen_set_hflag(DisasContext *s, uint32_t mask)
2474 if ((s->flags & mask) == 0) {
2475 TCGv_i32 t = tcg_temp_new_i32();
2476 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags));
2477 tcg_gen_ori_i32(t, t, mask);
2478 tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags));
2479 tcg_temp_free_i32(t);
2484 static void gen_reset_hflag(DisasContext *s, uint32_t mask)
2486 if (s->flags & mask) {
2487 TCGv_i32 t = tcg_temp_new_i32();
2488 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags));
2489 tcg_gen_andi_i32(t, t, ~mask);
2490 tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags));
2491 tcg_temp_free_i32(t);
2496 /* Clear BND registers during legacy branches. */
2497 static void gen_bnd_jmp(DisasContext *s)
2499 /* Clear the registers only if the BND prefix is missing, MPX is enabled,
2500 and if the BNDREGs are known to be in use (non-zero) already.
2501 The helper itself will check BNDPRESERVE at runtime. */
2502 if ((s->prefix & PREFIX_REPNZ) == 0
2503 && (s->flags & HF_MPX_EN_MASK) != 0
2504 && (s->flags & HF_MPX_IU_MASK) != 0) {
2505 gen_helper_bnd_jmp(cpu_env);
2509 /* Generate an end of block. A trace exception is also generated if needed.
2510 If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.
2511 If RECHECK_TF, emit a rechecking helper for #DB, ignoring the state of
2512 S->TF. This is used by the syscall/sysret insns. */
2513 static void
2514 do_gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, TCGv jr)
2516 gen_update_cc_op(s);
2518 /* If several instructions disable interrupts, only the first does it. */
2519 if (inhibit && !(s->flags & HF_INHIBIT_IRQ_MASK)) {
2520 gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
2522 gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
2525 if (s->base.tb->flags & HF_RF_MASK) {
2526 gen_helper_reset_rf(cpu_env);
2528 if (s->base.singlestep_enabled) {
2529 gen_helper_debug(cpu_env);
2530 } else if (recheck_tf) {
2531 gen_helper_rechecking_single_step(cpu_env);
2534 gen_helper_single_step(cpu_env);
2535 } else if (!TCGV_IS_UNUSED(jr)) {
2536 TCGv vaddr = tcg_temp_new();
2538 tcg_gen_add_tl(vaddr, jr, cpu_seg_base[R_CS]);
2539 tcg_gen_lookup_and_goto_ptr(vaddr);
2540 tcg_temp_free(vaddr);
2544 s->base.is_jmp = DISAS_NORETURN;
2548 gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf)
2552 TCGV_UNUSED(unused);
2553 do_gen_eob_worker(s, inhibit, recheck_tf, unused);
2556 /* End of block.
2557 If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set. */
2558 static void gen_eob_inhibit_irq(DisasContext *s, bool inhibit)
2560 gen_eob_worker(s, inhibit, false);
2563 /* End of block, resetting the inhibit irq flag. */
2564 static void gen_eob(DisasContext *s)
2566 gen_eob_worker(s, false, false);
2569 /* Jump to register */
2570 static void gen_jr(DisasContext *s, TCGv dest)
2572 do_gen_eob_worker(s, false, false, dest);
2575 /* generate a jump to eip. No segment change must happen before, as a
2576 direct call to the next block may occur */
2577 static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
2579 gen_update_cc_op(s);
2580 set_cc_op(s, CC_OP_DYNAMIC);
2582 gen_goto_tb(s, tb_num, eip);
2589 static void gen_jmp(DisasContext *s, target_ulong eip)
2591 gen_jmp_tb(s, eip, 0);
2594 static inline void gen_ldq_env_A0(DisasContext *s, int offset)
2596 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
2597 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
2600 static inline void gen_stq_env_A0(DisasContext *s, int offset)
2602 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
2603 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
2606 static inline void gen_ldo_env_A0(DisasContext *s, int offset)
2608 int mem_index = s->mem_index;
2609 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
2610 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
2611 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2612 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
2613 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
2616 static inline void gen_sto_env_A0(DisasContext *s, int offset)
2618 int mem_index = s->mem_index;
2619 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
2620 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
2621 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2622 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
2623 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
2626 static inline void gen_op_movo(int d_offset, int s_offset)
2628 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(0)));
2629 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(0)));
2630 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(1)));
2631 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(1)));
2634 static inline void gen_op_movq(int d_offset, int s_offset)
2636 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2637 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2640 static inline void gen_op_movl(int d_offset, int s_offset)
2642 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
2643 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
2646 static inline void gen_op_movq_env_0(int d_offset)
2648 tcg_gen_movi_i64(cpu_tmp1_i64, 0);
2649 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
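/* SSEFunc naming: the first suffix letter is the result (0 = none,
   i = TCGv_i32, l = TCGv_i64) and the remaining letters are the
   arguments: e = cpu_env, p = vector register pointer, i = TCGv_i32,
   l = TCGv_i64, t = target-width TCGv. */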
2652 typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
2653 typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
2654 typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val);
2655 typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val);
2656 typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
2657 typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2659 typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
2660 typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2663 #define SSE_SPECIAL ((void *)1)
2664 #define SSE_DUMMY ((void *)2)
2666 #define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2667 #define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2668 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
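/* Indexed by [opcode byte][prefix]: column 0 is the unprefixed form,
   1 the 66 form, 2 the F3 form and 3 the F2 form, matching the b1
   computation at the top of gen_sse. */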
2670 static const SSEFunc_0_epp sse_op_table1[256][4] = {
2671 /* 3DNow! extensions */
2672 [0x0e] = { SSE_DUMMY }, /* femms */
2673 [0x0f] = { SSE_DUMMY }, /* pf... */
2674 /* pure SSE operations */
2675 [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2676 [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2677 [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
2678 [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
2679 [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
2680 [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
2681 [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
2682 [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */
2684 [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2685 [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2686 [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
2687 [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
2688 [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
2689 [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
2690 [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
2691 [0x2f] = { gen_helper_comiss, gen_helper_comisd },
2692 [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
2693 [0x51] = SSE_FOP(sqrt),
2694 [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
2695 [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
2696 [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
2697 [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
2698 [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
2699 [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
2700 [0x58] = SSE_FOP(add),
2701 [0x59] = SSE_FOP(mul),
2702 [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
2703 gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
2704 [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
2705 [0x5c] = SSE_FOP(sub),
2706 [0x5d] = SSE_FOP(min),
2707 [0x5e] = SSE_FOP(div),
2708 [0x5f] = SSE_FOP(max),
2710 [0xc2] = SSE_FOP(cmpeq),
2711 [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
2712 (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */
2714 /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX. */
2715 [0x38] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2716 [0x3a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2718 /* MMX ops and their SSE extensions */
2719 [0x60] = MMX_OP2(punpcklbw),
2720 [0x61] = MMX_OP2(punpcklwd),
2721 [0x62] = MMX_OP2(punpckldq),
2722 [0x63] = MMX_OP2(packsswb),
2723 [0x64] = MMX_OP2(pcmpgtb),
2724 [0x65] = MMX_OP2(pcmpgtw),
2725 [0x66] = MMX_OP2(pcmpgtl),
2726 [0x67] = MMX_OP2(packuswb),
2727 [0x68] = MMX_OP2(punpckhbw),
2728 [0x69] = MMX_OP2(punpckhwd),
2729 [0x6a] = MMX_OP2(punpckhdq),
2730 [0x6b] = MMX_OP2(packssdw),
2731 [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
2732 [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
2733 [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
2734 [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2735 [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
2736 (SSEFunc_0_epp)gen_helper_pshufd_xmm,
2737 (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
2738 (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
2739 [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
2740 [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
2741 [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
2742 [0x74] = MMX_OP2(pcmpeqb),
2743 [0x75] = MMX_OP2(pcmpeqw),
2744 [0x76] = MMX_OP2(pcmpeql),
2745 [0x77] = { SSE_DUMMY }, /* emms */
2746 [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
2747 [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
2748 [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
2749 [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
2750 [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, movq */
2751 [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2752 [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
2753 [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
2754 [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
2755 [0xd1] = MMX_OP2(psrlw),
2756 [0xd2] = MMX_OP2(psrld),
2757 [0xd3] = MMX_OP2(psrlq),
2758 [0xd4] = MMX_OP2(paddq),
2759 [0xd5] = MMX_OP2(pmullw),
2760 [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2761 [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
2762 [0xd8] = MMX_OP2(psubusb),
2763 [0xd9] = MMX_OP2(psubusw),
2764 [0xda] = MMX_OP2(pminub),
2765 [0xdb] = MMX_OP2(pand),
2766 [0xdc] = MMX_OP2(paddusb),
2767 [0xdd] = MMX_OP2(paddusw),
2768 [0xde] = MMX_OP2(pmaxub),
2769 [0xdf] = MMX_OP2(pandn),
2770 [0xe0] = MMX_OP2(pavgb),
2771 [0xe1] = MMX_OP2(psraw),
2772 [0xe2] = MMX_OP2(psrad),
2773 [0xe3] = MMX_OP2(pavgw),
2774 [0xe4] = MMX_OP2(pmulhuw),
2775 [0xe5] = MMX_OP2(pmulhw),
2776 [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
2777 [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
2778 [0xe8] = MMX_OP2(psubsb),
2779 [0xe9] = MMX_OP2(psubsw),
2780 [0xea] = MMX_OP2(pminsw),
2781 [0xeb] = MMX_OP2(por),
2782 [0xec] = MMX_OP2(paddsb),
2783 [0xed] = MMX_OP2(paddsw),
2784 [0xee] = MMX_OP2(pmaxsw),
2785 [0xef] = MMX_OP2(pxor),
2786 [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
2787 [0xf1] = MMX_OP2(psllw),
2788 [0xf2] = MMX_OP2(pslld),
2789 [0xf3] = MMX_OP2(psllq),
2790 [0xf4] = MMX_OP2(pmuludq),
2791 [0xf5] = MMX_OP2(pmaddwd),
2792 [0xf6] = MMX_OP2(psadbw),
2793 [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
2794 (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
2795 [0xf8] = MMX_OP2(psubb),
2796 [0xf9] = MMX_OP2(psubw),
2797 [0xfa] = MMX_OP2(psubl),
2798 [0xfb] = MMX_OP2(psubq),
2799 [0xfc] = MMX_OP2(paddb),
2800 [0xfd] = MMX_OP2(paddw),
2801 [0xfe] = MMX_OP2(paddl),
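/* Immediate-count shifts for opcodes 0x71/0x72/0x73: eight rows per
   element width (w/d/q), indexed by the modrm reg field via the
   ((b - 1) & 3) * 8 lookup in gen_sse. */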
2804 static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
2805 [0 + 2] = MMX_OP2(psrlw),
2806 [0 + 4] = MMX_OP2(psraw),
2807 [0 + 6] = MMX_OP2(psllw),
2808 [8 + 2] = MMX_OP2(psrld),
2809 [8 + 4] = MMX_OP2(psrad),
2810 [8 + 6] = MMX_OP2(pslld),
2811 [16 + 2] = MMX_OP2(psrlq),
2812 [16 + 3] = { NULL, gen_helper_psrldq_xmm },
2813 [16 + 6] = MMX_OP2(psllq),
2814 [16 + 7] = { NULL, gen_helper_pslldq_xmm },
2817 static const SSEFunc_0_epi sse_op_table3ai[] = {
2818 gen_helper_cvtsi2ss,
2822 #ifdef TARGET_X86_64
2823 static const SSEFunc_0_epl sse_op_table3aq[] = {
2824 gen_helper_cvtsq2ss,
2829 static const SSEFunc_i_ep sse_op_table3bi[] = {
2830 gen_helper_cvttss2si,
2831 gen_helper_cvtss2si,
2832 gen_helper_cvttsd2si,
2836 #ifdef TARGET_X86_64
2837 static const SSEFunc_l_ep sse_op_table3bq[] = {
2838 gen_helper_cvttss2sq,
2839 gen_helper_cvtss2sq,
2840 gen_helper_cvttsd2sq,
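/* The sse_op_table3 variants convert between integers and scalar
   floats: the 'a' tables are int -> ss/sd, the 'b' tables ss/sd -> int;
   the i/q suffix selects a 32-bit or (x86-64 only) 64-bit integer. */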
2845 static const SSEFunc_0_epp sse_op_table4[8][4] = {
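/* sse_op_table4 holds the eight 0xc2 compare predicates, one column
   per prefix; sse_op_table5 below dispatches 3DNow! ops on the
   immediate suffix byte that follows the operands. */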
2856 static const SSEFunc_0_epp sse_op_table5[256] = {
2857 [0x0c] = gen_helper_pi2fw,
2858 [0x0d] = gen_helper_pi2fd,
2859 [0x1c] = gen_helper_pf2iw,
2860 [0x1d] = gen_helper_pf2id,
2861 [0x8a] = gen_helper_pfnacc,
2862 [0x8e] = gen_helper_pfpnacc,
2863 [0x90] = gen_helper_pfcmpge,
2864 [0x94] = gen_helper_pfmin,
2865 [0x96] = gen_helper_pfrcp,
2866 [0x97] = gen_helper_pfrsqrt,
2867 [0x9a] = gen_helper_pfsub,
2868 [0x9e] = gen_helper_pfadd,
2869 [0xa0] = gen_helper_pfcmpgt,
2870 [0xa4] = gen_helper_pfmax,
2871 [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
2872 [0xa7] = gen_helper_movq, /* pfrsqit1 */
2873 [0xaa] = gen_helper_pfsubr,
2874 [0xae] = gen_helper_pfacc,
2875 [0xb0] = gen_helper_pfcmpeq,
2876 [0xb4] = gen_helper_pfmul,
2877 [0xb6] = gen_helper_movq, /* pfrcpit2 */
2878 [0xb7] = gen_helper_pmulhrw_mmx,
2879 [0xbb] = gen_helper_pswapd,
2880 [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
2883 struct SSEOpHelper_epp {
2884 SSEFunc_0_epp op[2];
2888 struct SSEOpHelper_eppi {
2889 SSEFunc_0_eppi op[2];
2893 #define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
2894 #define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
2895 #define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
2896 #define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
2897 #define PCLMULQDQ_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, \
2898 CPUID_EXT_PCLMULQDQ }
2899 #define AESNI_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_AES }
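/* These wrappers pair each helper with the CPUID feature bit
   (ext_mask) that is checked before the insn is accepted. */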
2901 static const struct SSEOpHelper_epp sse_op_table6[256] = {
2902 [0x00] = SSSE3_OP(pshufb),
2903 [0x01] = SSSE3_OP(phaddw),
2904 [0x02] = SSSE3_OP(phaddd),
2905 [0x03] = SSSE3_OP(phaddsw),
2906 [0x04] = SSSE3_OP(pmaddubsw),
2907 [0x05] = SSSE3_OP(phsubw),
2908 [0x06] = SSSE3_OP(phsubd),
2909 [0x07] = SSSE3_OP(phsubsw),
2910 [0x08] = SSSE3_OP(psignb),
2911 [0x09] = SSSE3_OP(psignw),
2912 [0x0a] = SSSE3_OP(psignd),
2913 [0x0b] = SSSE3_OP(pmulhrsw),
2914 [0x10] = SSE41_OP(pblendvb),
2915 [0x14] = SSE41_OP(blendvps),
2916 [0x15] = SSE41_OP(blendvpd),
2917 [0x17] = SSE41_OP(ptest),
2918 [0x1c] = SSSE3_OP(pabsb),
2919 [0x1d] = SSSE3_OP(pabsw),
2920 [0x1e] = SSSE3_OP(pabsd),
2921 [0x20] = SSE41_OP(pmovsxbw),
2922 [0x21] = SSE41_OP(pmovsxbd),
2923 [0x22] = SSE41_OP(pmovsxbq),
2924 [0x23] = SSE41_OP(pmovsxwd),
2925 [0x24] = SSE41_OP(pmovsxwq),
2926 [0x25] = SSE41_OP(pmovsxdq),
2927 [0x28] = SSE41_OP(pmuldq),
2928 [0x29] = SSE41_OP(pcmpeqq),
2929 [0x2a] = SSE41_SPECIAL, /* movntdqa */
2930 [0x2b] = SSE41_OP(packusdw),
2931 [0x30] = SSE41_OP(pmovzxbw),
2932 [0x31] = SSE41_OP(pmovzxbd),
2933 [0x32] = SSE41_OP(pmovzxbq),
2934 [0x33] = SSE41_OP(pmovzxwd),
2935 [0x34] = SSE41_OP(pmovzxwq),
2936 [0x35] = SSE41_OP(pmovzxdq),
2937 [0x37] = SSE42_OP(pcmpgtq),
2938 [0x38] = SSE41_OP(pminsb),
2939 [0x39] = SSE41_OP(pminsd),
2940 [0x3a] = SSE41_OP(pminuw),
2941 [0x3b] = SSE41_OP(pminud),
2942 [0x3c] = SSE41_OP(pmaxsb),
2943 [0x3d] = SSE41_OP(pmaxsd),
2944 [0x3e] = SSE41_OP(pmaxuw),
2945 [0x3f] = SSE41_OP(pmaxud),
2946 [0x40] = SSE41_OP(pmulld),
2947 [0x41] = SSE41_OP(phminposuw),
2948 [0xdb] = AESNI_OP(aesimc),
2949 [0xdc] = AESNI_OP(aesenc),
2950 [0xdd] = AESNI_OP(aesenclast),
2951 [0xde] = AESNI_OP(aesdec),
2952 [0xdf] = AESNI_OP(aesdeclast),
2955 static const struct SSEOpHelper_eppi sse_op_table7[256] = {
2956 [0x08] = SSE41_OP(roundps),
2957 [0x09] = SSE41_OP(roundpd),
2958 [0x0a] = SSE41_OP(roundss),
2959 [0x0b] = SSE41_OP(roundsd),
2960 [0x0c] = SSE41_OP(blendps),
2961 [0x0d] = SSE41_OP(blendpd),
2962 [0x0e] = SSE41_OP(pblendw),
2963 [0x0f] = SSSE3_OP(palignr),
2964 [0x14] = SSE41_SPECIAL, /* pextrb */
2965 [0x15] = SSE41_SPECIAL, /* pextrw */
2966 [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
2967 [0x17] = SSE41_SPECIAL, /* extractps */
2968 [0x20] = SSE41_SPECIAL, /* pinsrb */
2969 [0x21] = SSE41_SPECIAL, /* insertps */
2970 [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
2971 [0x40] = SSE41_OP(dpps),
2972 [0x41] = SSE41_OP(dppd),
2973 [0x42] = SSE41_OP(mpsadbw),
2974 [0x44] = PCLMULQDQ_OP(pclmulqdq),
2975 [0x60] = SSE42_OP(pcmpestrm),
2976 [0x61] = SSE42_OP(pcmpestri),
2977 [0x62] = SSE42_OP(pcmpistrm),
2978 [0x63] = SSE42_OP(pcmpistri),
2979 [0xdf] = AESNI_OP(aeskeygenassist),
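/* sse_op_table6 serves the 0f 38 opcode space (no immediate);
   sse_op_table7 serves 0f 3a, which carries an 8-bit immediate. */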
2982 static void gen_sse(CPUX86State *env, DisasContext *s, int b,
2983 target_ulong pc_start, int rex_r)
2985 int b1, op1_offset, op2_offset, is_xmm, val;
2986 int modrm, mod, rm, reg;
2987 SSEFunc_0_epp sse_fn_epp;
2988 SSEFunc_0_eppi sse_fn_eppi;
2989 SSEFunc_0_ppi sse_fn_ppi;
2990 SSEFunc_0_eppt sse_fn_eppt;
2994 if (s->prefix & PREFIX_DATA)
2996 else if (s->prefix & PREFIX_REPZ)
2998 else if (s->prefix & PREFIX_REPNZ)
3002 sse_fn_epp = sse_op_table1[b][b1];
3006 if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
3016 /* simple MMX/SSE operation */
3017 if (s->flags & HF_TS_MASK) {
3018 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
3021 if (s->flags & HF_EM_MASK) {
3023 gen_illegal_opcode(s);
3027 && !(s->flags & HF_OSFXSR_MASK)
3028 && ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))) {
3032 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) {
3033 /* If we were fully decoding this we might use illegal_op. */
3037 gen_helper_emms(cpu_env);
3042 gen_helper_emms(cpu_env);
3045 /* prepare MMX state (XXX: optimize by storing fptt and fptags in
3046 the static cpu state) */
3048 gen_helper_enter_mmx(cpu_env);
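/* modrm layout: mod in bits 7:6, reg/opcode in bits 5:3, rm in
   bits 2:0; REX.R and REX.B extend reg and rm to four bits. */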
3051 modrm = cpu_ldub_code(env, s->pc++);
3052 reg = ((modrm >> 3) & 7);
3055 mod = (modrm >> 6) & 3;
3056 if (sse_fn_epp == SSE_SPECIAL) {
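/* Within this switch the prefix index b1 is folded into bits 9:8
   of b, so e.g. 0x02b/0x12b/0x22b/0x32b are the none/66/F3/F2
   variants of opcode 0x2b. */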
3059 case 0x0e7: /* movntq */
3063 gen_lea_modrm(env, s, modrm);
3064 gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
3066 case 0x1e7: /* movntdq */
3067 case 0x02b: /* movntps */
3068 case 0x12b: /* movntpd */
3071 gen_lea_modrm(env, s, modrm);
3072 gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3074 case 0x3f0: /* lddqu */
3077 gen_lea_modrm(env, s, modrm);
3078 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3080 case 0x22b: /* movntss */
3081 case 0x32b: /* movntsd */
3084 gen_lea_modrm(env, s, modrm);
3086 gen_stq_env_A0(s, offsetof(CPUX86State,
3087 xmm_regs[reg].ZMM_Q(0)));
3089 tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
3090 xmm_regs[reg].ZMM_L(0)));
3091 gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
3094 case 0x6e: /* movd mm, ea */
3095 #ifdef TARGET_X86_64
3096 if (s->dflag == MO_64) {
3097 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
3098 tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
3102 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
3103 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3104 offsetof(CPUX86State,fpregs[reg].mmx));
3105 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
3106 gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
3109 case 0x16e: /* movd xmm, ea */
3110 #ifdef TARGET_X86_64
3111 if (s->dflag == MO_64) {
3112 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
3113 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3114 offsetof(CPUX86State,xmm_regs[reg]));
3115 gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T0);
3119 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
3120 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3121 offsetof(CPUX86State,xmm_regs[reg]));
3122 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
3123 gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
3126 case 0x6f: /* movq mm, ea */
3128 gen_lea_modrm(env, s, modrm);
3129 gen_ldq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
3132 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
3133 offsetof(CPUX86State,fpregs[rm].mmx));
3134 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
3135 offsetof(CPUX86State,fpregs[reg].mmx));
3138 case 0x010: /* movups */
3139 case 0x110: /* movupd */
3140 case 0x028: /* movaps */
3141 case 0x128: /* movapd */
3142 case 0x16f: /* movdqa xmm, ea */
3143 case 0x26f: /* movdqu xmm, ea */
3145 gen_lea_modrm(env, s, modrm);
3146 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3148 rm = (modrm & 7) | REX_B(s);
3149 gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
3150 offsetof(CPUX86State,xmm_regs[rm]));
3153 case 0x210: /* movss xmm, ea */
3155 gen_lea_modrm(env, s, modrm);
3156 gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
3157 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
3158 tcg_gen_movi_tl(cpu_T0, 0);
3159 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)));
3160 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
3161 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
3163 rm = (modrm & 7) | REX_B(s);
3164 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
3165 offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)));
3168 case 0x310: /* movsd xmm, ea */
3170 gen_lea_modrm(env, s, modrm);
3171 gen_ldq_env_A0(s, offsetof(CPUX86State,
3172 xmm_regs[reg].ZMM_Q(0)));
3173 tcg_gen_movi_tl(cpu_T0, 0);
3174 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
3175 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
3177 rm = (modrm & 7) | REX_B(s);
3178 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
3179 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
3182 case 0x012: /* movlps */
3183 case 0x112: /* movlpd */
3185 gen_lea_modrm(env, s, modrm);
3186 gen_ldq_env_A0(s, offsetof(CPUX86State,
3187 xmm_regs[reg].ZMM_Q(0)));
3190 rm = (modrm & 7) | REX_B(s);
3191 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
3192 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(1)));
3195 case 0x212: /* movsldup */
3197 gen_lea_modrm(env, s, modrm);
3198 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3200 rm = (modrm & 7) | REX_B(s);
3201 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
3202 offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)));
3203 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)),
3204 offsetof(CPUX86State,xmm_regs[rm].ZMM_L(2)));
3206 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)),
3207 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
3208 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)),
3209 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
3211 case 0x312: /* movddup */
3213 gen_lea_modrm(env, s, modrm);
3214 gen_ldq_env_A0(s, offsetof(CPUX86State,
3215 xmm_regs[reg].ZMM_Q(0)));
3217 rm = (modrm & 7) | REX_B(s);
3218 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
3219 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
3221 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)),
3222 offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
3224 case 0x016: /* movhps */
3225 case 0x116: /* movhpd */
3227 gen_lea_modrm(env, s, modrm);
3228 gen_ldq_env_A0(s, offsetof(CPUX86State,
3229 xmm_regs[reg].ZMM_Q(1)));
3232 rm = (modrm & 7) | REX_B(s);
3233 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)),
3234 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
3237 case 0x216: /* movshdup */
3239 gen_lea_modrm(env, s, modrm);
3240 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3242 rm = (modrm & 7) | REX_B(s);
3243 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)),
3244 offsetof(CPUX86State,xmm_regs[rm].ZMM_L(1)));
3245 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)),
3246 offsetof(CPUX86State,xmm_regs[rm].ZMM_L(3)));
3248 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
3249 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)));
3250 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)),
3251 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
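/* Immediate forms of SSE4a extrq/insertq: two immediate bytes
   supply the field length and the starting bit index, each masked
   to six bits. */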
3256 int bit_index, field_length;
3258 if (b1 == 1 && reg != 0)
3260 field_length = cpu_ldub_code(env, s->pc++) & 0x3F;
3261 bit_index = cpu_ldub_code(env, s->pc++) & 0x3F;
3262 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3263 offsetof(CPUX86State,xmm_regs[reg]));
3265 gen_helper_extrq_i(cpu_env, cpu_ptr0,
3266 tcg_const_i32(bit_index),
3267 tcg_const_i32(field_length));
3269 gen_helper_insertq_i(cpu_env, cpu_ptr0,
3270 tcg_const_i32(bit_index),
3271 tcg_const_i32(field_length));
3274 case 0x7e: /* movd ea, mm */
3275 #ifdef TARGET_X86_64
3276 if (s->dflag == MO_64) {
3277 tcg_gen_ld_i64(cpu_T0, cpu_env,
3278 offsetof(CPUX86State,fpregs[reg].mmx));
3279 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
3283 tcg_gen_ld32u_tl(cpu_T0, cpu_env,
3284 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
3285 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
3288 case 0x17e: /* movd ea, xmm */
3289 #ifdef TARGET_X86_64
3290 if (s->dflag == MO_64) {
3291 tcg_gen_ld_i64(cpu_T0, cpu_env,
3292 offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
3293 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
3297 tcg_gen_ld32u_tl(cpu_T0, cpu_env,
3298 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
3299 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
3302 case 0x27e: /* movq xmm, ea */
3304 gen_lea_modrm(env, s, modrm);
3305 gen_ldq_env_A0(s, offsetof(CPUX86State,
3306 xmm_regs[reg].ZMM_Q(0)));
3308 rm = (modrm & 7) | REX_B(s);
3309 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
3310 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
3312 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)));
3314 case 0x7f: /* movq ea, mm */
3316 gen_lea_modrm(env, s, modrm);
3317 gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
3320 gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
3321 offsetof(CPUX86State,fpregs[reg].mmx));
3324 case 0x011: /* movups */
3325 case 0x111: /* movupd */
3326 case 0x029: /* movaps */
3327 case 0x129: /* movapd */
3328 case 0x17f: /* movdqa ea, xmm */
3329 case 0x27f: /* movdqu ea, xmm */
3331 gen_lea_modrm(env, s, modrm);
3332 gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3334 rm = (modrm & 7) | REX_B(s);
3335 gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
3336 offsetof(CPUX86State,xmm_regs[reg]));
3339 case 0x211: /* movss ea, xmm */
3341 gen_lea_modrm(env, s, modrm);
3342 tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
3343 gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
3345 rm = (modrm & 7) | REX_B(s);
3346 gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)),
3347 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
3350 case 0x311: /* movsd ea, xmm */
3352 gen_lea_modrm(env, s, modrm);
3353 gen_stq_env_A0(s, offsetof(CPUX86State,
3354 xmm_regs[reg].ZMM_Q(0)));
3356 rm = (modrm & 7) | REX_B(s);
3357 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)),
3358 offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
3361 case 0x013: /* movlps */
3362 case 0x113: /* movlpd */
3364 gen_lea_modrm(env, s, modrm);
3365 gen_stq_env_A0(s, offsetof(CPUX86State,
3366 xmm_regs[reg].ZMM_Q(0)));
3371 case 0x017: /* movhps */
3372 case 0x117: /* movhpd */
3374 gen_lea_modrm(env, s, modrm);
3375 gen_stq_env_A0(s, offsetof(CPUX86State,
3376 xmm_regs[reg].ZMM_Q(1)));
3381 case 0x71: /* shift mm, im */
3384 case 0x171: /* shift xmm, im */
3390 val = cpu_ldub_code(env, s->pc++);
3392 tcg_gen_movi_tl(cpu_T0, val);
3393 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
3394 tcg_gen_movi_tl(cpu_T0, 0);
3395 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(1)));
3396 op1_offset = offsetof(CPUX86State,xmm_t0);
3398 tcg_gen_movi_tl(cpu_T0, val);
3399 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
3400 tcg_gen_movi_tl(cpu_T0, 0);
3401 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
3402 op1_offset = offsetof(CPUX86State,mmx_t0);
3404 sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 +
3405 ((modrm >> 3) & 7)][b1];
3410 rm = (modrm & 7) | REX_B(s);
3411 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3414 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3416 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3417 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
3418 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
3420 case 0x050: /* movmskps */
3421 rm = (modrm & 7) | REX_B(s);
3422 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3423 offsetof(CPUX86State,xmm_regs[rm]));
3424 gen_helper_movmskps(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3425 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
3427 case 0x150: /* movmskpd */
3428 rm = (modrm & 7) | REX_B(s);
3429 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3430 offsetof(CPUX86State,xmm_regs[rm]));
3431 gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3432 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
3434 case 0x02a: /* cvtpi2ps */
3435 case 0x12a: /* cvtpi2pd */
3436 gen_helper_enter_mmx(cpu_env);
3438 gen_lea_modrm(env, s, modrm);
3439 op2_offset = offsetof(CPUX86State,mmx_t0);
3440 gen_ldq_env_A0(s, op2_offset);
3443 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3445 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3446 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3447 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3450 gen_helper_cvtpi2ps(cpu_env, cpu_ptr0, cpu_ptr1);
3454 gen_helper_cvtpi2pd(cpu_env, cpu_ptr0, cpu_ptr1);
3458 case 0x22a: /* cvtsi2ss */
3459 case 0x32a: /* cvtsi2sd */
3460 ot = mo_64_32(s->dflag);
3461 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3462 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3463 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3465 SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1];
3466 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
3467 sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32);
3469 #ifdef TARGET_X86_64
3470 SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1];
3471 sse_fn_epl(cpu_env, cpu_ptr0, cpu_T0);
3477 case 0x02c: /* cvttps2pi */
3478 case 0x12c: /* cvttpd2pi */
3479 case 0x02d: /* cvtps2pi */
3480 case 0x12d: /* cvtpd2pi */
3481 gen_helper_enter_mmx(cpu_env);
3483 gen_lea_modrm(env, s, modrm);
3484 op2_offset = offsetof(CPUX86State,xmm_t0);
3485 gen_ldo_env_A0(s, op2_offset);
3487 rm = (modrm & 7) | REX_B(s);
3488 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3490 op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
3491 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3492 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3495 gen_helper_cvttps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3498 gen_helper_cvttpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3501 gen_helper_cvtps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3504 gen_helper_cvtpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3508 case 0x22c: /* cvttss2si */
3509 case 0x32c: /* cvttsd2si */
3510 case 0x22d: /* cvtss2si */
3511 case 0x32d: /* cvtsd2si */
3512 ot = mo_64_32(s->dflag);
3514 gen_lea_modrm(env, s, modrm);
3516 gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_Q(0)));
3518 gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
3519 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
3521 op2_offset = offsetof(CPUX86State,xmm_t0);
3523 rm = (modrm & 7) | REX_B(s);
3524 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3526 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3528 SSEFunc_i_ep sse_fn_i_ep =
3529 sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
3530 sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3531 tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
3533 #ifdef TARGET_X86_64
3534 SSEFunc_l_ep sse_fn_l_ep =
3535 sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
3536 sse_fn_l_ep(cpu_T0, cpu_env, cpu_ptr0);
3541 gen_op_mov_reg_v(ot, reg, cpu_T0);
3543 case 0xc4: /* pinsrw */
3546 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
3547 val = cpu_ldub_code(env, s->pc++);
3550 tcg_gen_st16_tl(cpu_T0, cpu_env,
3551 offsetof(CPUX86State,xmm_regs[reg].ZMM_W(val)));
3554 tcg_gen_st16_tl(cpu_T0, cpu_env,
3555 offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
3558 case 0xc5: /* pextrw */
3562 ot = mo_64_32(s->dflag);
3563 val = cpu_ldub_code(env, s->pc++);
3566 rm = (modrm & 7) | REX_B(s);
3567 tcg_gen_ld16u_tl(cpu_T0, cpu_env,
3568 offsetof(CPUX86State,xmm_regs[rm].ZMM_W(val)));
3572 tcg_gen_ld16u_tl(cpu_T0, cpu_env,
3573 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
3575 reg = ((modrm >> 3) & 7) | rex_r;
3576 gen_op_mov_reg_v(ot, reg, cpu_T0);
3578 case 0x1d6: /* movq ea, xmm */
3580 gen_lea_modrm(env, s, modrm);
3581 gen_stq_env_A0(s, offsetof(CPUX86State,
3582 xmm_regs[reg].ZMM_Q(0)));
3584 rm = (modrm & 7) | REX_B(s);
3585 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)),
3586 offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
3587 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(1)));
3590 case 0x2d6: /* movq2dq */
3591 gen_helper_enter_mmx(cpu_env);
3593 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
3594 offsetof(CPUX86State,fpregs[rm].mmx));
3595 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)));
3597 case 0x3d6: /* movdq2q */
3598 gen_helper_enter_mmx(cpu_env);
3599 rm = (modrm & 7) | REX_B(s);
3600 gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
3601 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
3603 case 0xd7: /* pmovmskb */
3608 rm = (modrm & 7) | REX_B(s);
3609 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
3610 gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3613 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
3614 gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3616 reg = ((modrm >> 3) & 7) | rex_r;
3617 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
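/* Three-byte 0f 38 opcodes: f0-ff are the integer extensions
   handled below; everything else dispatches through sse_op_table6. */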
3623 if ((b & 0xf0) == 0xf0) {
3626 modrm = cpu_ldub_code(env, s->pc++);
3628 reg = ((modrm >> 3) & 7) | rex_r;
3629 mod = (modrm >> 6) & 3;
3634 sse_fn_epp = sse_op_table6[b].op[b1];
3638 if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
3642 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3644 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3646 op2_offset = offsetof(CPUX86State,xmm_t0);
3647 gen_lea_modrm(env, s, modrm);
3649 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3650 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3651 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3652 gen_ldq_env_A0(s, op2_offset +
3653 offsetof(ZMMReg, ZMM_Q(0)));
3655 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3656 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3657 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
3658 s->mem_index, MO_LEUL);
3659 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
3660 offsetof(ZMMReg, ZMM_L(0)));
3662 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3663 tcg_gen_qemu_ld_tl(cpu_tmp0, cpu_A0,
3664 s->mem_index, MO_LEUW);
3665 tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
3666 offsetof(ZMMReg, ZMM_W(0)));
3668 case 0x2a: /* movntdqa */
3669 gen_ldo_env_A0(s, op1_offset);
3672 gen_ldo_env_A0(s, op2_offset);
3676 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3678 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3680 op2_offset = offsetof(CPUX86State,mmx_t0);
3681 gen_lea_modrm(env, s, modrm);
3682 gen_ldq_env_A0(s, op2_offset);
3685 if (sse_fn_epp == SSE_SPECIAL) {
3689 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3690 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3691 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
3694 set_cc_op(s, CC_OP_EFLAGS);
3701 /* Various integer extensions at 0f 38 f[0-f]. */
3702 b = modrm | (b1 << 8);
3703 modrm = cpu_ldub_code(env, s->pc++);
3704 reg = ((modrm >> 3) & 7) | rex_r;
3707 case 0x3f0: /* crc32 Gd,Eb */
3708 case 0x3f1: /* crc32 Gd,Ey */
3710 if (!(s->cpuid_ext_features & CPUID_EXT_SSE42)) {
3713 if ((b & 0xff) == 0xf0) {
3715 } else if (s->dflag != MO_64) {
3716 ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
3721 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[reg]);
3722 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3723 gen_helper_crc32(cpu_T0, cpu_tmp2_i32,
3724 cpu_T0, tcg_const_i32(8 << ot));
3726 ot = mo_64_32(s->dflag);
3727 gen_op_mov_reg_v(ot, reg, cpu_T0);
3730 case 0x1f0: /* crc32 or movbe */
3732 /* For these insns, the f3 prefix is supposed to have priority
3733    over the 66 prefix, but that's not what we implement above
3734    setting b1.  */
3735 if (s->prefix & PREFIX_REPNZ) {
3739 case 0x0f0: /* movbe Gy,My */
3740 case 0x0f1: /* movbe My,Gy */
3741 if (!(s->cpuid_ext_features & CPUID_EXT_MOVBE)) {
3744 if (s->dflag != MO_64) {
3745 ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
3750 gen_lea_modrm(env, s, modrm);
3752 tcg_gen_qemu_ld_tl(cpu_T0, cpu_A0,
3753 s->mem_index, ot | MO_BE);
3754 gen_op_mov_reg_v(ot, reg, cpu_T0);
3756 tcg_gen_qemu_st_tl(cpu_regs[reg], cpu_A0,
3757 s->mem_index, ot | MO_BE);
3761 case 0x0f2: /* andn Gy, By, Ey */
3762 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
3763 || !(s->prefix & PREFIX_VEX)
3767 ot = mo_64_32(s->dflag);
3768 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3769 tcg_gen_andc_tl(cpu_T0, cpu_regs[s->vex_v], cpu_T0);
3770 gen_op_mov_reg_v(ot, reg, cpu_T0);
3771 gen_op_update1_cc();
3772 set_cc_op(s, CC_OP_LOGICB + ot);
3775 case 0x0f7: /* bextr Gy, Ey, By */
3776 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
3777 || !(s->prefix & PREFIX_VEX)
3781 ot = mo_64_32(s->dflag);
3785 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3786 /* Extract START, and shift the operand.
3787 Shifts larger than operand size get zeros. */
3788 tcg_gen_ext8u_tl(cpu_A0, cpu_regs[s->vex_v]);
3789 tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_A0);
3791 bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
3792 zero = tcg_const_tl(0);
3793 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_T0, cpu_A0, bound,
3795 tcg_temp_free(zero);
3797 /* Extract the LEN into a mask. Lengths larger than
3798 operand size get all ones. */
3799 tcg_gen_extract_tl(cpu_A0, cpu_regs[s->vex_v], 8, 8);
3800 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_A0, cpu_A0, bound,
3802 tcg_temp_free(bound);
3803 tcg_gen_movi_tl(cpu_T1, 1);
3804 tcg_gen_shl_tl(cpu_T1, cpu_T1, cpu_A0);
3805 tcg_gen_subi_tl(cpu_T1, cpu_T1, 1);
3806 tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);
3808 gen_op_mov_reg_v(ot, reg, cpu_T0);
3809 gen_op_update1_cc();
3810 set_cc_op(s, CC_OP_LOGICB + ot);
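/* Example: a control word of 0x0804 in By means start = 4 and
   len = 8, so bextr leaves (src >> 4) & 0xff in the destination. */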
3814 case 0x0f5: /* bzhi Gy, Ey, By */
3815 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3816 || !(s->prefix & PREFIX_VEX)
3820 ot = mo_64_32(s->dflag);
3821 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3822 tcg_gen_ext8u_tl(cpu_T1, cpu_regs[s->vex_v]);
3824 TCGv bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
3825 /* Note that since we're using BMILG (in order to get O
3826 cleared) we need to store the inverse into C. */
3827 tcg_gen_setcond_tl(TCG_COND_LT, cpu_cc_src,
3829 tcg_gen_movcond_tl(TCG_COND_GT, cpu_T1, cpu_T1,
3830 bound, bound, cpu_T1);
3831 tcg_temp_free(bound);
3833 tcg_gen_movi_tl(cpu_A0, -1);
3834 tcg_gen_shl_tl(cpu_A0, cpu_A0, cpu_T1);
3835 tcg_gen_andc_tl(cpu_T0, cpu_T0, cpu_A0);
3836 gen_op_mov_reg_v(ot, reg, cpu_T0);
3837 gen_op_update1_cc();
3838 set_cc_op(s, CC_OP_BMILGB + ot);
3841 case 0x3f6: /* mulx By, Gy, rdx, Ey */
3842 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3843 || !(s->prefix & PREFIX_VEX)
3847 ot = mo_64_32(s->dflag);
3848 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3851 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
3852 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EDX]);
3853 tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
3854 cpu_tmp2_i32, cpu_tmp3_i32);
3855 tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], cpu_tmp2_i32);
3856 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp3_i32);
3858 #ifdef TARGET_X86_64
3860 tcg_gen_mulu2_i64(cpu_T0, cpu_T1,
3861 cpu_T0, cpu_regs[R_EDX]);
3862 tcg_gen_mov_i64(cpu_regs[s->vex_v], cpu_T0);
3863 tcg_gen_mov_i64(cpu_regs[reg], cpu_T1);
3869 case 0x3f5: /* pdep Gy, By, Ey */
3870 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3871 || !(s->prefix & PREFIX_VEX)
3875 ot = mo_64_32(s->dflag);
3876 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3877 /* Note that by zero-extending the mask operand, we
3878 automatically handle zero-extending the result. */
3880 tcg_gen_mov_tl(cpu_T1, cpu_regs[s->vex_v]);
3882 tcg_gen_ext32u_tl(cpu_T1, cpu_regs[s->vex_v]);
3884 gen_helper_pdep(cpu_regs[reg], cpu_T0, cpu_T1);
3887 case 0x2f5: /* pext Gy, By, Ey */
3888 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3889 || !(s->prefix & PREFIX_VEX)
3893 ot = mo_64_32(s->dflag);
3894 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3895 /* Note that by zero-extending the mask operand, we
3896 automatically handle zero-extending the result. */
3898 tcg_gen_mov_tl(cpu_T1, cpu_regs[s->vex_v]);
3900 tcg_gen_ext32u_tl(cpu_T1, cpu_regs[s->vex_v]);
3902 gen_helper_pext(cpu_regs[reg], cpu_T0, cpu_T1);
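/* adcx and adox maintain independent carry chains (CF and OF
   respectively) so two multi-precision additions can be interleaved;
   CC_OP_ADCX/ADOX/ADCOX record which chains are live so the carries
   survive from one insn to the next. */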
3905 case 0x1f6: /* adcx Gy, Ey */
3906 case 0x2f6: /* adox Gy, Ey */
3907 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_ADX)) {
3910 TCGv carry_in, carry_out, zero;
3913 ot = mo_64_32(s->dflag);
3914 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3916 /* Re-use the carry-out from a previous round. */
3917 TCGV_UNUSED(carry_in);
3918 carry_out = (b == 0x1f6 ? cpu_cc_dst : cpu_cc_src2);
3922 carry_in = cpu_cc_dst;
3923 end_op = CC_OP_ADCX;
3925 end_op = CC_OP_ADCOX;
3930 end_op = CC_OP_ADCOX;
3932 carry_in = cpu_cc_src2;
3933 end_op = CC_OP_ADOX;
3937 end_op = CC_OP_ADCOX;
3938 carry_in = carry_out;
3941 end_op = (b == 0x1f6 ? CC_OP_ADCX : CC_OP_ADOX);
3944 /* If we can't reuse carry-out, get it out of EFLAGS. */
3945 if (TCGV_IS_UNUSED(carry_in)) {
3946 if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) {
3947 gen_compute_eflags(s);
3949 carry_in = cpu_tmp0;
3950 tcg_gen_extract_tl(carry_in, cpu_cc_src,
3951 ctz32(b == 0x1f6 ? CC_C : CC_O), 1);
3955 #ifdef TARGET_X86_64
3957 /* If we know TL is 64-bit, and we want a 32-bit
3958 result, just do everything in 64-bit arithmetic. */
3959 tcg_gen_ext32u_i64(cpu_regs[reg], cpu_regs[reg]);
3960 tcg_gen_ext32u_i64(cpu_T0, cpu_T0);
3961 tcg_gen_add_i64(cpu_T0, cpu_T0, cpu_regs[reg]);
3962 tcg_gen_add_i64(cpu_T0, cpu_T0, carry_in);
3963 tcg_gen_ext32u_i64(cpu_regs[reg], cpu_T0);
3964 tcg_gen_shri_i64(carry_out, cpu_T0, 32);
3968 /* Otherwise compute the carry-out in two steps. */
3969 zero = tcg_const_tl(0);
3970 tcg_gen_add2_tl(cpu_T0, carry_out,
3973 tcg_gen_add2_tl(cpu_regs[reg], carry_out,
3974 cpu_regs[reg], carry_out,
3976 tcg_temp_free(zero);
3979 set_cc_op(s, end_op);
3983 case 0x1f7: /* shlx Gy, Ey, By */
3984 case 0x2f7: /* sarx Gy, Ey, By */
3985 case 0x3f7: /* shrx Gy, Ey, By */
3986 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3987 || !(s->prefix & PREFIX_VEX)
3991 ot = mo_64_32(s->dflag);
3992 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3994 tcg_gen_andi_tl(cpu_T1, cpu_regs[s->vex_v], 63);
3996 tcg_gen_andi_tl(cpu_T1, cpu_regs[s->vex_v], 31);
3999 tcg_gen_shl_tl(cpu_T0, cpu_T0, cpu_T1);
4000 } else if (b == 0x2f7) {
4002 tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
4004 tcg_gen_sar_tl(cpu_T0, cpu_T0, cpu_T1);
4007 tcg_gen_ext32u_tl(cpu_T0, cpu_T0);
4009 tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_T1);
4011 gen_op_mov_reg_v(ot, reg, cpu_T0);
4017 case 0x3f3: /* Group 17 */
4018 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
4019 || !(s->prefix & PREFIX_VEX)
4023 ot = mo_64_32(s->dflag);
4024 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4027 case 1: /* blsr By,Ey: dest = src & (src - 1), clearing the lowest set bit */
4028 tcg_gen_subi_tl(cpu_T1, cpu_T0, 1);
4029 tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);
4030 gen_op_mov_reg_v(ot, s->vex_v, cpu_T0);
4031 gen_op_update2_cc();
4032 set_cc_op(s, CC_OP_BMILGB + ot);
4035 case 2: /* blsmsk By,Ey */
4036 tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
4037 tcg_gen_subi_tl(cpu_T0, cpu_T0, 1);
4038 tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_cc_src);
4039 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
4040 set_cc_op(s, CC_OP_BMILGB + ot);
4043 case 3: /* blsi By, Ey: dest = src & -src, isolating the lowest set bit */
4044 tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
4045 tcg_gen_neg_tl(cpu_T0, cpu_T0);
4046 tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_cc_src);
4047 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
4048 set_cc_op(s, CC_OP_BMILGB + ot);
4064 modrm = cpu_ldub_code(env, s->pc++);
4066 reg = ((modrm >> 3) & 7) | rex_r;
4067 mod = (modrm >> 6) & 3;
4072 sse_fn_eppi = sse_op_table7[b].op[b1];
4076 if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
4081 if (sse_fn_eppi == SSE_SPECIAL) {
4082 ot = mo_64_32(s->dflag);
4083 rm = (modrm & 7) | REX_B(s);
4085 gen_lea_modrm(env, s, modrm);
4086 reg = ((modrm >> 3) & 7) | rex_r;
4087 val = cpu_ldub_code(env, s->pc++);
4089 case 0x14: /* pextrb */
4090 tcg_gen_ld8u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
4091 xmm_regs[reg].ZMM_B(val & 15)));
4093 gen_op_mov_reg_v(ot, rm, cpu_T0);
4095 tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
4096 s->mem_index, MO_UB);
4099 case 0x15: /* pextrw */
4100 tcg_gen_ld16u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
4101 xmm_regs[reg].ZMM_W(val & 7)));
4103 gen_op_mov_reg_v(ot, rm, cpu_T0);
4105 tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
4106 s->mem_index, MO_LEUW);
4110 if (ot == MO_32) { /* pextrd */
4111 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4112 offsetof(CPUX86State,
4113 xmm_regs[reg].ZMM_L(val & 3)));
4115 tcg_gen_extu_i32_tl(cpu_regs[rm], cpu_tmp2_i32);
4117 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
4118 s->mem_index, MO_LEUL);
4120 } else { /* pextrq */
4121 #ifdef TARGET_X86_64
4122 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
4123 offsetof(CPUX86State,
4124 xmm_regs[reg].ZMM_Q(val & 1)));
4126 tcg_gen_mov_i64(cpu_regs[rm], cpu_tmp1_i64);
4128 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
4129 s->mem_index, MO_LEQ);
4136 case 0x17: /* extractps */
4137 tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
4138 xmm_regs[reg].ZMM_L(val & 3)));
4140 gen_op_mov_reg_v(ot, rm, cpu_T0);
4142 tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
4143 s->mem_index, MO_LEUL);
4146 case 0x20: /* pinsrb */
4148 gen_op_mov_v_reg(MO_32, cpu_T0, rm);
4150 tcg_gen_qemu_ld_tl(cpu_T0, cpu_A0,
4151 s->mem_index, MO_UB);
4153 tcg_gen_st8_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
4154 xmm_regs[reg].ZMM_B(val & 15)));
4156 case 0x21: /* insertps */
4158 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4159 offsetof(CPUX86State,xmm_regs[rm]
4160 .ZMM_L((val >> 6) & 3)));
4162 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
4163 s->mem_index, MO_LEUL);
4165 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4166 offsetof(CPUX86State,xmm_regs[reg]
4167 .ZMM_L((val >> 4) & 3)));
4169 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4170 cpu_env, offsetof(CPUX86State,
4171 xmm_regs[reg].ZMM_L(0)));
4173 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4174 cpu_env, offsetof(CPUX86State,
4175 xmm_regs[reg].ZMM_L(1)));
4177 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4178 cpu_env, offsetof(CPUX86State,
4179 xmm_regs[reg].ZMM_L(2)));
4181 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4182 cpu_env, offsetof(CPUX86State,
4183 xmm_regs[reg].ZMM_L(3)));
4186 if (ot == MO_32) { /* pinsrd */
4188 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[rm]);
4190 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
4191 s->mem_index, MO_LEUL);
4193 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4194 offsetof(CPUX86State,
4195 xmm_regs[reg].ZMM_L(val & 3)));
4196 } else { /* pinsrq */
4197 #ifdef TARGET_X86_64
4199 gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
4201 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
4202 s->mem_index, MO_LEQ);
4204 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
4205 offsetof(CPUX86State,
4206 xmm_regs[reg].ZMM_Q(val & 1)));
4217 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4219 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
4221 op2_offset = offsetof(CPUX86State,xmm_t0);
4222 gen_lea_modrm(env, s, modrm);
4223 gen_ldo_env_A0(s, op2_offset);
4226 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4228 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4230 op2_offset = offsetof(CPUX86State,mmx_t0);
4231 gen_lea_modrm(env, s, modrm);
4232 gen_ldq_env_A0(s, op2_offset);
4235 val = cpu_ldub_code(env, s->pc++);
4237 if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
4238 set_cc_op(s, CC_OP_EFLAGS);
4240 if (s->dflag == MO_64) {
4241 /* The helper must use entire 64-bit gp registers */
4246 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4247 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4248 sse_fn_eppi(cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4252 /* Various integer extensions at 0f 3a f[0-f]. */
4253 b = modrm | (b1 << 8);
4254 modrm = cpu_ldub_code(env, s->pc++);
4255 reg = ((modrm >> 3) & 7) | rex_r;
4258 case 0x3f0: /* rorx Gy,Ey, Ib */
4259 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4260 || !(s->prefix & PREFIX_VEX)
4264 ot = mo_64_32(s->dflag);
4265 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4266 b = cpu_ldub_code(env, s->pc++);
4268 tcg_gen_rotri_tl(cpu_T0, cpu_T0, b & 63);
4270 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
4271 tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, b & 31);
4272 tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
4274 gen_op_mov_reg_v(ot, reg, cpu_T0);
4284 gen_unknown_opcode(env, s);
4288 /* generic MMX or SSE operation */
4290 case 0x70: /* pshufx insn */
4291 case 0xc6: /* pshufx insn */
4292 case 0xc2: /* compare insns */
4299 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4303 gen_lea_modrm(env, s, modrm);
4304 op2_offset = offsetof(CPUX86State,xmm_t0);
4310 /* Most sse scalar operations. */
4313 } else if (b1 == 3) {
4318 case 0x2e: /* ucomis[sd] */
4319 case 0x2f: /* comis[sd] */
4331 gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
4332 tcg_gen_st32_tl(cpu_T0, cpu_env,
4333 offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
4337 gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_D(0)));
4340 /* 128 bit access */
4341 gen_ldo_env_A0(s, op2_offset);
4345 rm = (modrm & 7) | REX_B(s);
4346 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
4349 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4351 gen_lea_modrm(env, s, modrm);
4352 op2_offset = offsetof(CPUX86State,mmx_t0);
4353 gen_ldq_env_A0(s, op2_offset);
4356 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4360 case 0x0f: /* 3DNow! data insns */
4361 val = cpu_ldub_code(env, s->pc++);
4362 sse_fn_epp = sse_op_table5[val];
4366 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) {
4369 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4370 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4371 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4373 case 0x70: /* pshufx insn */
4374 case 0xc6: /* pshufx insn */
4375 val = cpu_ldub_code(env, s->pc++);
4376 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4377 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4378 /* XXX: introduce a new table? */
4379 sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp;
4380 sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4384 val = cpu_ldub_code(env, s->pc++);
4387 sse_fn_epp = sse_op_table4[val][b1];
4389 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4390 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4391 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4394 /* maskmov: we must prepare A0 */
4397 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EDI]);
4398 gen_extu(s->aflag, cpu_A0);
4399 gen_add_A0_ds_seg(s);
4401 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4402 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4403 /* XXX: introduce a new table? */
4404 sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp;
4405 sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0);
4408 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4409 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4410 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4413 if (b == 0x2e || b == 0x2f) {
4414 set_cc_op(s, CC_OP_EFLAGS);
4419 /* convert one instruction. s->base.is_jmp is set if the translation must
4420 be stopped. Return the next pc value */
4421 static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
4423 CPUX86State *env = cpu->env_ptr;
4426 TCGMemOp ot, aflag, dflag;
4427 int modrm, reg, rm, mod, op, opreg, val;
4428 target_ulong next_eip, tval;
4430 target_ulong pc_start = s->base.pc_next;
4432 s->pc_start = s->pc = pc_start;
4437 #ifdef TARGET_X86_64
4442 s->rip_offset = 0; /* for relative ip address */
4446 /* x86 has an upper limit of 15 bytes for an instruction. Since we
4447 * do not want to decode and generate IR for an illegal
4448 * instruction, the following check limits the instruction size to
4449 * 25 bytes: 14 prefix + 1 opc + 6 (modrm+sib+ofs) + 4 imm */
4450 if (s->pc - pc_start > 14) {
4453 b = cpu_ldub_code(env, s->pc);
4455 /* Collect prefixes. */
4458 prefixes |= PREFIX_REPZ;
4461 prefixes |= PREFIX_REPNZ;
4464 prefixes |= PREFIX_LOCK;
4485 prefixes |= PREFIX_DATA;
4488 prefixes |= PREFIX_ADR;
4490 #ifdef TARGET_X86_64
4494 rex_w = (b >> 3) & 1;
4495 rex_r = (b & 0x4) << 1;
4496 s->rex_x = (b & 0x2) << 2;
4497 REX_B(s) = (b & 0x1) << 3;
4498 x86_64_hregs = 1; /* select uniform byte register addressing */
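/* REX is 0100WRXB: W selects 64-bit operand size, while R, X and B
   are shifted into bit 3 above so they can be OR'ed in as the high
   bit of the reg, index and base register numbers. */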
4503 case 0xc5: /* 2-byte VEX */
4504 case 0xc4: /* 3-byte VEX */
4505 /* VEX prefixes cannot be used except in 32-bit mode.
4506 Otherwise the instruction is LES or LDS. */
4507 if (s->code32 && !s->vm86) {
4508 static const int pp_prefix[4] = {
4509 0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ
4511 int vex3, vex2 = cpu_ldub_code(env, s->pc);
4513 if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
4514 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
4515 otherwise the instruction is LES or LDS. */
4520 /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
4521 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ
4522 | PREFIX_LOCK | PREFIX_DATA)) {
4525 #ifdef TARGET_X86_64
4530 rex_r = (~vex2 >> 4) & 8;
4533 b = cpu_ldub_code(env, s->pc++);
4535 #ifdef TARGET_X86_64
4536 s->rex_x = (~vex2 >> 3) & 8;
4537 s->rex_b = (~vex2 >> 2) & 8;
4539 vex3 = cpu_ldub_code(env, s->pc++);
4540 rex_w = (vex3 >> 7) & 1;
4541 switch (vex2 & 0x1f) {
4542 case 0x01: /* Implied 0f leading opcode bytes. */
4543 b = cpu_ldub_code(env, s->pc++) | 0x100;
4545 case 0x02: /* Implied 0f 38 leading opcode bytes. */
4548 case 0x03: /* Implied 0f 3a leading opcode bytes. */
4551 default: /* Reserved for future use. */
4555 s->vex_v = (~vex3 >> 3) & 0xf;
4556 s->vex_l = (vex3 >> 2) & 1;
4557 prefixes |= pp_prefix[vex3 & 3] | PREFIX_VEX;
4562 /* Post-process prefixes. */
4564 /* In 64-bit mode, the default data size is 32-bit. Select 64-bit
4565 data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
4566 over 0x66 if both are present. */
4567 dflag = (rex_w > 0 ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32);
4568 /* In 64-bit mode, 0x67 selects 32-bit addressing. */
4569 aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64);
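/* E.g. "66 48 01 c8" is add rax,rcx, not add ax,cx: REX.W overrides
   the 66 prefix. */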
4571 /* In 16/32-bit mode, 0x66 selects the opposite data size. */
4572 if (s->code32 ^ ((prefixes & PREFIX_DATA) != 0)) {
4577 /* In 16/32-bit mode, 0x67 selects the opposite addressing. */
4578 if (s->code32 ^ ((prefixes & PREFIX_ADR) != 0)) {
4585 s->prefix = prefixes;
4589 /* now check op code */
4593 /**************************/
4594 /* extended op code */
4595 b = cpu_ldub_code(env, s->pc++) | 0x100;
4598 /**************************/
4613 ot = mo_b_d(b, dflag);
4616 case 0: /* OP Ev, Gv */
4617 modrm = cpu_ldub_code(env, s->pc++);
4618 reg = ((modrm >> 3) & 7) | rex_r;
4619 mod = (modrm >> 6) & 3;
4620 rm = (modrm & 7) | REX_B(s);
4622 gen_lea_modrm(env, s, modrm);
4624 } else if (op == OP_XORL && rm == reg) {
4626 /* xor reg, reg optimisation */
4627 set_cc_op(s, CC_OP_CLR);
4628 tcg_gen_movi_tl(cpu_T0, 0);
4629 gen_op_mov_reg_v(ot, reg, cpu_T0);
4634 gen_op_mov_v_reg(ot, cpu_T1, reg);
4635 gen_op(s, op, ot, opreg);
4637 case 1: /* OP Gv, Ev */
4638 modrm = cpu_ldub_code(env, s->pc++);
4639 mod = (modrm >> 6) & 3;
4640 reg = ((modrm >> 3) & 7) | rex_r;
4641 rm = (modrm & 7) | REX_B(s);
4643 gen_lea_modrm(env, s, modrm);
4644 gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
4645 } else if (op == OP_XORL && rm == reg) {
4648 gen_op_mov_v_reg(ot, cpu_T1, rm);
4650 gen_op(s, op, ot, reg);
4652 case 2: /* OP A, Iv */
4653 val = insn_get(env, s, ot);
4654 tcg_gen_movi_tl(cpu_T1, val);
4655 gen_op(s, op, ot, OR_EAX);
4664 case 0x80: /* GRP1 */
4670 ot = mo_b_d(b, dflag);
4672 modrm = cpu_ldub_code(env, s->pc++);
4673 mod = (modrm >> 6) & 3;
4674 rm = (modrm & 7) | REX_B(s);
4675 op = (modrm >> 3) & 7;
4681 s->rip_offset = insn_const_size(ot);
4682 gen_lea_modrm(env, s, modrm);
4693 val = insn_get(env, s, ot);
4696 val = (int8_t)insn_get(env, s, MO_8);
4699 tcg_gen_movi_tl(cpu_T1, val);
4700 gen_op(s, op, ot, opreg);
4704 /**************************/
4705 /* inc, dec, and other misc arith */
4706 case 0x40 ... 0x47: /* inc Gv */
4708 gen_inc(s, ot, OR_EAX + (b & 7), 1);
4710 case 0x48 ... 0x4f: /* dec Gv */
4712 gen_inc(s, ot, OR_EAX + (b & 7), -1);
4714 case 0xf6: /* GRP3 */
4716 ot = mo_b_d(b, dflag);
4718 modrm = cpu_ldub_code(env, s->pc++);
4719 mod = (modrm >> 6) & 3;
4720 rm = (modrm & 7) | REX_B(s);
4721 op = (modrm >> 3) & 7;
4724 s->rip_offset = insn_const_size(ot);
4726 gen_lea_modrm(env, s, modrm);
4727 /* For those below that handle locked memory, don't load here. */
4728 if (!(s->prefix & PREFIX_LOCK)
4730 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
4733 gen_op_mov_v_reg(ot, cpu_T0, rm);
4738 val = insn_get(env, s, ot);
4739 tcg_gen_movi_tl(cpu_T1, val);
4740 gen_op_testl_T0_T1_cc();
4741 set_cc_op(s, CC_OP_LOGICB + ot);
4744 if (s->prefix & PREFIX_LOCK) {
4748 tcg_gen_movi_tl(cpu_T0, ~0);
4749 tcg_gen_atomic_xor_fetch_tl(cpu_T0, cpu_A0, cpu_T0,
4750 s->mem_index, ot | MO_LE);
4752 tcg_gen_not_tl(cpu_T0, cpu_T0);
4754 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
4756 gen_op_mov_reg_v(ot, rm, cpu_T0);
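/* Locked NEG has no single atomic TCG op; emulate it with a
   compare-and-swap loop: read the old value, try to store its
   negation, and retry if memory changed in the meantime. */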
4761 if (s->prefix & PREFIX_LOCK) {
4763 TCGv a0, t0, t1, t2;
4768 a0 = tcg_temp_local_new();
4769 t0 = tcg_temp_local_new();
4770 label1 = gen_new_label();
4772 tcg_gen_mov_tl(a0, cpu_A0);
4773 tcg_gen_mov_tl(t0, cpu_T0);
4775 gen_set_label(label1);
4776 t1 = tcg_temp_new();
4777 t2 = tcg_temp_new();
4778 tcg_gen_mov_tl(t2, t0);
4779 tcg_gen_neg_tl(t1, t0);
4780 tcg_gen_atomic_cmpxchg_tl(t0, a0, t0, t1,
4781 s->mem_index, ot | MO_LE);
4783 tcg_gen_brcond_tl(TCG_COND_NE, t0, t2, label1);
4787 tcg_gen_mov_tl(cpu_T0, t0);
4790 tcg_gen_neg_tl(cpu_T0, cpu_T0);
4792 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
4794 gen_op_mov_reg_v(ot, rm, cpu_T0);
4797 gen_op_update_neg_cc();
4798 set_cc_op(s, CC_OP_SUBB + ot);
4803 gen_op_mov_v_reg(MO_8, cpu_T1, R_EAX);
4804 tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
4805 tcg_gen_ext8u_tl(cpu_T1, cpu_T1);
4806 /* XXX: use 32 bit mul which could be faster */
4807 tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
4808 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
4809 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
4810 tcg_gen_andi_tl(cpu_cc_src, cpu_T0, 0xff00);
4811 set_cc_op(s, CC_OP_MULB);
4814 gen_op_mov_v_reg(MO_16, cpu_T1, R_EAX);
4815 tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
4816 tcg_gen_ext16u_tl(cpu_T1, cpu_T1);
4817 /* XXX: use 32 bit mul which could be faster */
4818 tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
4819 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
4820 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
4821 tcg_gen_shri_tl(cpu_T0, cpu_T0, 16);
4822 gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
4823 tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
4824 set_cc_op(s, CC_OP_MULW);
4828 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
4829 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
4830 tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
4831 cpu_tmp2_i32, cpu_tmp3_i32);
4832 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
4833 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
4834 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4835 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
4836 set_cc_op(s, CC_OP_MULL);
4838 #ifdef TARGET_X86_64
4840 tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
4841 cpu_T0, cpu_regs[R_EAX]);
4842 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4843 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
4844 set_cc_op(s, CC_OP_MULQ);
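/* For imul, CF and OF must be set when the product overflows the
   signed result width; below this is computed as the full result
   minus the sign-extension of its low half, left in cc_src. */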
4852 gen_op_mov_v_reg(MO_8, cpu_T1, R_EAX);
4853 tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
4854 tcg_gen_ext8s_tl(cpu_T1, cpu_T1);
4855 /* XXX: use 32 bit mul which could be faster */
4856 tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
4857 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
4858 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
4859 tcg_gen_ext8s_tl(cpu_tmp0, cpu_T0);
4860 tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
4861 set_cc_op(s, CC_OP_MULB);
4864 gen_op_mov_v_reg(MO_16, cpu_T1, R_EAX);
4865 tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
4866 tcg_gen_ext16s_tl(cpu_T1, cpu_T1);
4867 /* XXX: use 32 bit mul which could be faster */
4868 tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
4869 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
4870 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
4871 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T0);
4872 tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
4873 tcg_gen_shri_tl(cpu_T0, cpu_T0, 16);
4874 gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
4875 set_cc_op(s, CC_OP_MULW);
4879 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
4880 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
4881 tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
4882 cpu_tmp2_i32, cpu_tmp3_i32);
4883 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
4884 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
4885 tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
4886 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4887 tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
4888 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
4889 set_cc_op(s, CC_OP_MULL);
4891 #ifdef TARGET_X86_64
4893 tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
4894 cpu_T0, cpu_regs[R_EAX]);
4895 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4896 tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
4897 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
4898 set_cc_op(s, CC_OP_MULQ);
4906 gen_helper_divb_AL(cpu_env, cpu_T0);
4909 gen_helper_divw_AX(cpu_env, cpu_T0);
4913 gen_helper_divl_EAX(cpu_env, cpu_T0);
4915 #ifdef TARGET_X86_64
4917 gen_helper_divq_EAX(cpu_env, cpu_T0);
4925 gen_helper_idivb_AL(cpu_env, cpu_T0);
4928 gen_helper_idivw_AX(cpu_env, cpu_T0);
4932 gen_helper_idivl_EAX(cpu_env, cpu_T0);
4934 #ifdef TARGET_X86_64
4936 gen_helper_idivq_EAX(cpu_env, cpu_T0);
4946 case 0xfe: /* GRP4 */
4947 case 0xff: /* GRP5 */
4948 ot = mo_b_d(b, dflag);
4950 modrm = cpu_ldub_code(env, s->pc++);
4951 mod = (modrm >> 6) & 3;
4952 rm = (modrm & 7) | REX_B(s);
4953 op = (modrm >> 3) & 7;
4954 if (op >= 2 && b == 0xfe) {
4958 if (op == 2 || op == 4) {
4959 /* operand size for jumps is 64 bit */
4961 } else if (op == 3 || op == 5) {
4962 ot = dflag != MO_16 ? MO_32 + (rex_w == 1) : MO_16;
4963 } else if (op == 6) {
4964 /* default push size is 64 bit */
4965 ot = mo_pushpop(s, dflag);
4969 gen_lea_modrm(env, s, modrm);
4970 if (op >= 2 && op != 3 && op != 5)
4971 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
4973 gen_op_mov_v_reg(ot, cpu_T0, rm);
4977 case 0: /* inc Ev */
4982 gen_inc(s, ot, opreg, 1);
4984 case 1: /* dec Ev */
4989 gen_inc(s, ot, opreg, -1);
4991 case 2: /* call Ev */
4992 /* XXX: optimize if memory (no 'and' is necessary) */
4993 if (dflag == MO_16) {
4994 tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
4996 next_eip = s->pc - s->cs_base;
4997 tcg_gen_movi_tl(cpu_T1, next_eip);
4998 gen_push_v(s, cpu_T1);
4999 gen_op_jmp_v(cpu_T0);
5003 case 3: /* lcall Ev */
5004 gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
5005 gen_add_A0_im(s, 1 << ot);
5006 gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
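/* Both far-call helpers encode the operand size as dflag - 1
(0 = 16-bit, 1 = 32-bit, 2 = 64-bit). */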
5008 if (s->pe && !s->vm86) {
5009 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
5010 gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T1,
5011 tcg_const_i32(dflag - 1),
5012 tcg_const_tl(s->pc - s->cs_base));
5014 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
5015 gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T1,
5016 tcg_const_i32(dflag - 1),
5017 tcg_const_i32(s->pc - s->cs_base));
5019 tcg_gen_ld_tl(cpu_tmp4, cpu_env, offsetof(CPUX86State, eip));
5020 gen_jr(s, cpu_tmp4);
5022 case 4: /* jmp Ev */
5023 if (dflag == MO_16) {
5024 tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
5026 gen_op_jmp_v(cpu_T0);
5030 case 5: /* ljmp Ev */
5031 gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
5032 gen_add_A0_im(s, 1 << ot);
5033 gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
5035 if (s->pe && !s->vm86) {
5036 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
5037 gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T1,
5038 tcg_const_tl(s->pc - s->cs_base));
5040 gen_op_movl_seg_T0_vm(R_CS);
5041 gen_op_jmp_v(cpu_T1);
5043 tcg_gen_ld_tl(cpu_tmp4, cpu_env, offsetof(CPUX86State, eip));
5044 gen_jr(s, cpu_tmp4);
5046 case 6: /* push Ev */
5047 gen_push_v(s, cpu_T0);
5054 case 0x84: /* test Ev, Gv */
5056 ot = mo_b_d(b, dflag);
5058 modrm = cpu_ldub_code(env, s->pc++);
5059 reg = ((modrm >> 3) & 7) | rex_r;
5061 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5062 gen_op_mov_v_reg(ot, cpu_T1, reg);
5063 gen_op_testl_T0_T1_cc();
5064 set_cc_op(s, CC_OP_LOGICB + ot);
5067 case 0xa8: /* test eAX, Iv */
5069 ot = mo_b_d(b, dflag);
5070 val = insn_get(env, s, ot);
5072 gen_op_mov_v_reg(ot, cpu_T0, OR_EAX);
5073 tcg_gen_movi_tl(cpu_T1, val);
5074 gen_op_testl_T0_T1_cc();
5075 set_cc_op(s, CC_OP_LOGICB + ot);
5078 case 0x98: /* CWDE/CBW */
5080 #ifdef TARGET_X86_64
5082 gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
5083 tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
5084 gen_op_mov_reg_v(MO_64, R_EAX, cpu_T0);
5088 gen_op_mov_v_reg(MO_16, cpu_T0, R_EAX);
5089 tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
5090 gen_op_mov_reg_v(MO_32, R_EAX, cpu_T0);
5093 gen_op_mov_v_reg(MO_8, cpu_T0, R_EAX);
5094 tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
5095 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
5101 case 0x99: /* CDQ/CWD */
5103 #ifdef TARGET_X86_64
5105 gen_op_mov_v_reg(MO_64, cpu_T0, R_EAX);
5106 tcg_gen_sari_tl(cpu_T0, cpu_T0, 63);
5107 gen_op_mov_reg_v(MO_64, R_EDX, cpu_T0);
5111 gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
5112 tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
5113 tcg_gen_sari_tl(cpu_T0, cpu_T0, 31);
5114 gen_op_mov_reg_v(MO_32, R_EDX, cpu_T0);
5117 gen_op_mov_v_reg(MO_16, cpu_T0, R_EAX);
5118 tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
5119 tcg_gen_sari_tl(cpu_T0, cpu_T0, 15);
5120 gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
5126 case 0x1af: /* imul Gv, Ev */
5127 case 0x69: /* imul Gv, Ev, I */
5130 modrm = cpu_ldub_code(env, s->pc++);
5131 reg = ((modrm >> 3) & 7) | rex_r;
5133 s->rip_offset = insn_const_size(ot);
5136 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5138 val = insn_get(env, s, ot);
5139 tcg_gen_movi_tl(cpu_T1, val);
5140 } else if (b == 0x6b) {
5141 val = (int8_t)insn_get(env, s, MO_8);
5142 tcg_gen_movi_tl(cpu_T1, val);
5144 gen_op_mov_v_reg(ot, cpu_T1, reg);
5147 #ifdef TARGET_X86_64
5149 tcg_gen_muls2_i64(cpu_regs[reg], cpu_T1, cpu_T0, cpu_T1);
5150 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
5151 tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
5152 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T1);
5156 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
5157 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
5158 tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
5159 cpu_tmp2_i32, cpu_tmp3_i32);
5160 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
5161 tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
5162 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
5163 tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
5164 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
5167 tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
5168 tcg_gen_ext16s_tl(cpu_T1, cpu_T1);
5169 /* XXX: use 32 bit mul which could be faster */
5170 tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
5171 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
5172 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T0);
5173 tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
5174 gen_op_mov_reg_v(ot, reg, cpu_T0);
5177 set_cc_op(s, CC_OP_MULB + ot);
5180 case 0x1c1: /* xadd Ev, Gv */
5181 ot = mo_b_d(b, dflag);
5182 modrm = cpu_ldub_code(env, s->pc++);
5183 reg = ((modrm >> 3) & 7) | rex_r;
5184 mod = (modrm >> 6) & 3;
5185 gen_op_mov_v_reg(ot, cpu_T0, reg);
5187 rm = (modrm & 7) | REX_B(s);
5188 gen_op_mov_v_reg(ot, cpu_T1, rm);
5189 tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
5190 gen_op_mov_reg_v(ot, reg, cpu_T1);
5191 gen_op_mov_reg_v(ot, rm, cpu_T0);
5193 gen_lea_modrm(env, s, modrm);
5194 if (s->prefix & PREFIX_LOCK) {
5195 tcg_gen_atomic_fetch_add_tl(cpu_T1, cpu_A0, cpu_T0,
5196 s->mem_index, ot | MO_LE);
5197 tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
5199 gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
5200 tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
5201 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
5203 gen_op_mov_reg_v(ot, reg, cpu_T1);
5205 gen_op_update2_cc();
5206 set_cc_op(s, CC_OP_ADDB + ot);
5209 case 0x1b1: /* cmpxchg Ev, Gv */
5211 TCGv oldv, newv, cmpv;
5213 ot = mo_b_d(b, dflag);
5214 modrm = cpu_ldub_code(env, s->pc++);
5215 reg = ((modrm >> 3) & 7) | rex_r;
5216 mod = (modrm >> 6) & 3;
5217 oldv = tcg_temp_new();
5218 newv = tcg_temp_new();
5219 cmpv = tcg_temp_new();
5220 gen_op_mov_v_reg(ot, newv, reg);
5221 tcg_gen_mov_tl(cmpv, cpu_regs[R_EAX]);
5223 if (s->prefix & PREFIX_LOCK) {
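/* Under LOCK the cmpxchg performs the store itself. oldv receives
the previous memory value in either case; writing it back to EAX is
also correct on success, since then oldv == cmpv == the old EAX. */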
5227 gen_lea_modrm(env, s, modrm);
5228 tcg_gen_atomic_cmpxchg_tl(oldv, cpu_A0, cmpv, newv,
5229 s->mem_index, ot | MO_LE);
5230 gen_op_mov_reg_v(ot, R_EAX, oldv);
5233 rm = (modrm & 7) | REX_B(s);
5234 gen_op_mov_v_reg(ot, oldv, rm);
5236 gen_lea_modrm(env, s, modrm);
5237 gen_op_ld_v(s, ot, oldv, cpu_A0);
5238 rm = 0; /* avoid warning */
5242 /* store value = (old == cmp ? new : old); */
5243 tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv);
5245 gen_op_mov_reg_v(ot, R_EAX, oldv);
5246 gen_op_mov_reg_v(ot, rm, newv);
5248 /* Perform an unconditional store cycle like a physical CPU;
5249 this must happen before changing the accumulator, to ensure
5250 idempotency if the store faults and the instruction is restarted. */
5252 gen_op_st_v(s, ot, newv, cpu_A0);
5253 gen_op_mov_reg_v(ot, R_EAX, oldv);
5256 tcg_gen_mov_tl(cpu_cc_src, oldv);
5257 tcg_gen_mov_tl(cpu_cc_srcT, cmpv);
5258 tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv);
5259 set_cc_op(s, CC_OP_SUBB + ot);
5260 tcg_temp_free(oldv);
5261 tcg_temp_free(newv);
5262 tcg_temp_free(cmpv);
5265 case 0x1c7: /* cmpxchg8b */
5266 modrm = cpu_ldub_code(env, s->pc++);
5267 mod = (modrm >> 6) & 3;
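/* cmpxchg8b/16b requires a memory operand and opcode extension /1,
i.e. (modrm & 0x38) == 0x08. */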
5268 if ((mod == 3) || ((modrm & 0x38) != 0x8))
5270 #ifdef TARGET_X86_64
5271 if (dflag == MO_64) {
5272 if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
5274 gen_lea_modrm(env, s, modrm);
5275 if ((s->prefix & PREFIX_LOCK) && parallel_cpus) {
5276 gen_helper_cmpxchg16b(cpu_env, cpu_A0);
5278 gen_helper_cmpxchg16b_unlocked(cpu_env, cpu_A0);
5283 if (!(s->cpuid_features & CPUID_CX8))
5285 gen_lea_modrm(env, s, modrm);
5286 if ((s->prefix & PREFIX_LOCK) && parallel_cpus) {
5287 gen_helper_cmpxchg8b(cpu_env, cpu_A0);
5289 gen_helper_cmpxchg8b_unlocked(cpu_env, cpu_A0);
5292 set_cc_op(s, CC_OP_EFLAGS);
5295 /**************************/
5297 case 0x50 ... 0x57: /* push */
5298 gen_op_mov_v_reg(MO_32, cpu_T0, (b & 7) | REX_B(s));
5299 gen_push_v(s, cpu_T0);
5301 case 0x58 ... 0x5f: /* pop */
5303 /* NOTE: order is important for pop %sp */
5304 gen_pop_update(s, ot);
5305 gen_op_mov_reg_v(ot, (b & 7) | REX_B(s), cpu_T0);
5307 case 0x60: /* pusha */
5312 case 0x61: /* popa */
5317 case 0x68: /* push Iv */
5319 ot = mo_pushpop(s, dflag);
5321 val = insn_get(env, s, ot);
5323 val = (int8_t)insn_get(env, s, MO_8);
5324 tcg_gen_movi_tl(cpu_T0, val);
5325 gen_push_v(s, cpu_T0);
5327 case 0x8f: /* pop Ev */
5328 modrm = cpu_ldub_code(env, s->pc++);
5329 mod = (modrm >> 6) & 3;
5332 /* NOTE: order is important for pop %sp */
5333 gen_pop_update(s, ot);
5334 rm = (modrm & 7) | REX_B(s);
5335 gen_op_mov_reg_v(ot, rm, cpu_T0);
5337 /* NOTE: order is important too for MMU exceptions */
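/* The EA of a memory destination that uses ESP as a base must be
computed as if ESP had already been incremented by the pop;
popl_esp_hack presumably feeds that adjustment into the address
generation. */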
5338 s->popl_esp_hack = 1 << ot;
5339 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5340 s->popl_esp_hack = 0;
5341 gen_pop_update(s, ot);
5344 case 0xc8: /* enter */
5347 val = cpu_lduw_code(env, s->pc);
5349 level = cpu_ldub_code(env, s->pc++);
5350 gen_enter(s, val, level);
5353 case 0xc9: /* leave */
5356 case 0x06: /* push es */
5357 case 0x0e: /* push cs */
5358 case 0x16: /* push ss */
5359 case 0x1e: /* push ds */
5362 gen_op_movl_T0_seg(b >> 3);
5363 gen_push_v(s, cpu_T0);
5365 case 0x1a0: /* push fs */
5366 case 0x1a8: /* push gs */
5367 gen_op_movl_T0_seg((b >> 3) & 7);
5368 gen_push_v(s, cpu_T0);
5370 case 0x07: /* pop es */
5371 case 0x17: /* pop ss */
5372 case 0x1f: /* pop ds */
5377 gen_movl_seg_T0(s, reg);
5378 gen_pop_update(s, ot);
5379 /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. */
5380 if (s->base.is_jmp) {
5381 gen_jmp_im(s->pc - s->cs_base);
5384 gen_eob_inhibit_irq(s, true);
5390 case 0x1a1: /* pop fs */
5391 case 0x1a9: /* pop gs */
5393 gen_movl_seg_T0(s, (b >> 3) & 7);
5394 gen_pop_update(s, ot);
5395 if (s->base.is_jmp) {
5396 gen_jmp_im(s->pc - s->cs_base);
5401 /**************************/
5404 case 0x89: /* mov Gv, Ev */
5405 ot = mo_b_d(b, dflag);
5406 modrm = cpu_ldub_code(env, s->pc++);
5407 reg = ((modrm >> 3) & 7) | rex_r;
5409 /* generate a generic store */
5410 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
5413 case 0xc7: /* mov Ev, Iv */
5414 ot = mo_b_d(b, dflag);
5415 modrm = cpu_ldub_code(env, s->pc++);
5416 mod = (modrm >> 6) & 3;
5418 s->rip_offset = insn_const_size(ot);
5419 gen_lea_modrm(env, s, modrm);
5421 val = insn_get(env, s, ot);
5422 tcg_gen_movi_tl(cpu_T0, val);
5424 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
5426 gen_op_mov_reg_v(ot, (modrm & 7) | REX_B(s), cpu_T0);
5430 case 0x8b: /* mov Ev, Gv */
5431 ot = mo_b_d(b, dflag);
5432 modrm = cpu_ldub_code(env, s->pc++);
5433 reg = ((modrm >> 3) & 7) | rex_r;
5435 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5436 gen_op_mov_reg_v(ot, reg, cpu_T0);
5438 case 0x8e: /* mov seg, Gv */
5439 modrm = cpu_ldub_code(env, s->pc++);
5440 reg = (modrm >> 3) & 7;
5441 if (reg >= 6 || reg == R_CS)
5443 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
5444 gen_movl_seg_T0(s, reg);
5445 /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. */
5446 if (s->base.is_jmp) {
5447 gen_jmp_im(s->pc - s->cs_base);
5450 gen_eob_inhibit_irq(s, true);
5456 case 0x8c: /* mov Gv, seg */
5457 modrm = cpu_ldub_code(env, s->pc++);
5458 reg = (modrm >> 3) & 7;
5459 mod = (modrm >> 6) & 3;
5462 gen_op_movl_T0_seg(reg);
5463 ot = mod == 3 ? dflag : MO_16;
5464 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5467 case 0x1b6: /* movzbS Gv, Eb */
5468 case 0x1b7: /* movzwS Gv, Eb */
5469 case 0x1be: /* movsbS Gv, Eb */
5470 case 0x1bf: /* movswS Gv, Eb */
5475 /* d_ot is the size of the destination */
5477 /* ot is the size of the source */
5478 ot = (b & 1) + MO_8;
5479 /* s_ot is the sign+size of source */
5480 s_ot = b & 8 ? MO_SIGN | ot : ot;
5482 modrm = cpu_ldub_code(env, s->pc++);
5483 reg = ((modrm >> 3) & 7) | rex_r;
5484 mod = (modrm >> 6) & 3;
5485 rm = (modrm & 7) | REX_B(s);
5488 if (s_ot == MO_SB && byte_reg_is_xH(rm)) {
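/* AH/BH/CH/DH live in bits 15:8 of the first four registers, so the
signed byte can be sign-extracted directly from regs[rm - 4]. */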
5489 tcg_gen_sextract_tl(cpu_T0, cpu_regs[rm - 4], 8, 8);
5491 gen_op_mov_v_reg(ot, cpu_T0, rm);
5494 tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
5497 tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
5500 tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
5504 tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
5508 gen_op_mov_reg_v(d_ot, reg, cpu_T0);
5510 gen_lea_modrm(env, s, modrm);
5511 gen_op_ld_v(s, s_ot, cpu_T0, cpu_A0);
5512 gen_op_mov_reg_v(d_ot, reg, cpu_T0);
5517 case 0x8d: /* lea */
5518 modrm = cpu_ldub_code(env, s->pc++);
5519 mod = (modrm >> 6) & 3;
5522 reg = ((modrm >> 3) & 7) | rex_r;
5524 AddressParts a = gen_lea_modrm_0(env, s, modrm);
5525 TCGv ea = gen_lea_modrm_1(a);
5526 gen_lea_v_seg(s, s->aflag, ea, -1, -1);
5527 gen_op_mov_reg_v(dflag, reg, cpu_A0);
5531 case 0xa0: /* mov EAX, Ov */
5533 case 0xa2: /* mov Ov, EAX */
5536 target_ulong offset_addr;
5538 ot = mo_b_d(b, dflag);
5540 #ifdef TARGET_X86_64
5542 offset_addr = cpu_ldq_code(env, s->pc);
5547 offset_addr = insn_get(env, s, s->aflag);
5550 tcg_gen_movi_tl(cpu_A0, offset_addr);
5551 gen_add_A0_ds_seg(s);
5553 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
5554 gen_op_mov_reg_v(ot, R_EAX, cpu_T0);
5556 gen_op_mov_v_reg(ot, cpu_T0, R_EAX);
5557 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
5561 case 0xd7: /* xlat */
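/* XLAT: AL = ds:[(E)BX + zero-extended AL], with the sum truncated
to the current address size. */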
5562 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EBX]);
5563 tcg_gen_ext8u_tl(cpu_T0, cpu_regs[R_EAX]);
5564 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T0);
5565 gen_extu(s->aflag, cpu_A0);
5566 gen_add_A0_ds_seg(s);
5567 gen_op_ld_v(s, MO_8, cpu_T0, cpu_A0);
5568 gen_op_mov_reg_v(MO_8, R_EAX, cpu_T0);
5570 case 0xb0 ... 0xb7: /* mov R, Ib */
5571 val = insn_get(env, s, MO_8);
5572 tcg_gen_movi_tl(cpu_T0, val);
5573 gen_op_mov_reg_v(MO_8, (b & 7) | REX_B(s), cpu_T0);
5575 case 0xb8 ... 0xbf: /* mov R, Iv */
5576 #ifdef TARGET_X86_64
5577 if (dflag == MO_64) {
5580 tmp = cpu_ldq_code(env, s->pc);
5582 reg = (b & 7) | REX_B(s);
5583 tcg_gen_movi_tl(cpu_T0, tmp);
5584 gen_op_mov_reg_v(MO_64, reg, cpu_T0);
5589 val = insn_get(env, s, ot);
5590 reg = (b & 7) | REX_B(s);
5591 tcg_gen_movi_tl(cpu_T0, val);
5592 gen_op_mov_reg_v(ot, reg, cpu_T0);
5596 case 0x91 ... 0x97: /* xchg R, EAX */
5599 reg = (b & 7) | REX_B(s);
5603 case 0x87: /* xchg Ev, Gv */
5604 ot = mo_b_d(b, dflag);
5605 modrm = cpu_ldub_code(env, s->pc++);
5606 reg = ((modrm >> 3) & 7) | rex_r;
5607 mod = (modrm >> 6) & 3;
5609 rm = (modrm & 7) | REX_B(s);
5611 gen_op_mov_v_reg(ot, cpu_T0, reg);
5612 gen_op_mov_v_reg(ot, cpu_T1, rm);
5613 gen_op_mov_reg_v(ot, rm, cpu_T0);
5614 gen_op_mov_reg_v(ot, reg, cpu_T1);
5616 gen_lea_modrm(env, s, modrm);
5617 gen_op_mov_v_reg(ot, cpu_T0, reg);
5618 /* for xchg, lock is implicit */
5619 tcg_gen_atomic_xchg_tl(cpu_T1, cpu_A0, cpu_T0,
5620 s->mem_index, ot | MO_LE);
5621 gen_op_mov_reg_v(ot, reg, cpu_T1);
5624 case 0xc4: /* les Gv */
5625 /* In CODE64 this is VEX3; see above. */
5628 case 0xc5: /* lds Gv */
5629 /* In CODE64 this is VEX2; see above. */
5632 case 0x1b2: /* lss Gv */
5635 case 0x1b4: /* lfs Gv */
5638 case 0x1b5: /* lgs Gv */
5641 ot = dflag != MO_16 ? MO_32 : MO_16;
5642 modrm = cpu_ldub_code(env, s->pc++);
5643 reg = ((modrm >> 3) & 7) | rex_r;
5644 mod = (modrm >> 6) & 3;
5647 gen_lea_modrm(env, s, modrm);
5648 gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
5649 gen_add_A0_im(s, 1 << ot);
5650 /* load the segment first to handle exceptions properly */
5651 gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
5652 gen_movl_seg_T0(s, op);
5653 /* then put the data */
5654 gen_op_mov_reg_v(ot, reg, cpu_T1);
5655 if (s->base.is_jmp) {
5656 gen_jmp_im(s->pc - s->cs_base);
5661 /************************/
5669 ot = mo_b_d(b, dflag);
5670 modrm = cpu_ldub_code(env, s->pc++);
5671 mod = (modrm >> 6) & 3;
5672 op = (modrm >> 3) & 7;
5678 gen_lea_modrm(env, s, modrm);
5681 opreg = (modrm & 7) | REX_B(s);
5686 gen_shift(s, op, ot, opreg, OR_ECX);
5689 shift = cpu_ldub_code(env, s->pc++);
5691 gen_shifti(s, op, ot, opreg, shift);
5706 case 0x1a4: /* shld imm */
5710 case 0x1a5: /* shld cl */
5714 case 0x1ac: /* shrd imm */
5718 case 0x1ad: /* shrd cl */
5723 modrm = cpu_ldub_code(env, s->pc++);
5724 mod = (modrm >> 6) & 3;
5725 rm = (modrm & 7) | REX_B(s);
5726 reg = ((modrm >> 3) & 7) | rex_r;
5728 gen_lea_modrm(env, s, modrm);
5733 gen_op_mov_v_reg(ot, cpu_T1, reg);
5736 TCGv imm = tcg_const_tl(cpu_ldub_code(env, s->pc++));
5737 gen_shiftd_rm_T1(s, ot, opreg, op, imm);
5740 gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
5744 /************************/
5747 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
5748 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5749 /* XXX: what to do if illegal op ? */
5750 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
5753 modrm = cpu_ldub_code(env, s->pc++);
5754 mod = (modrm >> 6) & 3;
5756 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
5759 gen_lea_modrm(env, s, modrm);
5761 case 0x00 ... 0x07: /* fxxxs */
5762 case 0x10 ... 0x17: /* fixxxl */
5763 case 0x20 ... 0x27: /* fxxxl */
5764 case 0x30 ... 0x37: /* fixxx */
5771 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5772 s->mem_index, MO_LEUL);
5773 gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32);
5776 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5777 s->mem_index, MO_LEUL);
5778 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
5781 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
5782 s->mem_index, MO_LEQ);
5783 gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64);
5787 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5788 s->mem_index, MO_LESW);
5789 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
5793 gen_helper_fp_arith_ST0_FT0(op1);
5795 /* fcomp needs pop */
5796 gen_helper_fpop(cpu_env);
5800 case 0x08: /* flds */
5801 case 0x0a: /* fsts */
5802 case 0x0b: /* fstps */
5803 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5804 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5805 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
5810 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5811 s->mem_index, MO_LEUL);
5812 gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32);
5815 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5816 s->mem_index, MO_LEUL);
5817 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
5820 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
5821 s->mem_index, MO_LEQ);
5822 gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64);
5826 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5827 s->mem_index, MO_LESW);
5828 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
5833 /* XXX: the corresponding CPUID bit must be tested ! */
5836 gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env);
5837 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5838 s->mem_index, MO_LEUL);
5841 gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env);
5842 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
5843 s->mem_index, MO_LEQ);
5847 gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env);
5848 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5849 s->mem_index, MO_LEUW);
5852 gen_helper_fpop(cpu_env);
5857 gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env);
5858 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5859 s->mem_index, MO_LEUL);
5862 gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env);
5863 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5864 s->mem_index, MO_LEUL);
5867 gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env);
5868 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
5869 s->mem_index, MO_LEQ);
5873 gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env);
5874 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5875 s->mem_index, MO_LEUW);
5879 gen_helper_fpop(cpu_env);
5883 case 0x0c: /* fldenv mem */
5884 gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
5886 case 0x0d: /* fldcw mem */
5887 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5888 s->mem_index, MO_LEUW);
5889 gen_helper_fldcw(cpu_env, cpu_tmp2_i32);
5891 case 0x0e: /* fnstenv mem */
5892 gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
5894 case 0x0f: /* fnstcw mem */
5895 gen_helper_fnstcw(cpu_tmp2_i32, cpu_env);
5896 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5897 s->mem_index, MO_LEUW);
5899 case 0x1d: /* fldt mem */
5900 gen_helper_fldt_ST0(cpu_env, cpu_A0);
5902 case 0x1f: /* fstpt mem */
5903 gen_helper_fstt_ST0(cpu_env, cpu_A0);
5904 gen_helper_fpop(cpu_env);
5906 case 0x2c: /* frstor mem */
5907 gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
5909 case 0x2e: /* fnsave mem */
5910 gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
5912 case 0x2f: /* fnstsw mem */
5913 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
5914 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5915 s->mem_index, MO_LEUW);
5917 case 0x3c: /* fbld */
5918 gen_helper_fbld_ST0(cpu_env, cpu_A0);
5920 case 0x3e: /* fbstp */
5921 gen_helper_fbst_ST0(cpu_env, cpu_A0);
5922 gen_helper_fpop(cpu_env);
5924 case 0x3d: /* fildll */
5925 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
5926 gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64);
5928 case 0x3f: /* fistpll */
5929 gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env);
5930 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
5931 gen_helper_fpop(cpu_env);
5937 /* register float ops */
5941 case 0x08: /* fld sti */
5942 gen_helper_fpush(cpu_env);
5943 gen_helper_fmov_ST0_STN(cpu_env,
5944 tcg_const_i32((opreg + 1) & 7));
5946 case 0x09: /* fxchg sti */
5947 case 0x29: /* fxchg4 sti, undocumented op */
5948 case 0x39: /* fxchg7 sti, undocumented op */
5949 gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
5951 case 0x0a: /* grp d9/2 */
5954 /* check exceptions (FreeBSD FPU probe) */
5955 gen_helper_fwait(cpu_env);
5961 case 0x0c: /* grp d9/4 */
5964 gen_helper_fchs_ST0(cpu_env);
5967 gen_helper_fabs_ST0(cpu_env);
5970 gen_helper_fldz_FT0(cpu_env);
5971 gen_helper_fcom_ST0_FT0(cpu_env);
5974 gen_helper_fxam_ST0(cpu_env);
5980 case 0x0d: /* grp d9/5 */
5984 gen_helper_fpush(cpu_env);
5985 gen_helper_fld1_ST0(cpu_env);
5988 gen_helper_fpush(cpu_env);
5989 gen_helper_fldl2t_ST0(cpu_env);
5992 gen_helper_fpush(cpu_env);
5993 gen_helper_fldl2e_ST0(cpu_env);
5996 gen_helper_fpush(cpu_env);
5997 gen_helper_fldpi_ST0(cpu_env);
6000 gen_helper_fpush(cpu_env);
6001 gen_helper_fldlg2_ST0(cpu_env);
6004 gen_helper_fpush(cpu_env);
6005 gen_helper_fldln2_ST0(cpu_env);
6008 gen_helper_fpush(cpu_env);
6009 gen_helper_fldz_ST0(cpu_env);
6016 case 0x0e: /* grp d9/6 */
6019 gen_helper_f2xm1(cpu_env);
6022 gen_helper_fyl2x(cpu_env);
6025 gen_helper_fptan(cpu_env);
6027 case 3: /* fpatan */
6028 gen_helper_fpatan(cpu_env);
6030 case 4: /* fxtract */
6031 gen_helper_fxtract(cpu_env);
6033 case 5: /* fprem1 */
6034 gen_helper_fprem1(cpu_env);
6036 case 6: /* fdecstp */
6037 gen_helper_fdecstp(cpu_env);
6040 case 7: /* fincstp */
6041 gen_helper_fincstp(cpu_env);
6045 case 0x0f: /* grp d9/7 */
6048 gen_helper_fprem(cpu_env);
6050 case 1: /* fyl2xp1 */
6051 gen_helper_fyl2xp1(cpu_env);
6054 gen_helper_fsqrt(cpu_env);
6056 case 3: /* fsincos */
6057 gen_helper_fsincos(cpu_env);
6059 case 5: /* fscale */
6060 gen_helper_fscale(cpu_env);
6062 case 4: /* frndint */
6063 gen_helper_frndint(cpu_env);
6066 gen_helper_fsin(cpu_env);
6070 gen_helper_fcos(cpu_env);
6074 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
6075 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
6076 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
6082 gen_helper_fp_arith_STN_ST0(op1, opreg);
6084 gen_helper_fpop(cpu_env);
6086 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6087 gen_helper_fp_arith_ST0_FT0(op1);
6091 case 0x02: /* fcom */
6092 case 0x22: /* fcom2, undocumented op */
6093 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6094 gen_helper_fcom_ST0_FT0(cpu_env);
6096 case 0x03: /* fcomp */
6097 case 0x23: /* fcomp3, undocumented op */
6098 case 0x32: /* fcomp5, undocumented op */
6099 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6100 gen_helper_fcom_ST0_FT0(cpu_env);
6101 gen_helper_fpop(cpu_env);
6103 case 0x15: /* da/5 */
6105 case 1: /* fucompp */
6106 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
6107 gen_helper_fucom_ST0_FT0(cpu_env);
6108 gen_helper_fpop(cpu_env);
6109 gen_helper_fpop(cpu_env);
6117 case 0: /* feni (287 only, just do nop here) */
6119 case 1: /* fdisi (287 only, just do nop here) */
6122 gen_helper_fclex(cpu_env);
6124 case 3: /* fninit */
6125 gen_helper_fninit(cpu_env);
6127 case 4: /* fsetpm (287 only, just do nop here) */
6133 case 0x1d: /* fucomi */
6134 if (!(s->cpuid_features & CPUID_CMOV)) {
6137 gen_update_cc_op(s);
6138 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6139 gen_helper_fucomi_ST0_FT0(cpu_env);
6140 set_cc_op(s, CC_OP_EFLAGS);
6142 case 0x1e: /* fcomi */
6143 if (!(s->cpuid_features & CPUID_CMOV)) {
6146 gen_update_cc_op(s);
6147 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6148 gen_helper_fcomi_ST0_FT0(cpu_env);
6149 set_cc_op(s, CC_OP_EFLAGS);
6151 case 0x28: /* ffree sti */
6152 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
6154 case 0x2a: /* fst sti */
6155 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
6157 case 0x2b: /* fstp sti */
6158 case 0x0b: /* fstp1 sti, undocumented op */
6159 case 0x3a: /* fstp8 sti, undocumented op */
6160 case 0x3b: /* fstp9 sti, undocumented op */
6161 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
6162 gen_helper_fpop(cpu_env);
6164 case 0x2c: /* fucom st(i) */
6165 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6166 gen_helper_fucom_ST0_FT0(cpu_env);
6168 case 0x2d: /* fucomp st(i) */
6169 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6170 gen_helper_fucom_ST0_FT0(cpu_env);
6171 gen_helper_fpop(cpu_env);
6173 case 0x33: /* de/3 */
6175 case 1: /* fcompp */
6176 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
6177 gen_helper_fcom_ST0_FT0(cpu_env);
6178 gen_helper_fpop(cpu_env);
6179 gen_helper_fpop(cpu_env);
6185 case 0x38: /* ffreep sti, undocumented op */
6186 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
6187 gen_helper_fpop(cpu_env);
6189 case 0x3c: /* df/4 */
6192 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
6193 tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
6194 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
6200 case 0x3d: /* fucomip */
6201 if (!(s->cpuid_features & CPUID_CMOV)) {
6204 gen_update_cc_op(s);
6205 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6206 gen_helper_fucomi_ST0_FT0(cpu_env);
6207 gen_helper_fpop(cpu_env);
6208 set_cc_op(s, CC_OP_EFLAGS);
6210 case 0x3e: /* fcomip */
6211 if (!(s->cpuid_features & CPUID_CMOV)) {
6214 gen_update_cc_op(s);
6215 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6216 gen_helper_fcomi_ST0_FT0(cpu_env);
6217 gen_helper_fpop(cpu_env);
6218 set_cc_op(s, CC_OP_EFLAGS);
6220 case 0x10 ... 0x13: /* fcmovxx */
6225 static const uint8_t fcmov_cc[8] = {
6232 if (!(s->cpuid_features & CPUID_CMOV)) {
6235 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
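/* The low bit of the condition encoding negates it; op1 is the
inverse of the fcmov condition, so the branch below skips the fmov
exactly when the move should not happen. */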
6236 l1 = gen_new_label();
6237 gen_jcc1_noeob(s, op1, l1);
6238 gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
6247 /************************/
6250 case 0xa4: /* movsS */
6252 ot = mo_b_d(b, dflag);
6253 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6254 gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6260 case 0xaa: /* stosS */
6262 ot = mo_b_d(b, dflag);
6263 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6264 gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6269 case 0xac: /* lodsS */
6271 ot = mo_b_d(b, dflag);
6272 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6273 gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6278 case 0xae: /* scasS */
6280 ot = mo_b_d(b, dflag);
6281 if (prefixes & PREFIX_REPNZ) {
6282 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6283 } else if (prefixes & PREFIX_REPZ) {
6284 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6290 case 0xa6: /* cmpsS */
6292 ot = mo_b_d(b, dflag);
6293 if (prefixes & PREFIX_REPNZ) {
6294 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6295 } else if (prefixes & PREFIX_REPZ) {
6296 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6301 case 0x6c: /* insS */
6303 ot = mo_b_d32(b, dflag);
6304 tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
6305 gen_check_io(s, ot, pc_start - s->cs_base,
6306 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
6307 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6308 gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6311 if (s->base.tb->cflags & CF_USE_ICOUNT) {
6312 gen_jmp(s, s->pc - s->cs_base);
6316 case 0x6e: /* outsS */
6318 ot = mo_b_d32(b, dflag);
6319 tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
6320 gen_check_io(s, ot, pc_start - s->cs_base,
6321 svm_is_rep(prefixes) | 4);
6322 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6323 gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6326 if (s->base.tb->cflags & CF_USE_ICOUNT) {
6327 gen_jmp(s, s->pc - s->cs_base);
6332 /************************/
6337 ot = mo_b_d32(b, dflag);
6338 val = cpu_ldub_code(env, s->pc++);
6339 tcg_gen_movi_tl(cpu_T0, val);
6340 gen_check_io(s, ot, pc_start - s->cs_base,
6341 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6342 if (s->base.tb->cflags & CF_USE_ICOUNT) {
6345 tcg_gen_movi_i32(cpu_tmp2_i32, val);
6346 gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32);
6347 gen_op_mov_reg_v(ot, R_EAX, cpu_T1);
6348 gen_bpt_io(s, cpu_tmp2_i32, ot);
6349 if (s->base.tb->cflags & CF_USE_ICOUNT) {
6351 gen_jmp(s, s->pc - s->cs_base);
6356 ot = mo_b_d32(b, dflag);
6357 val = cpu_ldub_code(env, s->pc++);
6358 tcg_gen_movi_tl(cpu_T0, val);
6359 gen_check_io(s, ot, pc_start - s->cs_base,
6360 svm_is_rep(prefixes));
6361 gen_op_mov_v_reg(ot, cpu_T1, R_EAX);
6363 if (s->base.tb->cflags & CF_USE_ICOUNT) {
6366 tcg_gen_movi_i32(cpu_tmp2_i32, val);
6367 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
6368 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6369 gen_bpt_io(s, cpu_tmp2_i32, ot);
6370 if (s->base.tb->cflags & CF_USE_ICOUNT) {
6372 gen_jmp(s, s->pc - s->cs_base);
6377 ot = mo_b_d32(b, dflag);
6378 tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
6379 gen_check_io(s, ot, pc_start - s->cs_base,
6380 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6381 if (s->base.tb->cflags & CF_USE_ICOUNT) {
6384 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
6385 gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32);
6386 gen_op_mov_reg_v(ot, R_EAX, cpu_T1);
6387 gen_bpt_io(s, cpu_tmp2_i32, ot);
6388 if (s->base.tb->cflags & CF_USE_ICOUNT) {
6390 gen_jmp(s, s->pc - s->cs_base);
6395 ot = mo_b_d32(b, dflag);
6396 tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
6397 gen_check_io(s, ot, pc_start - s->cs_base,
6398 svm_is_rep(prefixes));
6399 gen_op_mov_v_reg(ot, cpu_T1, R_EAX);
6401 if (s->base.tb->cflags & CF_USE_ICOUNT) {
6404 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
6405 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
6406 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6407 gen_bpt_io(s, cpu_tmp2_i32, ot);
6408 if (s->base.tb->cflags & CF_USE_ICOUNT) {
6410 gen_jmp(s, s->pc - s->cs_base);
6414 /************************/
6416 case 0xc2: /* ret im */
6417 val = cpu_ldsw_code(env, s->pc);
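/* The immediate is the count of extra stack bytes to release in
addition to the return address itself (1 << ot bytes). */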
6420 gen_stack_update(s, val + (1 << ot));
6421 /* Note that gen_pop_T0 uses a zero-extending load. */
6422 gen_op_jmp_v(cpu_T0);
6426 case 0xc3: /* ret */
6428 gen_pop_update(s, ot);
6429 /* Note that gen_pop_T0 uses a zero-extending load. */
6430 gen_op_jmp_v(cpu_T0);
6434 case 0xca: /* lret im */
6435 val = cpu_ldsw_code(env, s->pc);
6438 if (s->pe && !s->vm86) {
6439 gen_update_cc_op(s);
6440 gen_jmp_im(pc_start - s->cs_base);
6441 gen_helper_lret_protected(cpu_env, tcg_const_i32(dflag - 1),
6442 tcg_const_i32(val));
6446 gen_op_ld_v(s, dflag, cpu_T0, cpu_A0);
6447 /* NOTE: keeping EIP updated is not a problem in case of exception. */
6449 gen_op_jmp_v(cpu_T0);
6451 gen_add_A0_im(s, 1 << dflag);
6452 gen_op_ld_v(s, dflag, cpu_T0, cpu_A0);
6453 gen_op_movl_seg_T0_vm(R_CS);
6454 /* add stack offset */
6455 gen_stack_update(s, val + (2 << dflag));
6459 case 0xcb: /* lret */
6462 case 0xcf: /* iret */
6463 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
6466 gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
6467 set_cc_op(s, CC_OP_EFLAGS);
6468 } else if (s->vm86) {
6470 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6472 gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
6473 set_cc_op(s, CC_OP_EFLAGS);
6476 gen_helper_iret_protected(cpu_env, tcg_const_i32(dflag - 1),
6477 tcg_const_i32(s->pc - s->cs_base));
6478 set_cc_op(s, CC_OP_EFLAGS);
6482 case 0xe8: /* call im */
6484 if (dflag != MO_16) {
6485 tval = (int32_t)insn_get(env, s, MO_32);
6487 tval = (int16_t)insn_get(env, s, MO_16);
6489 next_eip = s->pc - s->cs_base;
6491 if (dflag == MO_16) {
6493 } else if (!CODE64(s)) {
6496 tcg_gen_movi_tl(cpu_T0, next_eip);
6497 gen_push_v(s, cpu_T0);
6502 case 0x9a: /* lcall im */
6504 unsigned int selector, offset;
6509 offset = insn_get(env, s, ot);
6510 selector = insn_get(env, s, MO_16);
6512 tcg_gen_movi_tl(cpu_T0, selector);
6513 tcg_gen_movi_tl(cpu_T1, offset);
6516 case 0xe9: /* jmp im */
6517 if (dflag != MO_16) {
6518 tval = (int32_t)insn_get(env, s, MO_32);
6520 tval = (int16_t)insn_get(env, s, MO_16);
6522 tval += s->pc - s->cs_base;
6523 if (dflag == MO_16) {
6525 } else if (!CODE64(s)) {
6531 case 0xea: /* ljmp im */
6533 unsigned int selector, offset;
6538 offset = insn_get(env, s, ot);
6539 selector = insn_get(env, s, MO_16);
6541 tcg_gen_movi_tl(cpu_T0, selector);
6542 tcg_gen_movi_tl(cpu_T1, offset);
6545 case 0xeb: /* jmp Jb */
6546 tval = (int8_t)insn_get(env, s, MO_8);
6547 tval += s->pc - s->cs_base;
6548 if (dflag == MO_16) {
6553 case 0x70 ... 0x7f: /* jcc Jb */
6554 tval = (int8_t)insn_get(env, s, MO_8);
6556 case 0x180 ... 0x18f: /* jcc Jv */
6557 if (dflag != MO_16) {
6558 tval = (int32_t)insn_get(env, s, MO_32);
6560 tval = (int16_t)insn_get(env, s, MO_16);
6563 next_eip = s->pc - s->cs_base;
6565 if (dflag == MO_16) {
6569 gen_jcc(s, b, tval, next_eip);
6572 case 0x190 ... 0x19f: /* setcc Gv */
6573 modrm = cpu_ldub_code(env, s->pc++);
6574 gen_setcc1(s, b, cpu_T0);
6575 gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
6577 case 0x140 ... 0x14f: /* cmov Gv, Ev */
6578 if (!(s->cpuid_features & CPUID_CMOV)) {
6582 modrm = cpu_ldub_code(env, s->pc++);
6583 reg = ((modrm >> 3) & 7) | rex_r;
6584 gen_cmovcc1(env, s, ot, b, modrm, reg);
6587 /************************/
6589 case 0x9c: /* pushf */
6590 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
6591 if (s->vm86 && s->iopl != 3) {
6592 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6594 gen_update_cc_op(s);
6595 gen_helper_read_eflags(cpu_T0, cpu_env);
6596 gen_push_v(s, cpu_T0);
6599 case 0x9d: /* popf */
6600 gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
6601 if (s->vm86 && s->iopl != 3) {
6602 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6606 if (dflag != MO_16) {
6607 gen_helper_write_eflags(cpu_env, cpu_T0,
6608 tcg_const_i32((TF_MASK | AC_MASK |
6613 gen_helper_write_eflags(cpu_env, cpu_T0,
6614 tcg_const_i32((TF_MASK | AC_MASK |
6616 IF_MASK | IOPL_MASK)
6620 if (s->cpl <= s->iopl) {
6621 if (dflag != MO_16) {
6622 gen_helper_write_eflags(cpu_env, cpu_T0,
6623 tcg_const_i32((TF_MASK |
6629 gen_helper_write_eflags(cpu_env, cpu_T0,
6630 tcg_const_i32((TF_MASK |
6638 if (dflag != MO_16) {
6639 gen_helper_write_eflags(cpu_env, cpu_T0,
6640 tcg_const_i32((TF_MASK | AC_MASK |
6641 ID_MASK | NT_MASK)));
6643 gen_helper_write_eflags(cpu_env, cpu_T0,
6644 tcg_const_i32((TF_MASK | AC_MASK |
6650 gen_pop_update(s, ot);
6651 set_cc_op(s, CC_OP_EFLAGS);
6652 /* abort translation because TF/AC flag may change */
6653 gen_jmp_im(s->pc - s->cs_base);
6657 case 0x9e: /* sahf */
6658 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6660 gen_op_mov_v_reg(MO_8, cpu_T0, R_AH);
6661 gen_compute_eflags(s);
6662 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
6663 tcg_gen_andi_tl(cpu_T0, cpu_T0, CC_S | CC_Z | CC_A | CC_P | CC_C);
6664 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T0);
6666 case 0x9f: /* lahf */
6667 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6669 gen_compute_eflags(s);
6670 /* Note: gen_compute_eflags() only gives the condition codes */
6671 tcg_gen_ori_tl(cpu_T0, cpu_cc_src, 0x02);
6672 gen_op_mov_reg_v(MO_8, R_AH, cpu_T0);
6674 case 0xf5: /* cmc */
6675 gen_compute_eflags(s);
6676 tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6678 case 0xf8: /* clc */
6679 gen_compute_eflags(s);
6680 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
6682 case 0xf9: /* stc */
6683 gen_compute_eflags(s);
6684 tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6686 case 0xfc: /* cld */
6687 tcg_gen_movi_i32(cpu_tmp2_i32, 1);
6688 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
6690 case 0xfd: /* std */
6691 tcg_gen_movi_i32(cpu_tmp2_i32, -1);
6692 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
6695 /************************/
6696 /* bit operations */
6697 case 0x1ba: /* bt/bts/btr/btc Gv, im */
6699 modrm = cpu_ldub_code(env, s->pc++);
6700 op = (modrm >> 3) & 7;
6701 mod = (modrm >> 6) & 3;
6702 rm = (modrm & 7) | REX_B(s);
6705 gen_lea_modrm(env, s, modrm);
6706 if (!(s->prefix & PREFIX_LOCK)) {
6707 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
6710 gen_op_mov_v_reg(ot, cpu_T0, rm);
6713 val = cpu_ldub_code(env, s->pc++);
6714 tcg_gen_movi_tl(cpu_T1, val);
6719 case 0x1a3: /* bt Gv, Ev */
6722 case 0x1ab: /* bts */
6725 case 0x1b3: /* btr */
6728 case 0x1bb: /* btc */
6732 modrm = cpu_ldub_code(env, s->pc++);
6733 reg = ((modrm >> 3) & 7) | rex_r;
6734 mod = (modrm >> 6) & 3;
6735 rm = (modrm & 7) | REX_B(s);
6736 gen_op_mov_v_reg(MO_32, cpu_T1, reg);
6738 AddressParts a = gen_lea_modrm_0(env, s, modrm);
6739 /* specific case: we need to add a displacement */
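/* A0 += (sext(bit_offset) >> (3 + ot)) << ot: the upper bits of the
bit offset select the memory word that contains the bit, converted
to a byte displacement; the low 3 + ot bits (masked off further
down) select the bit within that word. */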
6740 gen_exts(ot, cpu_T1);
6741 tcg_gen_sari_tl(cpu_tmp0, cpu_T1, 3 + ot);
6742 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
6743 tcg_gen_add_tl(cpu_A0, gen_lea_modrm_1(a), cpu_tmp0);
6744 gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
6745 if (!(s->prefix & PREFIX_LOCK)) {
6746 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
6749 gen_op_mov_v_reg(ot, cpu_T0, rm);
6752 tcg_gen_andi_tl(cpu_T1, cpu_T1, (1 << (3 + ot)) - 1);
6753 tcg_gen_movi_tl(cpu_tmp0, 1);
6754 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T1);
6755 if (s->prefix & PREFIX_LOCK) {
6758 /* Needs no atomic ops; we suppressed the normal
6759 memory load for LOCK above so do it now. */
6760 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
6763 tcg_gen_atomic_fetch_or_tl(cpu_T0, cpu_A0, cpu_tmp0,
6764 s->mem_index, ot | MO_LE);
6767 tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
6768 tcg_gen_atomic_fetch_and_tl(cpu_T0, cpu_A0, cpu_tmp0,
6769 s->mem_index, ot | MO_LE);
6773 tcg_gen_atomic_fetch_xor_tl(cpu_T0, cpu_A0, cpu_tmp0,
6774 s->mem_index, ot | MO_LE);
6777 tcg_gen_shr_tl(cpu_tmp4, cpu_T0, cpu_T1);
6779 tcg_gen_shr_tl(cpu_tmp4, cpu_T0, cpu_T1);
6782 /* Data already loaded; nothing to do. */
6785 tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_tmp0);
6788 tcg_gen_andc_tl(cpu_T0, cpu_T0, cpu_tmp0);
6792 tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_tmp0);
6797 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
6799 gen_op_mov_reg_v(ot, rm, cpu_T0);
6804 /* Delay all CC updates until after the store above. Note that
6805 C is the result of the test, Z is unchanged, and the others
6806 are all undefined. */
6808 case CC_OP_MULB ... CC_OP_MULQ:
6809 case CC_OP_ADDB ... CC_OP_ADDQ:
6810 case CC_OP_ADCB ... CC_OP_ADCQ:
6811 case CC_OP_SUBB ... CC_OP_SUBQ:
6812 case CC_OP_SBBB ... CC_OP_SBBQ:
6813 case CC_OP_LOGICB ... CC_OP_LOGICQ:
6814 case CC_OP_INCB ... CC_OP_INCQ:
6815 case CC_OP_DECB ... CC_OP_DECQ:
6816 case CC_OP_SHLB ... CC_OP_SHLQ:
6817 case CC_OP_SARB ... CC_OP_SARQ:
6818 case CC_OP_BMILGB ... CC_OP_BMILGQ:
6819 /* Z was going to be computed from the non-zero status of CC_DST.
6820 We can get that same Z value (and the new C value) by leaving
6821 CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the same width. */
6823 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
6824 set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
6827 /* Otherwise, generate EFLAGS and replace the C bit. */
6828 gen_compute_eflags(s);
6829 tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, cpu_tmp4,
6834 case 0x1bc: /* bsf / tzcnt */
6835 case 0x1bd: /* bsr / lzcnt */
6837 modrm = cpu_ldub_code(env, s->pc++);
6838 reg = ((modrm >> 3) & 7) | rex_r;
6839 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
6840 gen_extu(ot, cpu_T0);
6842 /* Note that lzcnt and tzcnt are in different extensions. */
6843 if ((prefixes & PREFIX_REPZ)
6845 ? s->cpuid_ext3_features & CPUID_EXT3_ABM
6846 : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
6848 /* For lzcnt/tzcnt, C bit is defined related to the input. */
6849 tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
6851 /* For lzcnt, reduce the target_ulong result by the
6852 number of zeros that we expect to find at the top. */
6853 tcg_gen_clzi_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS);
6854 tcg_gen_subi_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - size);
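/* E.g. with a 16-bit operand, clzi over the zero-extended value
counts TARGET_LONG_BITS - 16 spurious leading zeros, which the
subtraction removes. */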
6856 /* For tzcnt, a zero input must return the operand size. */
6857 tcg_gen_ctzi_tl(cpu_T0, cpu_T0, size);
6859 /* For lzcnt/tzcnt, Z bit is defined related to the result. */
6860 gen_op_update1_cc();
6861 set_cc_op(s, CC_OP_BMILGB + ot);
6863 /* For bsr/bsf, only the Z bit is defined and it is related
6864 to the input and not the result. */
6865 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
6866 set_cc_op(s, CC_OP_LOGICB + ot);
6868 /* ??? The manual says that the output is undefined when the
6869 input is zero, but real hardware leaves it unchanged, and
6870 real programs appear to depend on that. Accomplish this
6871 by passing the output as the value to return upon zero. */
6873 /* For bsr, return the bit index of the first 1 bit,
6874 not the count of leading zeros. */
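/* clz yields TARGET_LONG_BITS - 1 - index, and XOR with
TARGET_LONG_BITS - 1 undoes that (no borrows can occur). The
fallback value is pre-XORed the same way, so a zero input leaves
the destination register unchanged. */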
6875 tcg_gen_xori_tl(cpu_T1, cpu_regs[reg], TARGET_LONG_BITS - 1);
6876 tcg_gen_clz_tl(cpu_T0, cpu_T0, cpu_T1);
6877 tcg_gen_xori_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - 1);
6879 tcg_gen_ctz_tl(cpu_T0, cpu_T0, cpu_regs[reg]);
6882 gen_op_mov_reg_v(ot, reg, cpu_T0);
6884 /************************/
6886 case 0x27: /* daa */
6889 gen_update_cc_op(s);
6890 gen_helper_daa(cpu_env);
6891 set_cc_op(s, CC_OP_EFLAGS);
6893 case 0x2f: /* das */
6896 gen_update_cc_op(s);
6897 gen_helper_das(cpu_env);
6898 set_cc_op(s, CC_OP_EFLAGS);
6900 case 0x37: /* aaa */
6903 gen_update_cc_op(s);
6904 gen_helper_aaa(cpu_env);
6905 set_cc_op(s, CC_OP_EFLAGS);
6907 case 0x3f: /* aas */
6910 gen_update_cc_op(s);
6911 gen_helper_aas(cpu_env);
6912 set_cc_op(s, CC_OP_EFLAGS);
6914 case 0xd4: /* aam */
6917 val = cpu_ldub_code(env, s->pc++);
6919 gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
6921 gen_helper_aam(cpu_env, tcg_const_i32(val));
6922 set_cc_op(s, CC_OP_LOGICB);
6925 case 0xd5: /* aad */
6928 val = cpu_ldub_code(env, s->pc++);
6929 gen_helper_aad(cpu_env, tcg_const_i32(val));
6930 set_cc_op(s, CC_OP_LOGICB);
6932 /************************/
6934 case 0x90: /* nop */
6935 /* XXX: correct lock test for all insn */
6936 if (prefixes & PREFIX_LOCK) {
6939 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
6941 goto do_xchg_reg_eax;
6943 if (prefixes & PREFIX_REPZ) {
6944 gen_update_cc_op(s);
6945 gen_jmp_im(pc_start - s->cs_base);
6946 gen_helper_pause(cpu_env, tcg_const_i32(s->pc - pc_start));
6947 s->base.is_jmp = DISAS_NORETURN;
6950 case 0x9b: /* fwait */
6951 if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
6952 (HF_MP_MASK | HF_TS_MASK)) {
6953 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
6955 gen_helper_fwait(cpu_env);
6958 case 0xcc: /* int3 */
6959 gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
6961 case 0xcd: /* int N */
6962 val = cpu_ldub_code(env, s->pc++);
6963 if (s->vm86 && s->iopl != 3) {
6964 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6966 gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
6969 case 0xce: /* into */
6972 gen_update_cc_op(s);
6973 gen_jmp_im(pc_start - s->cs_base);
6974 gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start));
6977 case 0xf1: /* icebp (undocumented, exits to external debugger) */
6978 gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
6980 gen_debug(s, pc_start - s->cs_base);
6983 tb_flush(CPU(x86_env_get_cpu(env)));
6984 qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
6988 case 0xfa: /* cli */
6990 if (s->cpl <= s->iopl) {
6991 gen_helper_cli(cpu_env);
6993 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6997 gen_helper_cli(cpu_env);
6999 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7003 case 0xfb: /* sti */
7004 if (s->vm86 ? s->iopl == 3 : s->cpl <= s->iopl) {
7005 gen_helper_sti(cpu_env);
7006 /* interrupts are recognized only after the insn following STI */
7007 gen_jmp_im(s->pc - s->cs_base);
7008 gen_eob_inhibit_irq(s, true);
7010 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7013 case 0x62: /* bound */
7017 modrm = cpu_ldub_code(env, s->pc++);
7018 reg = (modrm >> 3) & 7;
7019 mod = (modrm >> 6) & 3;
7022 gen_op_mov_v_reg(ot, cpu_T0, reg);
7023 gen_lea_modrm(env, s, modrm);
7024 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
7026 gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32);
7028 gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32);
7031 case 0x1c8 ... 0x1cf: /* bswap reg */
7032 reg = (b & 7) | REX_B(s);
7033 #ifdef TARGET_X86_64
7034 if (dflag == MO_64) {
7035 gen_op_mov_v_reg(MO_64, cpu_T0, reg);
7036 tcg_gen_bswap64_i64(cpu_T0, cpu_T0);
7037 gen_op_mov_reg_v(MO_64, reg, cpu_T0);
7041 gen_op_mov_v_reg(MO_32, cpu_T0, reg);
7042 tcg_gen_ext32u_tl(cpu_T0, cpu_T0);
7043 tcg_gen_bswap32_tl(cpu_T0, cpu_T0);
7044 gen_op_mov_reg_v(MO_32, reg, cpu_T0);
7047 case 0xd6: /* salc */
7050 gen_compute_eflags_c(s, cpu_T0);
7051 tcg_gen_neg_tl(cpu_T0, cpu_T0);
7052 gen_op_mov_reg_v(MO_8, R_EAX, cpu_T0);
7054 case 0xe0: /* loopnz */
7055 case 0xe1: /* loopz */
7056 case 0xe2: /* loop */
7057 case 0xe3: /* jecxz */
7059 TCGLabel *l1, *l2, *l3;
7061 tval = (int8_t)insn_get(env, s, MO_8);
7062 next_eip = s->pc - s->cs_base;
7064 if (dflag == MO_16) {
7068 l1 = gen_new_label();
7069 l2 = gen_new_label();
7070 l3 = gen_new_label();
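/* Label roles: l1 = branch taken (EIP = tval); l3 = fall through
(EIP = next_eip); l2 = the common exit where the TB ends. */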
7073 case 0: /* loopnz */
7075 gen_op_add_reg_im(s->aflag, R_ECX, -1);
7076 gen_op_jz_ecx(s->aflag, l3);
7077 gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
7080 gen_op_add_reg_im(s->aflag, R_ECX, -1);
7081 gen_op_jnz_ecx(s->aflag, l1);
7085 gen_op_jz_ecx(s->aflag, l1);
7090 gen_jmp_im(next_eip);
7099 case 0x130: /* wrmsr */
7100 case 0x132: /* rdmsr */
7102 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7104 gen_update_cc_op(s);
7105 gen_jmp_im(pc_start - s->cs_base);
7107 gen_helper_rdmsr(cpu_env);
7109 gen_helper_wrmsr(cpu_env);
7113 case 0x131: /* rdtsc */
7114 gen_update_cc_op(s);
7115 gen_jmp_im(pc_start - s->cs_base);
7116 if (s->base.tb->cflags & CF_USE_ICOUNT) {
7119 gen_helper_rdtsc(cpu_env);
7120 if (s->base.tb->cflags & CF_USE_ICOUNT) {
7122 gen_jmp(s, s->pc - s->cs_base);
7125 case 0x133: /* rdpmc */
7126 gen_update_cc_op(s);
7127 gen_jmp_im(pc_start - s->cs_base);
7128 gen_helper_rdpmc(cpu_env);
7130 case 0x134: /* sysenter */
7131 /* SYSENTER is valid in 64-bit mode only on Intel CPUs */
7132 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
7135 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7137 gen_helper_sysenter(cpu_env);
7141 case 0x135: /* sysexit */
7142 /* SYSEXIT is valid in 64-bit mode only on Intel CPUs */
7143 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
7146 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7148 gen_helper_sysexit(cpu_env, tcg_const_i32(dflag - 1));
7152 #ifdef TARGET_X86_64
7153 case 0x105: /* syscall */
7154 /* XXX: is it usable in real mode ? */
7155 gen_update_cc_op(s);
7156 gen_jmp_im(pc_start - s->cs_base);
7157 gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start));
7158 /* TF handling for the syscall insn is different. The TF bit is checked
7159 after the syscall insn completes. This allows #DB to not be
7160 generated after one has entered CPL0 if TF is set in FMASK. */
7161 gen_eob_worker(s, false, true);
7163 case 0x107: /* sysret */
7165 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7167 gen_helper_sysret(cpu_env, tcg_const_i32(dflag - 1));
7168 /* condition codes are modified only in long mode */
7170 set_cc_op(s, CC_OP_EFLAGS);
7172 /* TF handling for the sysret insn is different. The TF bit is
7173 checked after the sysret insn completes. This allows #DB to be
7174 generated "as if" the syscall insn in userspace has just
7176 gen_eob_worker(s, false, true);
7180 case 0x1a2: /* cpuid */
7181 gen_update_cc_op(s);
7182 gen_jmp_im(pc_start - s->cs_base);
7183 gen_helper_cpuid(cpu_env);
7185 case 0xf4: /* hlt */
7187 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7189 gen_update_cc_op(s);
7190 gen_jmp_im(pc_start - s->cs_base);
7191 gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
7192 s->base.is_jmp = DISAS_NORETURN;
7196 modrm = cpu_ldub_code(env, s->pc++);
7197 mod = (modrm >> 6) & 3;
7198 op = (modrm >> 3) & 7;
7201 if (!s->pe || s->vm86)
7203 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
7204 tcg_gen_ld32u_tl(cpu_T0, cpu_env,
7205 offsetof(CPUX86State, ldt.selector));
7206 ot = mod == 3 ? dflag : MO_16;
7207 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
7210 if (!s->pe || s->vm86)
7213 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7215 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
7216 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7217 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
7218 gen_helper_lldt(cpu_env, cpu_tmp2_i32);
7222 if (!s->pe || s->vm86)
7224 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
7225 tcg_gen_ld32u_tl(cpu_T0, cpu_env,
7226 offsetof(CPUX86State, tr.selector));
7227 ot = mod == 3 ? dflag : MO_16;
7228 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
7231 if (!s->pe || s->vm86)
7234 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7236 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
7237 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7238 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
7239 gen_helper_ltr(cpu_env, cpu_tmp2_i32);
7244 if (!s->pe || s->vm86)
7246 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7247 gen_update_cc_op(s);
7249 gen_helper_verr(cpu_env, cpu_T0);
7251 gen_helper_verw(cpu_env, cpu_T0);
7253 set_cc_op(s, CC_OP_EFLAGS);
7261 modrm = cpu_ldub_code(env, s->pc++);
7263 CASE_MODRM_MEM_OP(0): /* sgdt */
7264 gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
7265 gen_lea_modrm(env, s, modrm);
7266 tcg_gen_ld32u_tl(cpu_T0,
7267 cpu_env, offsetof(CPUX86State, gdt.limit));
7268 gen_op_st_v(s, MO_16, cpu_T0, cpu_A0);
7269 gen_add_A0_im(s, 2);
7270 tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base));
7271 if (dflag == MO_16) {
7272 tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
7274 gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
7277 case 0xc8: /* monitor */
7278 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) {
7281 gen_update_cc_op(s);
7282 gen_jmp_im(pc_start - s->cs_base);
7283 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EAX]);
7284 gen_extu(s->aflag, cpu_A0);
7285 gen_add_A0_ds_seg(s);
7286 gen_helper_monitor(cpu_env, cpu_A0);
7289 case 0xc9: /* mwait */
7290 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) {
7293 gen_update_cc_op(s);
7294 gen_jmp_im(pc_start - s->cs_base);
7295 gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
7299 case 0xca: /* clac */
7300 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
7304 gen_helper_clac(cpu_env);
7305 gen_jmp_im(s->pc - s->cs_base);
7309 case 0xcb: /* stac */
7310 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
7314 gen_helper_stac(cpu_env);
7315 gen_jmp_im(s->pc - s->cs_base);
7319 CASE_MODRM_MEM_OP(1): /* sidt */
7320 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
7321 gen_lea_modrm(env, s, modrm);
7322 tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.limit));
7323 gen_op_st_v(s, MO_16, cpu_T0, cpu_A0);
7324 gen_add_A0_im(s, 2);
7325 tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.base));
7326 if (dflag == MO_16) {
7327 tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
7329 gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
7332 case 0xd0: /* xgetbv */
7333 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
7334 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
7335 | PREFIX_REPZ | PREFIX_REPNZ))) {
7338 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
7339 gen_helper_xgetbv(cpu_tmp1_i64, cpu_env, cpu_tmp2_i32);
7340 tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_tmp1_i64);
7343 case 0xd1: /* xsetbv */
7344 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
7345 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
7346 | PREFIX_REPZ | PREFIX_REPNZ))) {
7350 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7353 tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
7355 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
7356 gen_helper_xsetbv(cpu_env, cpu_tmp2_i32, cpu_tmp1_i64);
7357 /* End TB because translation flags may change. */
7358 gen_jmp_im(s->pc - s->cs_base);
7362 case 0xd8: /* VMRUN */
7363 if (!(s->flags & HF_SVME_MASK) || !s->pe) {
7367 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7370 gen_update_cc_op(s);
7371 gen_jmp_im(pc_start - s->cs_base);
7372 gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag - 1),
7373 tcg_const_i32(s->pc - pc_start));
7375 s->base.is_jmp = DISAS_NORETURN;
7378 case 0xd9: /* VMMCALL */
7379 if (!(s->flags & HF_SVME_MASK)) {
7382 gen_update_cc_op(s);
7383 gen_jmp_im(pc_start - s->cs_base);
7384 gen_helper_vmmcall(cpu_env);
7387 case 0xda: /* VMLOAD */
7388 if (!(s->flags & HF_SVME_MASK) || !s->pe) {
7392 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7395 gen_update_cc_op(s);
7396 gen_jmp_im(pc_start - s->cs_base);
7397 gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag - 1));
7400 case 0xdb: /* VMSAVE */
7401 if (!(s->flags & HF_SVME_MASK) || !s->pe) {
7405 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7408 gen_update_cc_op(s);
7409 gen_jmp_im(pc_start - s->cs_base);
7410 gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag - 1));
7413 case 0xdc: /* STGI */
7414 if ((!(s->flags & HF_SVME_MASK)
7415 && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
7420 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7423 gen_update_cc_op(s);
7424 gen_jmp_im(pc_start - s->cs_base);
7425 gen_helper_stgi(cpu_env);
7428 case 0xdd: /* CLGI */
7429 if (!(s->flags & HF_SVME_MASK) || !s->pe) {
7433 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7436 gen_update_cc_op(s);
7437 gen_jmp_im(pc_start - s->cs_base);
7438 gen_helper_clgi(cpu_env);
7441 case 0xde: /* SKINIT */
7442 if ((!(s->flags & HF_SVME_MASK)
7443 && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
7447 gen_update_cc_op(s);
7448 gen_jmp_im(pc_start - s->cs_base);
7449 gen_helper_skinit(cpu_env);
7452 case 0xdf: /* INVLPGA */
7453 if (!(s->flags & HF_SVME_MASK) || !s->pe) {
7457 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7460 gen_update_cc_op(s);
7461 gen_jmp_im(pc_start - s->cs_base);
7462 gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag - 1));
7465 CASE_MODRM_MEM_OP(2): /* lgdt */
7467 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7470 gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_WRITE);
7471 gen_lea_modrm(env, s, modrm);
7472 gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0);
7473 gen_add_A0_im(s, 2);
7474 gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
7475 if (dflag == MO_16) {
7476 tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
7478 tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base));
7479 tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, gdt.limit));
        CASE_MODRM_MEM_OP(3): /* lidt */
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_WRITE);
            gen_lea_modrm(env, s, modrm);
            gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0);
            gen_add_A0_im(s, 2);
            gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
            if (dflag == MO_16) {
                tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
            }
            tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.base));
            tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, idt.limit));
            break;
        CASE_MODRM_OP(4): /* smsw */
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
            tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, cr[0]));
            if (CODE64(s)) {
                mod = (modrm >> 6) & 3;
                ot = (mod != 3 ? MO_16 : s->dflag);
            } else {
                ot = MO_16;
            }
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            break;
        case 0xee: /* rdpkru */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_rdpkru(cpu_tmp1_i64, cpu_env, cpu_tmp2_i32);
            tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_tmp1_i64);
            break;
        case 0xef: /* wrpkru */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_wrpkru(cpu_env, cpu_tmp2_i32, cpu_tmp1_i64);
            break;
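
        /* LMSW can set CR0.PE and so change the translation mode;
         * the TB therefore ends right after the helper call.
         */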
        CASE_MODRM_OP(6): /* lmsw */
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
            gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
            gen_helper_lmsw(cpu_env, cpu_T0);
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
            break;
        CASE_MODRM_MEM_OP(7): /* invlpg */
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_lea_modrm(env, s, modrm);
            gen_helper_invlpg(cpu_env, cpu_A0);
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
            break;
        case 0xf8: /* swapgs */
#ifdef TARGET_X86_64
            if (CODE64(s)) {
                if (s->cpl != 0) {
                    gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                } else {
                    tcg_gen_mov_tl(cpu_T0, cpu_seg_base[R_GS]);
                    tcg_gen_ld_tl(cpu_seg_base[R_GS], cpu_env,
                                  offsetof(CPUX86State, kernelgsbase));
                    tcg_gen_st_tl(cpu_T0, cpu_env,
                                  offsetof(CPUX86State, kernelgsbase));
                }
                break;
            }
#endif
            goto illegal_op;
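
        /* RDTSCP reads the time stamp counter; under icount the helper
         * call is bracketed by gen_io_start()/gen_io_end() and the TB
         * is ended so that instruction counting stays exact.
         */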
        case 0xf9: /* rdtscp */
            if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            if (s->base.tb->cflags & CF_USE_ICOUNT) {
                gen_io_start();
            }
            gen_helper_rdtscp(cpu_env);
            if (s->base.tb->cflags & CF_USE_ICOUNT) {
                gen_io_end();
                gen_jmp(s, s->pc - s->cs_base);
            }
            break;

        default:
            goto unknown_op;
        }
        break;
    case 0x108: /* invd */
    case 0x109: /* wbinvd */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
            /* nothing to do */
        }
        break;
    case 0x63: /* arpl or movslS (x86_64) */
#ifdef TARGET_X86_64
        if (CODE64(s)) {
            int d_ot;
            /* d_ot is the size of destination */
            d_ot = dflag;

            modrm = cpu_ldub_code(env, s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);

            if (mod == 3) {
                gen_op_mov_v_reg(MO_32, cpu_T0, rm);
                /* sign extend */
                if (d_ot == MO_64) {
                    tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
                }
                gen_op_mov_reg_v(d_ot, reg, cpu_T0);
            } else {
                gen_lea_modrm(env, s, modrm);
                gen_op_ld_v(s, MO_32 | MO_SIGN, cpu_T0, cpu_A0);
                gen_op_mov_reg_v(d_ot, reg, cpu_T0);
            }
        } else
#endif
        {
            TCGLabel *label1;
            TCGv t0, t1, t2, a0;

            if (!s->pe || s->vm86)
                goto illegal_op;
            t0 = tcg_temp_local_new();
            t1 = tcg_temp_local_new();
            t2 = tcg_temp_local_new();
            ot = MO_16;
            modrm = cpu_ldub_code(env, s->pc++);
            reg = (modrm >> 3) & 7;
            mod = (modrm >> 6) & 3;
            rm = modrm & 7;
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm);
                gen_op_ld_v(s, ot, t0, cpu_A0);
                a0 = tcg_temp_local_new();
                tcg_gen_mov_tl(a0, cpu_A0);
            } else {
                gen_op_mov_v_reg(ot, t0, rm);
                TCGV_UNUSED(a0);
            }
            gen_op_mov_v_reg(ot, t1, reg);
            tcg_gen_andi_tl(cpu_tmp0, t0, 3);
            tcg_gen_andi_tl(t1, t1, 3);
            tcg_gen_movi_tl(t2, 0);
            label1 = gen_new_label();
            tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
            tcg_gen_andi_tl(t0, t0, ~3);
            tcg_gen_or_tl(t0, t0, t1);
            tcg_gen_movi_tl(t2, CC_Z);
            gen_set_label(label1);
            if (mod != 3) {
                gen_op_st_v(s, ot, t0, a0);
                tcg_temp_free(a0);
            } else {
                gen_op_mov_reg_v(ot, rm, t0);
            }
            gen_compute_eflags(s);
            tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
            tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
            tcg_temp_free(t0);
            tcg_temp_free(t1);
            tcg_temp_free(t2);
        }
        break;
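
    /* LAR and LSL: the helper reports a valid selector by setting CC_Z
     * in cc_src; the result is written back only in that case.
     */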
    case 0x102: /* lar */
    case 0x103: /* lsl */
        {
            TCGLabel *label1;
            TCGv t0;
            if (!s->pe || s->vm86)
                goto illegal_op;
            ot = dflag != MO_16 ? MO_32 : MO_16;
            modrm = cpu_ldub_code(env, s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
            t0 = tcg_temp_local_new();
            gen_update_cc_op(s);
            if (b == 0x102) {
                gen_helper_lar(t0, cpu_env, cpu_T0);
            } else {
                gen_helper_lsl(t0, cpu_env, cpu_T0);
            }
            tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
            label1 = gen_new_label();
            tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
            gen_op_mov_reg_v(ot, reg, t0);
            gen_set_label(label1);
            set_cc_op(s, CC_OP_EFLAGS);
            tcg_temp_free(t0);
        }
        break;
    case 0x118:
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        switch(op) {
        case 0: /* prefetchnta */
        case 1: /* prefetcht0 */
        case 2: /* prefetcht1 */
        case 3: /* prefetcht2 */
            if (mod == 3)
                goto illegal_op;
            gen_nop_modrm(env, s, modrm);
            /* nothing more to do */
            break;
        default: /* nop (multi byte) */
            gen_nop_modrm(env, s, modrm);
            break;
        }
        break;
    case 0x11a:
        modrm = cpu_ldub_code(env, s->pc++);
        if (s->flags & HF_MPX_EN_MASK) {
            mod = (modrm >> 6) & 3;
            reg = ((modrm >> 3) & 7) | rex_r;
            if (prefixes & PREFIX_REPZ) {
                /* bndcl */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
            } else if (prefixes & PREFIX_REPNZ) {
                /* bndcu */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                TCGv_i64 notu = tcg_temp_new_i64();
                tcg_gen_not_i64(notu, cpu_bndu[reg]);
                gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
                tcg_temp_free_i64(notu);
            } else if (prefixes & PREFIX_DATA) {
                /* bndmov -- from reg/mem */
                if (reg >= 4 || s->aflag == MO_16) {
                    goto illegal_op;
                }
                if (mod == 3) {
                    int reg2 = (modrm & 7) | REX_B(s);
                    if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
                        goto illegal_op;
                    }
                    if (s->flags & HF_MPX_IU_MASK) {
                        tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
                        tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
                    }
                } else {
                    gen_lea_modrm(env, s, modrm);
                    if (CODE64(s)) {
                        tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0,
                                            s->mem_index, MO_LEQ);
                        tcg_gen_addi_tl(cpu_A0, cpu_A0, 8);
                        tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0,
                                            s->mem_index, MO_LEQ);
                    } else {
                        tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0,
                                            s->mem_index, MO_LEUL);
                        tcg_gen_addi_tl(cpu_A0, cpu_A0, 4);
                        tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0,
                                            s->mem_index, MO_LEUL);
                    }
                    /* bnd registers are now in-use */
                    gen_set_hflag(s, HF_MPX_IU_MASK);
                }
            } else if (mod != 3) {
                /* bndldx */
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16
                    || a.base < -1) {
                    goto illegal_op;
                }
                if (a.base >= 0) {
                    tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp);
                } else {
                    tcg_gen_movi_tl(cpu_A0, 0);
                }
                gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
                if (a.index >= 0) {
                    tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]);
                } else {
                    tcg_gen_movi_tl(cpu_T0, 0);
                }
                if (CODE64(s)) {
                    gen_helper_bndldx64(cpu_bndl[reg], cpu_env, cpu_A0, cpu_T0);
                    tcg_gen_ld_i64(cpu_bndu[reg], cpu_env,
                                   offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
                } else {
                    gen_helper_bndldx32(cpu_bndu[reg], cpu_env, cpu_A0, cpu_T0);
                    tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
                    tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
                }
                gen_set_hflag(s, HF_MPX_IU_MASK);
            }
        }
        gen_nop_modrm(env, s, modrm);
        break;
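
    /* 0F 1B is the companion of 0F 1A above: bndmk, bndcn, bndmov to
     * reg/mem and bndstx, again selected by the repz/repnz/data
     * prefixes, with plain nop behavior when MPX is disabled.
     */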
    case 0x11b:
        modrm = cpu_ldub_code(env, s->pc++);
        if (s->flags & HF_MPX_EN_MASK) {
            mod = (modrm >> 6) & 3;
            reg = ((modrm >> 3) & 7) | rex_r;
            if (mod != 3 && (prefixes & PREFIX_REPZ)) {
                /* bndmk */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                if (a.base >= 0) {
                    tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
                    if (!CODE64(s)) {
                        tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
                    }
                } else if (a.base == -1) {
                    /* no base register has lower bound of 0 */
                    tcg_gen_movi_i64(cpu_bndl[reg], 0);
                } else {
                    /* rip-relative generates #ud */
                    goto illegal_op;
                }
                tcg_gen_not_tl(cpu_A0, gen_lea_modrm_1(a));
                if (!CODE64(s)) {
                    tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
                }
                tcg_gen_extu_tl_i64(cpu_bndu[reg], cpu_A0);
                /* bnd registers are now in-use */
                gen_set_hflag(s, HF_MPX_IU_MASK);
                break;
            } else if (prefixes & PREFIX_REPNZ) {
                /* bndcn */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
            } else if (prefixes & PREFIX_DATA) {
                /* bndmov -- to reg/mem */
                if (reg >= 4 || s->aflag == MO_16) {
                    goto illegal_op;
                }
                if (mod == 3) {
                    int reg2 = (modrm & 7) | REX_B(s);
                    if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
                        goto illegal_op;
                    }
                    if (s->flags & HF_MPX_IU_MASK) {
                        tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
                        tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
                    }
                } else {
                    gen_lea_modrm(env, s, modrm);
                    if (CODE64(s)) {
                        tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0,
                                            s->mem_index, MO_LEQ);
                        tcg_gen_addi_tl(cpu_A0, cpu_A0, 8);
                        tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0,
                                            s->mem_index, MO_LEQ);
                    } else {
                        tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0,
                                            s->mem_index, MO_LEUL);
                        tcg_gen_addi_tl(cpu_A0, cpu_A0, 4);
                        tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0,
                                            s->mem_index, MO_LEUL);
                    }
                }
            } else if (mod != 3) {
                /* bndstx */
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16
                    || a.base < -1) {
                    goto illegal_op;
                }
                if (a.base >= 0) {
                    tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp);
                } else {
                    tcg_gen_movi_tl(cpu_A0, 0);
                }
                gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
                if (a.index >= 0) {
                    tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]);
                } else {
                    tcg_gen_movi_tl(cpu_T0, 0);
                }
                if (CODE64(s)) {
                    gen_helper_bndstx64(cpu_env, cpu_A0, cpu_T0,
                                        cpu_bndl[reg], cpu_bndu[reg]);
                } else {
                    gen_helper_bndstx32(cpu_env, cpu_A0, cpu_T0,
                                        cpu_bndl[reg], cpu_bndu[reg]);
                }
            }
        }
        gen_nop_modrm(env, s, modrm);
        break;
    case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
        modrm = cpu_ldub_code(env, s->pc++);
        gen_nop_modrm(env, s, modrm);
        break;
    case 0x120: /* mov reg, crN */
    case 0x122: /* mov crN, reg */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            modrm = cpu_ldub_code(env, s->pc++);
            /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
             * AMD documentation (24594.pdf) and testing of
             * Intel 386 and 486 processors all show that the mod bits
             * are assumed to be 1's, regardless of actual values.
             */
            rm = (modrm & 7) | REX_B(s);
            reg = ((modrm >> 3) & 7) | rex_r;
            ot = CODE64(s) ? MO_64 : MO_32;
            if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
                (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
                reg = 8;
            }
            switch(reg) {
            case 0: case 2: case 3: case 4: case 8:
                gen_update_cc_op(s);
                gen_jmp_im(pc_start - s->cs_base);
                if (b & 2) {
                    if (s->base.tb->cflags & CF_USE_ICOUNT) {
                        gen_io_start();
                    }
                    gen_op_mov_v_reg(ot, cpu_T0, rm);
                    gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
                                         cpu_T0);
                    if (s->base.tb->cflags & CF_USE_ICOUNT) {
                        gen_io_end();
                    }
                    gen_jmp_im(s->pc - s->cs_base);
                    gen_eob(s);
                } else {
                    if (s->base.tb->cflags & CF_USE_ICOUNT) {
                        gen_io_start();
                    }
                    gen_helper_read_crN(cpu_T0, cpu_env, tcg_const_i32(reg));
                    gen_op_mov_reg_v(ot, rm, cpu_T0);
                    if (s->base.tb->cflags & CF_USE_ICOUNT) {
                        gen_io_end();
                    }
                }
                break;
            default:
                goto unknown_op;
            }
        }
        break;
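
    /* Moves to and from the debug registers go through helpers so the
     * breakpoint state can be updated; a DR write ends the TB.
     */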
    case 0x121: /* mov reg, drN */
    case 0x123: /* mov drN, reg */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            modrm = cpu_ldub_code(env, s->pc++);
            /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
             * AMD documentation (24594.pdf) and testing of
             * Intel 386 and 486 processors all show that the mod bits
             * are assumed to be 1's, regardless of actual values.
             */
            rm = (modrm & 7) | REX_B(s);
            reg = ((modrm >> 3) & 7) | rex_r;
            ot = CODE64(s) ? MO_64 : MO_32;
            if (reg >= 8) {
                goto illegal_op;
            }
            if (b & 2) {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
                gen_op_mov_v_reg(ot, cpu_T0, rm);
                tcg_gen_movi_i32(cpu_tmp2_i32, reg);
                gen_helper_set_dr(cpu_env, cpu_tmp2_i32, cpu_T0);
                gen_jmp_im(s->pc - s->cs_base);
                gen_eob(s);
            } else {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
                tcg_gen_movi_i32(cpu_tmp2_i32, reg);
                gen_helper_get_dr(cpu_T0, cpu_env, cpu_tmp2_i32);
                gen_op_mov_reg_v(ot, rm, cpu_T0);
            }
        }
        break;
    case 0x106: /* clts */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
            gen_helper_clts(cpu_env);
            /* abort block because static cpu state changed */
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
    /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
    case 0x1c3: /* MOVNTI reg, mem */
        if (!(s->cpuid_features & CPUID_SSE2))
            goto illegal_op;
        ot = mo_64_32(dflag);
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        reg = ((modrm >> 3) & 7) | rex_r;
        /* generate a generic store */
        gen_ldst_modrm(env, s, modrm, ot, reg, 1);
        break;
    case 0x1ae:
        modrm = cpu_ldub_code(env, s->pc++);
        switch (modrm) {
        CASE_MODRM_MEM_OP(0): /* fxsave */
            if (!(s->cpuid_features & CPUID_FXSR)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            gen_helper_fxsave(cpu_env, cpu_A0);
            break;

        CASE_MODRM_MEM_OP(1): /* fxrstor */
            if (!(s->cpuid_features & CPUID_FXSR)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            gen_helper_fxrstor(cpu_env, cpu_A0);
            break;
        CASE_MODRM_MEM_OP(2): /* ldmxcsr */
            if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
                goto illegal_op;
            }
            if (s->flags & HF_TS_MASK) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL);
            gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
            break;

        CASE_MODRM_MEM_OP(3): /* stmxcsr */
            if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
                goto illegal_op;
            }
            if (s->flags & HF_TS_MASK) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, mxcsr));
            gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
            break;
        CASE_MODRM_MEM_OP(4): /* xsave */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (prefixes & (PREFIX_LOCK | PREFIX_DATA
                                | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            gen_helper_xsave(cpu_env, cpu_A0, cpu_tmp1_i64);
            break;

        CASE_MODRM_MEM_OP(5): /* xrstor */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (prefixes & (PREFIX_LOCK | PREFIX_DATA
                                | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            gen_helper_xrstor(cpu_env, cpu_A0, cpu_tmp1_i64);
            /* XRSTOR is how MPX is enabled, which changes how
               we translate.  Thus we need to end the TB.  */
            gen_update_cc_op(s);
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
            break;
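
        /* 0F AE /6 and /7 each encode two instructions that are told
         * apart by the 0x66 prefix: xsaveopt vs clwb and clflush vs
         * clflushopt.
         */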
        CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            if (prefixes & PREFIX_DATA) {
                /* clwb */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) {
                    goto illegal_op;
                }
                gen_nop_modrm(env, s, modrm);
            } else {
                /* xsaveopt */
                if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                    || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0
                    || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) {
                    goto illegal_op;
                }
                gen_lea_modrm(env, s, modrm);
                tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
                                      cpu_regs[R_EDX]);
                gen_helper_xsaveopt(cpu_env, cpu_A0, cpu_tmp1_i64);
            }
            break;

        CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            if (prefixes & PREFIX_DATA) {
                /* clflushopt */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) {
                    goto illegal_op;
                }
            } else {
                /* clflush */
                if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))
                    || !(s->cpuid_features & CPUID_CLFLUSH)) {
                    goto illegal_op;
                }
            }
            gen_nop_modrm(env, s, modrm);
            break;
        case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
        case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */
        case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
        case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */
            if (CODE64(s)
                && (prefixes & PREFIX_REPZ)
                && !(prefixes & PREFIX_LOCK)
                && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) {
                TCGv base, treg, src, dst;

                /* Preserve hflags bits by testing CR4 at runtime.  */
                tcg_gen_movi_i32(cpu_tmp2_i32, CR4_FSGSBASE_MASK);
                gen_helper_cr4_testbit(cpu_env, cpu_tmp2_i32);

                base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
                treg = cpu_regs[(modrm & 7) | REX_B(s)];

                if (modrm & 0x10) {
                    /* wr*base */
                    dst = base, src = treg;
                } else {
                    /* rd*base */
                    dst = treg, src = base;
                }

                if (s->dflag == MO_32) {
                    tcg_gen_ext32u_tl(dst, src);
                } else {
                    tcg_gen_mov_tl(dst, src);
                }
                break;
            }
            goto unknown_op;
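
        /* The SSE fences map directly onto TCG barriers: sfence orders
         * stores, lfence orders loads and mfence orders both.
         */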
        case 0xf8: /* sfence / pcommit */
            if (prefixes & PREFIX_DATA) {
                /* pcommit */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT)
                    || (prefixes & PREFIX_LOCK)) {
                    goto illegal_op;
                }
                break;
            }
            /* fallthru */
        case 0xf9 ... 0xff: /* sfence */
            if (!(s->cpuid_features & CPUID_SSE)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
            break;
        case 0xe8 ... 0xef: /* lfence */
            if (!(s->cpuid_features & CPUID_SSE)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC);
            break;
        case 0xf0 ... 0xf7: /* mfence */
            if (!(s->cpuid_features & CPUID_SSE2)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
            break;

        default:
            goto unknown_op;
        }
        break;
    case 0x10d: /* 3DNow! prefetch(w) */
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        gen_nop_modrm(env, s, modrm);
        break;
    case 0x1aa: /* rsm */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
        if (!(s->flags & HF_SMM_MASK))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_jmp_im(s->pc - s->cs_base);
        gen_helper_rsm(cpu_env);
        gen_eob(s);
        break;
    case 0x1b8: /* SSE4.2 popcnt */
        if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
             PREFIX_REPZ)
            goto illegal_op;
        if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
            goto illegal_op;

        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;

        if (s->prefix & PREFIX_DATA) {
            ot = MO_16;
        } else {
            ot = mo_64_32(dflag);
        }

        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_extu(ot, cpu_T0);
        tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
        tcg_gen_ctpop_tl(cpu_T0, cpu_T0);
        gen_op_mov_reg_v(ot, reg, cpu_T0);

        set_cc_op(s, CC_OP_POPCNT);
        break;
    case 0x10e ... 0x10f:
        /* 3DNow! instructions, ignore prefixes */
        s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
        /* fall through */
    case 0x110 ... 0x117:
    case 0x128 ... 0x12f:
    case 0x138 ... 0x13a:
    case 0x150 ... 0x179:
    case 0x17c ... 0x17f:
    case 0x1c2:
    case 0x1c4 ... 0x1c6:
    case 0x1d0 ... 0x1fe:
        gen_sse(env, s, b, pc_start, rex_r);
        break;
    default:
        goto unknown_op;
    }
    return s->pc;
 illegal_op:
    gen_illegal_opcode(s);
    return s->pc;
 unknown_op:
    gen_unknown_opcode(env, s);
    return s->pc;
}
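
/* Register the TCG globals that mirror the x86 CPU state: the general
 * purpose registers, the condition-code fields, the segment bases and
 * the MPX bound registers all live in CPUX86State and are exposed to
 * the code generator by name below.
 */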
void tcg_x86_init(void)
{
    static const char reg_names[CPU_NB_REGS][4] = {
#ifdef TARGET_X86_64
        [R_EAX] = "rax", [R_ECX] = "rcx", [R_EDX] = "rdx", [R_EBX] = "rbx",
        [R_ESP] = "rsp", [R_EBP] = "rbp", [R_ESI] = "rsi", [R_EDI] = "rdi",
        [8] = "r8", [9] = "r9", [10] = "r10", [11] = "r11",
        [12] = "r12", [13] = "r13", [14] = "r14", [15] = "r15",
#else
        [R_EAX] = "eax", [R_ECX] = "ecx", [R_EDX] = "edx", [R_EBX] = "ebx",
        [R_ESP] = "esp", [R_EBP] = "ebp", [R_ESI] = "esi", [R_EDI] = "edi",
#endif
    };
    static const char seg_base_names[6][8] = {
        [R_CS] = "cs_base", [R_DS] = "ds_base", [R_ES] = "es_base",
        [R_FS] = "fs_base", [R_GS] = "gs_base", [R_SS] = "ss_base",
    };
    static const char bnd_regl_names[4][8] = {
        "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
    };
    static const char bnd_regu_names[4][8] = {
        "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
    };
    int i;
    static bool initialized;

    /* Only register the globals once.  */
    if (initialized) {
        return;
    }
    initialized = true;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;
    cpu_cc_op = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUX86State, cc_op), "cc_op");
    cpu_cc_dst = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_dst),
                                    "cc_dst");
    cpu_cc_src = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src),
                                    "cc_src");
    cpu_cc_src2 = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src2),
                                     "cc_src2");

    for (i = 0; i < CPU_NB_REGS; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUX86State, regs[i]),
                                         reg_names[i]);
    }

    for (i = 0; i < 6; ++i) {
        cpu_seg_base[i]
            = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUX86State, segs[i].base),
                                 seg_base_names[i]);
    }

    for (i = 0; i < 4; ++i) {
        cpu_bndl[i]
            = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUX86State, bnd_regs[i].lb),
                                     bnd_regl_names[i]);
        cpu_bndu[i]
            = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUX86State, bnd_regs[i].ub),
                                     bnd_regu_names[i]);
    }
}
static int i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu,
                                      int max_insns)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUX86State *env = cpu->env_ptr;
    uint32_t flags = dc->base.tb->flags;
    target_ulong cs_base = dc->base.tb->cs_base;

    dc->pe = (flags >> HF_PE_SHIFT) & 1;
    dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
    dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
    dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
    dc->f_st = 0;
    dc->vm86 = (flags >> VM_SHIFT) & 1;
    dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
    dc->iopl = (flags >> IOPL_SHIFT) & 3;
    dc->tf = (flags >> TF_SHIFT) & 1;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->cc_op_dirty = false;
    dc->cs_base = cs_base;
    dc->popl_esp_hack = 0;
    /* select memory access functions */
    dc->mem_index = 0;
#ifdef CONFIG_SOFTMMU
    dc->mem_index = cpu_mmu_index(env, false);
#endif
    dc->cpuid_features = env->features[FEAT_1_EDX];
    dc->cpuid_ext_features = env->features[FEAT_1_ECX];
    dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
    dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
    dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
    dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
#ifdef TARGET_X86_64
    dc->lma = (flags >> HF_LMA_SHIFT) & 1;
    dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
#endif
    dc->flags = flags;
    dc->jmp_opt = !(dc->tf || dc->base.singlestep_enabled ||
                    (flags & HF_INHIBIT_IRQ_MASK));
    /* Do not optimize repz jumps at all in icount mode, because
       rep movsS instructions are executed with different paths
       in !repz_opt and repz_opt modes.  The first one was used
       always except single step mode.  And this setting
       disables jumps optimization and control paths become
       equivalent in run and single step modes.
       Now there will be no jump optimization for repz in
       record/replay modes and there will always be an
       additional step for ecx=0 when icount is enabled.
     */
    dc->repz_opt = !dc->jmp_opt && !(dc->base.tb->cflags & CF_USE_ICOUNT);
#if 0
    /* check addseg logic */
    if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
        printf("ERROR addseg\n");
#endif

    cpu_T0 = tcg_temp_new();
    cpu_T1 = tcg_temp_new();
    cpu_A0 = tcg_temp_new();

    cpu_tmp0 = tcg_temp_new();
    cpu_tmp1_i64 = tcg_temp_new_i64();
    cpu_tmp2_i32 = tcg_temp_new_i32();
    cpu_tmp3_i32 = tcg_temp_new_i32();
    cpu_tmp4 = tcg_temp_new();
    cpu_ptr0 = tcg_temp_new_ptr();
    cpu_ptr1 = tcg_temp_new_ptr();
    cpu_cc_srcT = tcg_temp_local_new();

    return max_insns;
}
static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
}

static bool i386_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                     const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    /* If RF is set, suppress an internally generated breakpoint.  */
    int flags = dc->base.tb->flags & HF_RF_MASK ? BP_GDB : BP_ANY;
    if (bp->flags & flags) {
        gen_debug(dc, dc->base.pc_next - dc->cs_base);
        dc->base.is_jmp = DISAS_NORETURN;
        /* The address covered by the breakpoint must be included in
           [tb->pc, tb->pc + tb->size) in order for it to be properly
           cleared -- thus we increment the PC here so that the generic
           logic setting tb->size later does the right thing.  */
        dc->base.pc_next += 1;
        return true;
    } else {
        return false;
    }
}
static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong pc_next = disas_insn(dc, cpu);

    if (dc->tf || (dc->base.tb->flags & HF_INHIBIT_IRQ_MASK)) {
        /* if single step mode, we generate only one instruction and
           generate an exception */
        /* if irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
           the flag and abort the translation to give the irqs a
           chance to happen */
        dc->base.is_jmp = DISAS_TOO_MANY;
    } else if ((dc->base.tb->cflags & CF_USE_ICOUNT)
               && ((dc->base.pc_next & TARGET_PAGE_MASK)
                   != ((dc->base.pc_next + TARGET_MAX_INSN_SIZE - 1)
                       & TARGET_PAGE_MASK)
                   || (dc->base.pc_next & ~TARGET_PAGE_MASK) == 0)) {
        /* Do not cross the boundary of the pages in icount mode,
           it can cause an exception.  Do it only when boundary is
           crossed by the first instruction in the block.
           If current instruction already crossed the bound - it's ok,
           because an exception hasn't stopped this code.
         */
        dc->base.is_jmp = DISAS_TOO_MANY;
    } else if ((pc_next - dc->base.pc_first) >= (TARGET_PAGE_SIZE - 32)) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }

    dc->base.pc_next = pc_next;
}
static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (dc->base.is_jmp == DISAS_TOO_MANY) {
        gen_jmp_im(dc->base.pc_next - dc->cs_base);
        gen_eob(dc);
    }
}

static void i386_tr_disas_log(const DisasContextBase *dcbase,
                              CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    int disas_flags = !dc->code32;

    qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
#ifdef TARGET_X86_64
    if (dc->code64) {
        disas_flags = 2;
    }
#endif
    log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size, disas_flags);
}

static const TranslatorOps i386_tr_ops = {
    .init_disas_context = i386_tr_init_disas_context,
    .tb_start = i386_tr_tb_start,
    .insn_start = i386_tr_insn_start,
    .breakpoint_check = i386_tr_breakpoint_check,
    .translate_insn = i386_tr_translate_insn,
    .tb_stop = i386_tr_tb_stop,
    .disas_log = i386_tr_disas_log,
};

/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
{
    DisasContext dc;

    translator_loop(&i386_tr_ops, &dc.base, cpu, tb);
}

void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb,
                          target_ulong *data)
{
    int cc_op = data[1];
    env->eip = data[0] - tb->cs_base;
    if (cc_op != CC_OP_DYNAMIC) {
        env->cc_op = cc_op;
    }
}