4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
21 #include "qemu/host-utils.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
26 #include "exec/cpu_ldst.h"
28 #include "exec/helper-proto.h"
29 #include "exec/helper-gen.h"
31 #include "trace-tcg.h"
35 #define PREFIX_REPZ 0x01
36 #define PREFIX_REPNZ 0x02
37 #define PREFIX_LOCK 0x04
38 #define PREFIX_DATA 0x08
39 #define PREFIX_ADR 0x10
40 #define PREFIX_VEX 0x20
43 #define CODE64(s) ((s)->code64)
44 #define REX_X(s) ((s)->rex_x)
45 #define REX_B(s) ((s)->rex_b)
60 /* For a switch indexed by MODRM, match all memory operands for a given OP. */
61 #define CASE_MODRM_MEM_OP(OP) \
62 case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
63 case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
64 case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7
66 #define CASE_MODRM_OP(OP) \
67 case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
68 case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
69 case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
70 case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
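/* Illustrative example: a ModR/M byte is laid out as mod (bits 7:6),
 * reg-or-opcode (bits 5:3) and rm (bits 2:0).  CASE_MODRM_MEM_OP(1) thus
 * expands to the ranges 0x08..0x0f, 0x48..0x4f and 0x88..0x8f, i.e. every
 * encoding with /1 in the reg field and a memory operand (mod != 3), while
 * CASE_MODRM_OP(1) additionally covers the register forms 0xc8..0xcf. */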
72 //#define MACRO_TEST 1
74 /* global register indexes */
75 static TCGv_env cpu_env;
77 static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2, cpu_cc_srcT;
78 static TCGv_i32 cpu_cc_op;
79 static TCGv cpu_regs[CPU_NB_REGS];
80 static TCGv cpu_seg_base[6];
81 static TCGv_i64 cpu_bndl[4];
82 static TCGv_i64 cpu_bndu[4];
84 static TCGv cpu_T0, cpu_T1;
85 /* local register indexes (only used inside old micro ops) */
86 static TCGv cpu_tmp0, cpu_tmp4;
87 static TCGv_ptr cpu_ptr0, cpu_ptr1;
88 static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
89 static TCGv_i64 cpu_tmp1_i64;
91 #include "exec/gen-icount.h"
94 static int x86_64_hregs;
97 typedef struct DisasContext {
98 /* current insn context */
99 int override; /* -1 if no override */
103 target_ulong pc_start;
104 target_ulong pc; /* pc = eip + cs_base */
105 int is_jmp; /* 1 means jump (stop translation), 2 means CPU
106 static state change (stop translation) */
107 /* current block context */
108 target_ulong cs_base; /* base of CS segment */
109 int pe; /* protected mode */
110 int code32; /* 32 bit code segment */
112 int lma; /* long mode active */
113 int code64; /* 64 bit code segment */
116 int vex_l; /* vex vector length */
117 int vex_v; /* vex vvvv register, without 1's complement. */
118 int ss32; /* 32 bit stack segment */
119 CCOp cc_op; /* current CC operation */
121 int addseg; /* non-zero if any of DS, ES or SS has a non-zero base */
122 int f_st; /* currently unused */
123 int vm86; /* vm86 mode */
126 int tf; /* TF cpu flag */
127 int singlestep_enabled; /* "hardware" single step enabled */
128 int jmp_opt; /* use direct block chaining for direct jumps */
129 int repz_opt; /* optimize jumps within repz instructions */
130 int mem_index; /* select memory access functions */
131 uint64_t flags; /* all execution flags */
132 struct TranslationBlock *tb;
133 int popl_esp_hack; /* for correct popl with esp base handling */
134 int rip_offset; /* only used in x86_64, but left for simplicity */
136 int cpuid_ext_features;
137 int cpuid_ext2_features;
138 int cpuid_ext3_features;
139 int cpuid_7_0_ebx_features;
140 int cpuid_xsave_features;
143 static void gen_eob(DisasContext *s);
144 static void gen_jmp(DisasContext *s, target_ulong eip);
145 static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
146 static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d);
148 /* i386 arith/logic operations */
168 OP_SHL1, /* undocumented */
184 /* I386 int registers */
185 OR_EAX, /* MUST be even numbered */
194 OR_TMP0 = 16, /* temporary operand register */
196 OR_A0, /* temporary register used when doing address evaluation */
206 /* Bit set if the global variable is live after setting CC_OP to X. */
207 static const uint8_t cc_op_live[CC_OP_NB] = {
208 [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
209 [CC_OP_EFLAGS] = USES_CC_SRC,
210 [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
211 [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
212 [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
213 [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
214 [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
215 [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
216 [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
217 [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
218 [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
219 [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
220 [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
221 [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
222 [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
223 [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
225 [CC_OP_POPCNT] = USES_CC_SRC,
228 static void set_cc_op(DisasContext *s, CCOp op)
232 if (s->cc_op == op) {
236 /* Discard CC computation that will no longer be used. */
237 dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
238 if (dead & USES_CC_DST) {
239 tcg_gen_discard_tl(cpu_cc_dst);
241 if (dead & USES_CC_SRC) {
242 tcg_gen_discard_tl(cpu_cc_src);
244 if (dead & USES_CC_SRC2) {
245 tcg_gen_discard_tl(cpu_cc_src2);
247 if (dead & USES_CC_SRCT) {
248 tcg_gen_discard_tl(cpu_cc_srcT);
251 if (op == CC_OP_DYNAMIC) {
252 /* The DYNAMIC setting is translator only, and should never be
253 stored. Thus we always consider it clean. */
254 s->cc_op_dirty = false;
256 /* Discard any computed CC_OP value (see shifts). */
257 if (s->cc_op == CC_OP_DYNAMIC) {
258 tcg_gen_discard_i32(cpu_cc_op);
260 s->cc_op_dirty = true;
265 static void gen_update_cc_op(DisasContext *s)
267 if (s->cc_op_dirty) {
268 tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
269 s->cc_op_dirty = false;
275 #define NB_OP_SIZES 4
277 #else /* !TARGET_X86_64 */
279 #define NB_OP_SIZES 3
281 #endif /* !TARGET_X86_64 */
283 #if defined(HOST_WORDS_BIGENDIAN)
284 #define REG_B_OFFSET (sizeof(target_ulong) - 1)
285 #define REG_H_OFFSET (sizeof(target_ulong) - 2)
286 #define REG_W_OFFSET (sizeof(target_ulong) - 2)
287 #define REG_L_OFFSET (sizeof(target_ulong) - 4)
288 #define REG_LH_OFFSET (sizeof(target_ulong) - 8)
290 #define REG_B_OFFSET 0
291 #define REG_H_OFFSET 1
292 #define REG_W_OFFSET 0
293 #define REG_L_OFFSET 0
294 #define REG_LH_OFFSET 4
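/* Illustrative note: on a little-endian host the low byte of a register slot
 * (e.g. AL) lives at offset 0 and the high byte (AH) at offset 1, whereas a
 * big-endian host must index from the far end of the target_ulong; these
 * offsets let partial-register accesses be expressed as plain byte offsets
 * independent of host byte order. */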
297 /* In instruction encodings for byte register accesses the
298 * register number usually indicates "low 8 bits of register N";
299 * however there are some special cases where N in 4..7 indicates
300 * [AH, CH, DH, BH], i.e. "bits 15..8 of register N-4". Return
301 * true for this special case, false otherwise.
303 static inline bool byte_reg_is_xH(int reg)
309 if (reg >= 8 || x86_64_hregs) {
316 /* Select the size of a push/pop operation. */
317 static inline TCGMemOp mo_pushpop(DisasContext *s, TCGMemOp ot)
320 return ot == MO_16 ? MO_16 : MO_64;
326 /* Select the size of the stack pointer. */
327 static inline TCGMemOp mo_stacksize(DisasContext *s)
329 return CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16;
332 /* Select only size 64 else 32. Used for SSE operand sizes. */
333 static inline TCGMemOp mo_64_32(TCGMemOp ot)
336 return ot == MO_64 ? MO_64 : MO_32;
342 /* Select size 8 if lsb of B is clear, else OT. Used for decoding
343 byte vs word opcodes. */
344 static inline TCGMemOp mo_b_d(int b, TCGMemOp ot)
346 return b & 1 ? ot : MO_8;
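/* Illustrative example: opcode 0x88 (MOV Eb,Gb) has bit 0 clear, so mo_b_d()
 * returns MO_8, while opcode 0x89 (MOV Ev,Gv) keeps the current operand
 * size ot. */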
349 /* Select size 8 if lsb of B is clear, else OT capped at 32.
350 Used for decoding operand size of port opcodes. */
351 static inline TCGMemOp mo_b_d32(int b, TCGMemOp ot)
353 return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
356 static void gen_op_mov_reg_v(TCGMemOp ot, int reg, TCGv t0)
360 if (!byte_reg_is_xH(reg)) {
361 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
363 tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
367 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
370 /* For x86_64, this sets the higher half of the register to zero.
371 For i386, this is equivalent to a mov. */
372 tcg_gen_ext32u_tl(cpu_regs[reg], t0);
376 tcg_gen_mov_tl(cpu_regs[reg], t0);
384 static inline void gen_op_mov_v_reg(TCGMemOp ot, TCGv t0, int reg)
386 if (ot == MO_8 && byte_reg_is_xH(reg)) {
387 tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8);
389 tcg_gen_mov_tl(t0, cpu_regs[reg]);
393 static void gen_add_A0_im(DisasContext *s, int val)
395 tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
397 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
401 static inline void gen_op_jmp_v(TCGv dest)
403 tcg_gen_st_tl(dest, cpu_env, offsetof(CPUX86State, eip));
406 static inline void gen_op_add_reg_im(TCGMemOp size, int reg, int32_t val)
408 tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
409 gen_op_mov_reg_v(size, reg, cpu_tmp0);
412 static inline void gen_op_add_reg_T0(TCGMemOp size, int reg)
414 tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T0);
415 gen_op_mov_reg_v(size, reg, cpu_tmp0);
418 static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
420 tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
423 static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
425 tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
428 static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
431 gen_op_st_v(s, idx, cpu_T0, cpu_A0);
433 gen_op_mov_reg_v(idx, d, cpu_T0);
437 static inline void gen_jmp_im(target_ulong pc)
439 tcg_gen_movi_tl(cpu_tmp0, pc);
440 gen_op_jmp_v(cpu_tmp0);
443 /* Compute SEG:REG into A0. SEG is selected from the override segment
444 (OVR_SEG) and the default segment (DEF_SEG). OVR_SEG may be -1 to
445 indicate no override. */
446 static void gen_lea_v_seg(DisasContext *s, TCGMemOp aflag, TCGv a0,
447 int def_seg, int ovr_seg)
453 tcg_gen_mov_tl(cpu_A0, a0);
460 if (ovr_seg < 0 && s->addseg) {
464 tcg_gen_ext32u_tl(cpu_A0, a0);
470 tcg_gen_ext16u_tl(cpu_A0, a0);
485 TCGv seg = cpu_seg_base[ovr_seg];
487 if (aflag == MO_64) {
488 tcg_gen_add_tl(cpu_A0, a0, seg);
489 } else if (CODE64(s)) {
490 tcg_gen_ext32u_tl(cpu_A0, a0);
491 tcg_gen_add_tl(cpu_A0, cpu_A0, seg);
493 tcg_gen_add_tl(cpu_A0, a0, seg);
494 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
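/* Illustrative example: for a 32-bit access such as "mov eax, ds:[esi]" with
 * a non-zero DS base, this path computes A0 = (ESI + DS.base) truncated to
 * 32 bits, i.e. the linear address under the current address size. */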
499 static inline void gen_string_movl_A0_ESI(DisasContext *s)
501 gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override);
504 static inline void gen_string_movl_A0_EDI(DisasContext *s)
506 gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1);
509 static inline void gen_op_movl_T0_Dshift(TCGMemOp ot)
511 tcg_gen_ld32s_tl(cpu_T0, cpu_env, offsetof(CPUX86State, df));
512 tcg_gen_shli_tl(cpu_T0, cpu_T0, ot);
515 static TCGv gen_ext_tl(TCGv dst, TCGv src, TCGMemOp size, bool sign)
520 tcg_gen_ext8s_tl(dst, src);
522 tcg_gen_ext8u_tl(dst, src);
527 tcg_gen_ext16s_tl(dst, src);
529 tcg_gen_ext16u_tl(dst, src);
535 tcg_gen_ext32s_tl(dst, src);
537 tcg_gen_ext32u_tl(dst, src);
546 static void gen_extu(TCGMemOp ot, TCGv reg)
548 gen_ext_tl(reg, reg, ot, false);
551 static void gen_exts(TCGMemOp ot, TCGv reg)
553 gen_ext_tl(reg, reg, ot, true);
556 static inline void gen_op_jnz_ecx(TCGMemOp size, TCGLabel *label1)
558 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
559 gen_extu(size, cpu_tmp0);
560 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
563 static inline void gen_op_jz_ecx(TCGMemOp size, TCGLabel *label1)
565 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
566 gen_extu(size, cpu_tmp0);
567 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
570 static void gen_helper_in_func(TCGMemOp ot, TCGv v, TCGv_i32 n)
574 gen_helper_inb(v, cpu_env, n);
577 gen_helper_inw(v, cpu_env, n);
580 gen_helper_inl(v, cpu_env, n);
587 static void gen_helper_out_func(TCGMemOp ot, TCGv_i32 v, TCGv_i32 n)
591 gen_helper_outb(cpu_env, v, n);
594 gen_helper_outw(cpu_env, v, n);
597 gen_helper_outl(cpu_env, v, n);
604 static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip,
607 target_ulong next_eip;
609 if (s->pe && (s->cpl > s->iopl || s->vm86)) {
610 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
613 gen_helper_check_iob(cpu_env, cpu_tmp2_i32);
616 gen_helper_check_iow(cpu_env, cpu_tmp2_i32);
619 gen_helper_check_iol(cpu_env, cpu_tmp2_i32);
625 if (s->flags & HF_SVMI_MASK) {
628 svm_flags |= (1 << (4 + ot));
629 next_eip = s->pc - s->cs_base;
630 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
631 gen_helper_svm_check_io(cpu_env, cpu_tmp2_i32,
632 tcg_const_i32(svm_flags),
633 tcg_const_i32(next_eip - cur_eip));
637 static inline void gen_movs(DisasContext *s, TCGMemOp ot)
639 gen_string_movl_A0_ESI(s);
640 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
641 gen_string_movl_A0_EDI(s);
642 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
643 gen_op_movl_T0_Dshift(ot);
644 gen_op_add_reg_T0(s->aflag, R_ESI);
645 gen_op_add_reg_T0(s->aflag, R_EDI);
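/* Illustrative note: gen_movs() shows the common string-op pattern: load from
 * DS:[E]SI (honouring any segment override), store to ES:[E]DI, then advance
 * both index registers by the signed step (+/- operand size, from EFLAGS.DF)
 * that gen_op_movl_T0_Dshift() left in T0. */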
648 static void gen_op_update1_cc(void)
650 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
653 static void gen_op_update2_cc(void)
655 tcg_gen_mov_tl(cpu_cc_src, cpu_T1);
656 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
659 static void gen_op_update3_cc(TCGv reg)
661 tcg_gen_mov_tl(cpu_cc_src2, reg);
662 tcg_gen_mov_tl(cpu_cc_src, cpu_T1);
663 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
666 static inline void gen_op_testl_T0_T1_cc(void)
668 tcg_gen_and_tl(cpu_cc_dst, cpu_T0, cpu_T1);
671 static void gen_op_update_neg_cc(void)
673 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
674 tcg_gen_neg_tl(cpu_cc_src, cpu_T0);
675 tcg_gen_movi_tl(cpu_cc_srcT, 0);
678 /* compute all eflags to cc_src */
679 static void gen_compute_eflags(DisasContext *s)
681 TCGv zero, dst, src1, src2;
684 if (s->cc_op == CC_OP_EFLAGS) {
687 if (s->cc_op == CC_OP_CLR) {
688 tcg_gen_movi_tl(cpu_cc_src, CC_Z | CC_P);
689 set_cc_op(s, CC_OP_EFLAGS);
698 /* Take care not to read values that are not live. */
699 live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
700 dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
702 zero = tcg_const_tl(0);
703 if (dead & USES_CC_DST) {
706 if (dead & USES_CC_SRC) {
709 if (dead & USES_CC_SRC2) {
715 gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op);
716 set_cc_op(s, CC_OP_EFLAGS);
723 typedef struct CCPrepare {
733 /* compute eflags.C to reg */
734 static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
740 case CC_OP_SUBB ... CC_OP_SUBQ:
741 /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
742 size = s->cc_op - CC_OP_SUBB;
743 t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
744 /* If no temporary was used, be careful not to alias t1 and t0. */
745 t0 = TCGV_EQUAL(t1, cpu_cc_src) ? cpu_tmp0 : reg;
746 tcg_gen_mov_tl(t0, cpu_cc_srcT);
750 case CC_OP_ADDB ... CC_OP_ADDQ:
751 /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
752 size = s->cc_op - CC_OP_ADDB;
753 t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
754 t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
756 return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
757 .reg2 = t1, .mask = -1, .use_reg2 = true };
759 case CC_OP_LOGICB ... CC_OP_LOGICQ:
762 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
764 case CC_OP_INCB ... CC_OP_INCQ:
765 case CC_OP_DECB ... CC_OP_DECQ:
766 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
767 .mask = -1, .no_setcond = true };
769 case CC_OP_SHLB ... CC_OP_SHLQ:
770 /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
771 size = s->cc_op - CC_OP_SHLB;
772 shift = (8 << size) - 1;
773 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
774 .mask = (target_ulong)1 << shift };
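/* Illustrative example: for a 32-bit shift (CC_OP_SHLL) size is MO_32, so
 * shift == 31 and the carry is simply bit 31 of CC_SRC, the last bit
 * shifted out. */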
776 case CC_OP_MULB ... CC_OP_MULQ:
777 return (CCPrepare) { .cond = TCG_COND_NE,
778 .reg = cpu_cc_src, .mask = -1 };
780 case CC_OP_BMILGB ... CC_OP_BMILGQ:
781 size = s->cc_op - CC_OP_BMILGB;
782 t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
783 return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
787 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
788 .mask = -1, .no_setcond = true };
791 case CC_OP_SARB ... CC_OP_SARQ:
793 return (CCPrepare) { .cond = TCG_COND_NE,
794 .reg = cpu_cc_src, .mask = CC_C };
797 /* The need to compute only C from CC_OP_DYNAMIC is important
798 in efficiently implementing e.g. INC at the start of a TB. */
800 gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
801 cpu_cc_src2, cpu_cc_op);
802 return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
803 .mask = -1, .no_setcond = true };
807 /* compute eflags.P to reg */
808 static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
810 gen_compute_eflags(s);
811 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
815 /* compute eflags.S to reg */
816 static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
820 gen_compute_eflags(s);
826 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
830 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
833 TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3;
834 TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
835 return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
840 /* compute eflags.O to reg */
841 static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
846 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
847 .mask = -1, .no_setcond = true };
850 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
852 gen_compute_eflags(s);
853 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
858 /* compute eflags.Z to reg */
859 static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
863 gen_compute_eflags(s);
869 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
872 return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
874 return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_src,
878 TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3;
879 TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
880 return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
885 /* perform a conditional store into register 'reg' according to jump opcode
886 value 'b'. In the fast case, T0 is guaranteed not to be used. */
887 static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
889 int inv, jcc_op, cond;
895 jcc_op = (b >> 1) & 7;
898 case CC_OP_SUBB ... CC_OP_SUBQ:
899 /* We optimize relational operators for the cmp/jcc case. */
900 size = s->cc_op - CC_OP_SUBB;
903 tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
904 gen_extu(size, cpu_tmp4);
905 t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
906 cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = cpu_tmp4,
907 .reg2 = t0, .mask = -1, .use_reg2 = true };
916 tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
917 gen_exts(size, cpu_tmp4);
918 t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, true);
919 cc = (CCPrepare) { .cond = cond, .reg = cpu_tmp4,
920 .reg2 = t0, .mask = -1, .use_reg2 = true };
930 /* This actually generates good code for JC, JZ and JS. */
933 cc = gen_prepare_eflags_o(s, reg);
936 cc = gen_prepare_eflags_c(s, reg);
939 cc = gen_prepare_eflags_z(s, reg);
942 gen_compute_eflags(s);
943 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
944 .mask = CC_Z | CC_C };
947 cc = gen_prepare_eflags_s(s, reg);
950 cc = gen_prepare_eflags_p(s, reg);
953 gen_compute_eflags(s);
954 if (TCGV_EQUAL(reg, cpu_cc_src)) {
957 tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
958 tcg_gen_xor_tl(reg, reg, cpu_cc_src);
959 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
964 gen_compute_eflags(s);
965 if (TCGV_EQUAL(reg, cpu_cc_src)) {
968 tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
969 tcg_gen_xor_tl(reg, reg, cpu_cc_src);
970 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
971 .mask = CC_S | CC_Z };
978 cc.cond = tcg_invert_cond(cc.cond);
983 static void gen_setcc1(DisasContext *s, int b, TCGv reg)
985 CCPrepare cc = gen_prepare_cc(s, b, reg);
988 if (cc.cond == TCG_COND_EQ) {
989 tcg_gen_xori_tl(reg, cc.reg, 1);
991 tcg_gen_mov_tl(reg, cc.reg);
996 if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
997 cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
998 tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
999 tcg_gen_andi_tl(reg, reg, 1);
1002 if (cc.mask != -1) {
1003 tcg_gen_andi_tl(reg, cc.reg, cc.mask);
1007 tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
1009 tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
1013 static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
1015 gen_setcc1(s, JCC_B << 1, reg);
1018 /* generate a conditional jump to label 'l1' according to jump opcode
1019 value 'b'. In the fast case, T0 is guaranteed not to be used. */
1020 static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
1022 CCPrepare cc = gen_prepare_cc(s, b, cpu_T0);
1024 if (cc.mask != -1) {
1025 tcg_gen_andi_tl(cpu_T0, cc.reg, cc.mask);
1029 tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1031 tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1035 /* Generate a conditional jump to label 'l1' according to jump opcode
1036 value 'b'. In the fast case, T0 is guaranteed not to be used.
1037 A translation block must end soon. */
1038 static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
1040 CCPrepare cc = gen_prepare_cc(s, b, cpu_T0);
1042 gen_update_cc_op(s);
1043 if (cc.mask != -1) {
1044 tcg_gen_andi_tl(cpu_T0, cc.reg, cc.mask);
1047 set_cc_op(s, CC_OP_DYNAMIC);
1049 tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1051 tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1055 /* XXX: does not work with gdbstub "ice" single step - not a
1057 static TCGLabel *gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
1059 TCGLabel *l1 = gen_new_label();
1060 TCGLabel *l2 = gen_new_label();
1061 gen_op_jnz_ecx(s->aflag, l1);
1063 gen_jmp_tb(s, next_eip, 1);
1068 static inline void gen_stos(DisasContext *s, TCGMemOp ot)
1070 gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
1071 gen_string_movl_A0_EDI(s);
1072 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
1073 gen_op_movl_T0_Dshift(ot);
1074 gen_op_add_reg_T0(s->aflag, R_EDI);
1077 static inline void gen_lods(DisasContext *s, TCGMemOp ot)
1079 gen_string_movl_A0_ESI(s);
1080 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
1081 gen_op_mov_reg_v(ot, R_EAX, cpu_T0);
1082 gen_op_movl_T0_Dshift(ot);
1083 gen_op_add_reg_T0(s->aflag, R_ESI);
1086 static inline void gen_scas(DisasContext *s, TCGMemOp ot)
1088 gen_string_movl_A0_EDI(s);
1089 gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
1090 gen_op(s, OP_CMPL, ot, R_EAX);
1091 gen_op_movl_T0_Dshift(ot);
1092 gen_op_add_reg_T0(s->aflag, R_EDI);
1095 static inline void gen_cmps(DisasContext *s, TCGMemOp ot)
1097 gen_string_movl_A0_EDI(s);
1098 gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
1099 gen_string_movl_A0_ESI(s);
1100 gen_op(s, OP_CMPL, ot, OR_TMP0);
1101 gen_op_movl_T0_Dshift(ot);
1102 gen_op_add_reg_T0(s->aflag, R_ESI);
1103 gen_op_add_reg_T0(s->aflag, R_EDI);
1106 static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
1108 if (s->flags & HF_IOBPT_MASK) {
1109 TCGv_i32 t_size = tcg_const_i32(1 << ot);
1110 TCGv t_next = tcg_const_tl(s->pc - s->cs_base);
1112 gen_helper_bpt_io(cpu_env, t_port, t_size, t_next);
1113 tcg_temp_free_i32(t_size);
1114 tcg_temp_free(t_next);
1119 static inline void gen_ins(DisasContext *s, TCGMemOp ot)
1121 if (s->tb->cflags & CF_USE_ICOUNT) {
1124 gen_string_movl_A0_EDI(s);
1125 /* Note: we must do this dummy write first to be restartable in
1126 case of page fault. */
1127 tcg_gen_movi_tl(cpu_T0, 0);
1128 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
1129 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
1130 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
1131 gen_helper_in_func(ot, cpu_T0, cpu_tmp2_i32);
1132 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
1133 gen_op_movl_T0_Dshift(ot);
1134 gen_op_add_reg_T0(s->aflag, R_EDI);
1135 gen_bpt_io(s, cpu_tmp2_i32, ot);
1136 if (s->tb->cflags & CF_USE_ICOUNT) {
1141 static inline void gen_outs(DisasContext *s, TCGMemOp ot)
1143 if (s->tb->cflags & CF_USE_ICOUNT) {
1146 gen_string_movl_A0_ESI(s);
1147 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
1149 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
1150 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
1151 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T0);
1152 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
1153 gen_op_movl_T0_Dshift(ot);
1154 gen_op_add_reg_T0(s->aflag, R_ESI);
1155 gen_bpt_io(s, cpu_tmp2_i32, ot);
1156 if (s->tb->cflags & CF_USE_ICOUNT) {
1161 /* same method as Valgrind: we generate jumps to current or next
1163 #define GEN_REPZ(op) \
1164 static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \
1165 target_ulong cur_eip, target_ulong next_eip) \
1168 gen_update_cc_op(s); \
1169 l2 = gen_jz_ecx_string(s, next_eip); \
1170 gen_ ## op(s, ot); \
1171 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1172 /* a loop would cause two single step exceptions if ECX = 1 \
1173 before rep string_insn */ \
1175 gen_op_jz_ecx(s->aflag, l2); \
1176 gen_jmp(s, cur_eip); \
1179 #define GEN_REPZ2(op) \
1180 static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \
1181 target_ulong cur_eip, \
1182 target_ulong next_eip, \
1186 gen_update_cc_op(s); \
1187 l2 = gen_jz_ecx_string(s, next_eip); \
1188 gen_ ## op(s, ot); \
1189 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1190 gen_update_cc_op(s); \
1191 gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2); \
1193 gen_op_jz_ecx(s->aflag, l2); \
1194 gen_jmp(s, cur_eip); \
1205 static void gen_helper_fp_arith_ST0_FT0(int op)
1209 gen_helper_fadd_ST0_FT0(cpu_env);
1212 gen_helper_fmul_ST0_FT0(cpu_env);
1215 gen_helper_fcom_ST0_FT0(cpu_env);
1218 gen_helper_fcom_ST0_FT0(cpu_env);
1221 gen_helper_fsub_ST0_FT0(cpu_env);
1224 gen_helper_fsubr_ST0_FT0(cpu_env);
1227 gen_helper_fdiv_ST0_FT0(cpu_env);
1230 gen_helper_fdivr_ST0_FT0(cpu_env);
1235 /* NOTE the exception in "r" op ordering */
1236 static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
1238 TCGv_i32 tmp = tcg_const_i32(opreg);
1241 gen_helper_fadd_STN_ST0(cpu_env, tmp);
1244 gen_helper_fmul_STN_ST0(cpu_env, tmp);
1247 gen_helper_fsubr_STN_ST0(cpu_env, tmp);
1250 gen_helper_fsub_STN_ST0(cpu_env, tmp);
1253 gen_helper_fdivr_STN_ST0(cpu_env, tmp);
1256 gen_helper_fdiv_STN_ST0(cpu_env, tmp);
1261 /* if d == OR_TMP0, it means memory operand (address in A0) */
1262 static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d)
1265 gen_op_mov_v_reg(ot, cpu_T0, d);
1266 } else if (!(s1->prefix & PREFIX_LOCK)) {
1267 gen_op_ld_v(s1, ot, cpu_T0, cpu_A0);
1271 gen_compute_eflags_c(s1, cpu_tmp4);
1272 if (s1->prefix & PREFIX_LOCK) {
1273 tcg_gen_add_tl(cpu_T0, cpu_tmp4, cpu_T1);
1274 tcg_gen_atomic_add_fetch_tl(cpu_T0, cpu_A0, cpu_T0,
1275 s1->mem_index, ot | MO_LE);
1277 tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
1278 tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_tmp4);
1279 gen_op_st_rm_T0_A0(s1, ot, d);
1281 gen_op_update3_cc(cpu_tmp4);
1282 set_cc_op(s1, CC_OP_ADCB + ot);
1285 gen_compute_eflags_c(s1, cpu_tmp4);
1286 if (s1->prefix & PREFIX_LOCK) {
1287 tcg_gen_add_tl(cpu_T0, cpu_T1, cpu_tmp4);
1288 tcg_gen_neg_tl(cpu_T0, cpu_T0);
1289 tcg_gen_atomic_add_fetch_tl(cpu_T0, cpu_A0, cpu_T0,
1290 s1->mem_index, ot | MO_LE);
1292 tcg_gen_sub_tl(cpu_T0, cpu_T0, cpu_T1);
1293 tcg_gen_sub_tl(cpu_T0, cpu_T0, cpu_tmp4);
1294 gen_op_st_rm_T0_A0(s1, ot, d);
1296 gen_op_update3_cc(cpu_tmp4);
1297 set_cc_op(s1, CC_OP_SBBB + ot);
1300 if (s1->prefix & PREFIX_LOCK) {
1301 tcg_gen_atomic_add_fetch_tl(cpu_T0, cpu_A0, cpu_T1,
1302 s1->mem_index, ot | MO_LE);
1304 tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
1305 gen_op_st_rm_T0_A0(s1, ot, d);
1307 gen_op_update2_cc();
1308 set_cc_op(s1, CC_OP_ADDB + ot);
1311 if (s1->prefix & PREFIX_LOCK) {
1312 tcg_gen_neg_tl(cpu_T0, cpu_T1);
1313 tcg_gen_atomic_fetch_add_tl(cpu_cc_srcT, cpu_A0, cpu_T0,
1314 s1->mem_index, ot | MO_LE);
1315 tcg_gen_sub_tl(cpu_T0, cpu_cc_srcT, cpu_T1);
1317 tcg_gen_mov_tl(cpu_cc_srcT, cpu_T0);
1318 tcg_gen_sub_tl(cpu_T0, cpu_T0, cpu_T1);
1319 gen_op_st_rm_T0_A0(s1, ot, d);
1321 gen_op_update2_cc();
1322 set_cc_op(s1, CC_OP_SUBB + ot);
1326 if (s1->prefix & PREFIX_LOCK) {
1327 tcg_gen_atomic_and_fetch_tl(cpu_T0, cpu_A0, cpu_T1,
1328 s1->mem_index, ot | MO_LE);
1330 tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);
1331 gen_op_st_rm_T0_A0(s1, ot, d);
1333 gen_op_update1_cc();
1334 set_cc_op(s1, CC_OP_LOGICB + ot);
1337 if (s1->prefix & PREFIX_LOCK) {
1338 tcg_gen_atomic_or_fetch_tl(cpu_T0, cpu_A0, cpu_T1,
1339 s1->mem_index, ot | MO_LE);
1341 tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_T1);
1342 gen_op_st_rm_T0_A0(s1, ot, d);
1344 gen_op_update1_cc();
1345 set_cc_op(s1, CC_OP_LOGICB + ot);
1348 if (s1->prefix & PREFIX_LOCK) {
1349 tcg_gen_atomic_xor_fetch_tl(cpu_T0, cpu_A0, cpu_T1,
1350 s1->mem_index, ot | MO_LE);
1352 tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_T1);
1353 gen_op_st_rm_T0_A0(s1, ot, d);
1355 gen_op_update1_cc();
1356 set_cc_op(s1, CC_OP_LOGICB + ot);
1359 tcg_gen_mov_tl(cpu_cc_src, cpu_T1);
1360 tcg_gen_mov_tl(cpu_cc_srcT, cpu_T0);
1361 tcg_gen_sub_tl(cpu_cc_dst, cpu_T0, cpu_T1);
1362 set_cc_op(s1, CC_OP_SUBB + ot);
1367 /* if d == OR_TMP0, it means memory operand (address in A0) */
1368 static void gen_inc(DisasContext *s1, TCGMemOp ot, int d, int c)
1370 if (s1->prefix & PREFIX_LOCK) {
1371 tcg_gen_movi_tl(cpu_T0, c > 0 ? 1 : -1);
1372 tcg_gen_atomic_add_fetch_tl(cpu_T0, cpu_A0, cpu_T0,
1373 s1->mem_index, ot | MO_LE);
1376 gen_op_mov_v_reg(ot, cpu_T0, d);
1378 gen_op_ld_v(s1, ot, cpu_T0, cpu_A0);
1380 tcg_gen_addi_tl(cpu_T0, cpu_T0, (c > 0 ? 1 : -1));
1381 gen_op_st_rm_T0_A0(s1, ot, d);
1384 gen_compute_eflags_c(s1, cpu_cc_src);
1385 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
1386 set_cc_op(s1, (c > 0 ? CC_OP_INCB : CC_OP_DECB) + ot);
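/* Illustrative note: INC and DEC leave CF unchanged, which is why the code
 * above first saves the current carry into cc_src via gen_compute_eflags_c()
 * before switching to the CC_OP_INCB/CC_OP_DECB family, which recomputes the
 * other flags from cc_dst plus that saved carry. */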
1389 static void gen_shift_flags(DisasContext *s, TCGMemOp ot, TCGv result,
1390 TCGv shm1, TCGv count, bool is_right)
1392 TCGv_i32 z32, s32, oldop;
1395 /* Store the results into the CC variables. If we know that the
1396 variable must be dead, store unconditionally. Otherwise we must
1397 take care not to disrupt the current contents. */
1398 z_tl = tcg_const_tl(0);
1399 if (cc_op_live[s->cc_op] & USES_CC_DST) {
1400 tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
1401 result, cpu_cc_dst);
1403 tcg_gen_mov_tl(cpu_cc_dst, result);
1405 if (cc_op_live[s->cc_op] & USES_CC_SRC) {
1406 tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
1409 tcg_gen_mov_tl(cpu_cc_src, shm1);
1411 tcg_temp_free(z_tl);
1413 /* Get the two potential CC_OP values into temporaries. */
1414 tcg_gen_movi_i32(cpu_tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
1415 if (s->cc_op == CC_OP_DYNAMIC) {
1418 tcg_gen_movi_i32(cpu_tmp3_i32, s->cc_op);
1419 oldop = cpu_tmp3_i32;
1422 /* Conditionally store the CC_OP value. */
1423 z32 = tcg_const_i32(0);
1424 s32 = tcg_temp_new_i32();
1425 tcg_gen_trunc_tl_i32(s32, count);
1426 tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, cpu_tmp2_i32, oldop);
1427 tcg_temp_free_i32(z32);
1428 tcg_temp_free_i32(s32);
1430 /* The CC_OP value is no longer predictable. */
1431 set_cc_op(s, CC_OP_DYNAMIC);
1434 static void gen_shift_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
1435 int is_right, int is_arith)
1437 target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
1440 if (op1 == OR_TMP0) {
1441 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
1443 gen_op_mov_v_reg(ot, cpu_T0, op1);
1446 tcg_gen_andi_tl(cpu_T1, cpu_T1, mask);
1447 tcg_gen_subi_tl(cpu_tmp0, cpu_T1, 1);
1451 gen_exts(ot, cpu_T0);
1452 tcg_gen_sar_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
1453 tcg_gen_sar_tl(cpu_T0, cpu_T0, cpu_T1);
1455 gen_extu(ot, cpu_T0);
1456 tcg_gen_shr_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
1457 tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_T1);
1460 tcg_gen_shl_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
1461 tcg_gen_shl_tl(cpu_T0, cpu_T0, cpu_T1);
1465 gen_op_st_rm_T0_A0(s, ot, op1);
1467 gen_shift_flags(s, ot, cpu_T0, cpu_tmp0, cpu_T1, is_right);
1470 static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
1471 int is_right, int is_arith)
1473 int mask = (ot == MO_64 ? 0x3f : 0x1f);
1477 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
1479 gen_op_mov_v_reg(ot, cpu_T0, op1);
1485 gen_exts(ot, cpu_T0);
1486 tcg_gen_sari_tl(cpu_tmp4, cpu_T0, op2 - 1);
1487 tcg_gen_sari_tl(cpu_T0, cpu_T0, op2);
1489 gen_extu(ot, cpu_T0);
1490 tcg_gen_shri_tl(cpu_tmp4, cpu_T0, op2 - 1);
1491 tcg_gen_shri_tl(cpu_T0, cpu_T0, op2);
1494 tcg_gen_shli_tl(cpu_tmp4, cpu_T0, op2 - 1);
1495 tcg_gen_shli_tl(cpu_T0, cpu_T0, op2);
1500 gen_op_st_rm_T0_A0(s, ot, op1);
1502 /* update eflags if non-zero shift */
1504 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
1505 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
1506 set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
1510 static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right)
1512 target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
1516 if (op1 == OR_TMP0) {
1517 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
1519 gen_op_mov_v_reg(ot, cpu_T0, op1);
1522 tcg_gen_andi_tl(cpu_T1, cpu_T1, mask);
1526 /* Replicate the 8-bit input so that a 32-bit rotate works. */
1527 tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
1528 tcg_gen_muli_tl(cpu_T0, cpu_T0, 0x01010101);
1531 /* Replicate the 16-bit input so that a 32-bit rotate works. */
1532 tcg_gen_deposit_tl(cpu_T0, cpu_T0, cpu_T0, 16, 16);
1535 #ifdef TARGET_X86_64
1537 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
1538 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
1540 tcg_gen_rotr_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
1542 tcg_gen_rotl_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
1544 tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
1549 tcg_gen_rotr_tl(cpu_T0, cpu_T0, cpu_T1);
1551 tcg_gen_rotl_tl(cpu_T0, cpu_T0, cpu_T1);
1557 gen_op_st_rm_T0_A0(s, ot, op1);
1559 /* We'll need the flags computed into CC_SRC. */
1560 gen_compute_eflags(s);
1562 /* The value that was "rotated out" is now present at the other end
1563 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1564 since we've computed the flags into CC_SRC, these variables are
1567 tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask - 1);
1568 tcg_gen_shri_tl(cpu_cc_dst, cpu_T0, mask);
1569 tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
1571 tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask);
1572 tcg_gen_andi_tl(cpu_cc_dst, cpu_T0, 1);
1574 tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1575 tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1577 /* Now conditionally store the new CC_OP value. If the shift count
1578 is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
1579 Otherwise reuse CC_OP_ADCOX, which has the C and O flags split out
1580 exactly as we computed above. */
1581 t0 = tcg_const_i32(0);
1582 t1 = tcg_temp_new_i32();
1583 tcg_gen_trunc_tl_i32(t1, cpu_T1);
1584 tcg_gen_movi_i32(cpu_tmp2_i32, CC_OP_ADCOX);
1585 tcg_gen_movi_i32(cpu_tmp3_i32, CC_OP_EFLAGS);
1586 tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
1587 cpu_tmp2_i32, cpu_tmp3_i32);
1588 tcg_temp_free_i32(t0);
1589 tcg_temp_free_i32(t1);
1591 /* The CC_OP value is no longer predictable. */
1592 set_cc_op(s, CC_OP_DYNAMIC);
1595 static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
1598 int mask = (ot == MO_64 ? 0x3f : 0x1f);
1602 if (op1 == OR_TMP0) {
1603 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
1605 gen_op_mov_v_reg(ot, cpu_T0, op1);
1611 #ifdef TARGET_X86_64
1613 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
1615 tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
1617 tcg_gen_rotli_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
1619 tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
1624 tcg_gen_rotri_tl(cpu_T0, cpu_T0, op2);
1626 tcg_gen_rotli_tl(cpu_T0, cpu_T0, op2);
1637 shift = mask + 1 - shift;
1639 gen_extu(ot, cpu_T0);
1640 tcg_gen_shli_tl(cpu_tmp0, cpu_T0, shift);
1641 tcg_gen_shri_tl(cpu_T0, cpu_T0, mask + 1 - shift);
1642 tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_tmp0);
1648 gen_op_st_rm_T0_A0(s, ot, op1);
1651 /* Compute the flags into CC_SRC. */
1652 gen_compute_eflags(s);
1654 /* The value that was "rotated out" is now present at the other end
1655 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1656 since we've computed the flags into CC_SRC, these variables are
1659 tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask - 1);
1660 tcg_gen_shri_tl(cpu_cc_dst, cpu_T0, mask);
1661 tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
1663 tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask);
1664 tcg_gen_andi_tl(cpu_cc_dst, cpu_T0, 1);
1666 tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1667 tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1668 set_cc_op(s, CC_OP_ADCOX);
1672 /* XXX: add faster immediate = 1 case */
1673 static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
1676 gen_compute_eflags(s);
1677 assert(s->cc_op == CC_OP_EFLAGS);
1681 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
1683 gen_op_mov_v_reg(ot, cpu_T0, op1);
1688 gen_helper_rcrb(cpu_T0, cpu_env, cpu_T0, cpu_T1);
1691 gen_helper_rcrw(cpu_T0, cpu_env, cpu_T0, cpu_T1);
1694 gen_helper_rcrl(cpu_T0, cpu_env, cpu_T0, cpu_T1);
1696 #ifdef TARGET_X86_64
1698 gen_helper_rcrq(cpu_T0, cpu_env, cpu_T0, cpu_T1);
1707 gen_helper_rclb(cpu_T0, cpu_env, cpu_T0, cpu_T1);
1710 gen_helper_rclw(cpu_T0, cpu_env, cpu_T0, cpu_T1);
1713 gen_helper_rcll(cpu_T0, cpu_env, cpu_T0, cpu_T1);
1715 #ifdef TARGET_X86_64
1717 gen_helper_rclq(cpu_T0, cpu_env, cpu_T0, cpu_T1);
1725 gen_op_st_rm_T0_A0(s, ot, op1);
1728 /* XXX: add faster immediate case */
1729 static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
1730 bool is_right, TCGv count_in)
1732 target_ulong mask = (ot == MO_64 ? 63 : 31);
1736 if (op1 == OR_TMP0) {
1737 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
1739 gen_op_mov_v_reg(ot, cpu_T0, op1);
1742 count = tcg_temp_new();
1743 tcg_gen_andi_tl(count, count_in, mask);
1747 /* Note: we implement the Intel behaviour for shift count > 16.
1748 This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A
1749 portion by constructing it as a 32-bit value. */
1751 tcg_gen_deposit_tl(cpu_tmp0, cpu_T0, cpu_T1, 16, 16);
1752 tcg_gen_mov_tl(cpu_T1, cpu_T0);
1753 tcg_gen_mov_tl(cpu_T0, cpu_tmp0);
1755 tcg_gen_deposit_tl(cpu_T1, cpu_T0, cpu_T1, 16, 16);
1758 #ifdef TARGET_X86_64
1760 /* Concatenate the two 32-bit values and use a 64-bit shift. */
1761 tcg_gen_subi_tl(cpu_tmp0, count, 1);
1763 tcg_gen_concat_tl_i64(cpu_T0, cpu_T0, cpu_T1);
1764 tcg_gen_shr_i64(cpu_tmp0, cpu_T0, cpu_tmp0);
1765 tcg_gen_shr_i64(cpu_T0, cpu_T0, count);
1767 tcg_gen_concat_tl_i64(cpu_T0, cpu_T1, cpu_T0);
1768 tcg_gen_shl_i64(cpu_tmp0, cpu_T0, cpu_tmp0);
1769 tcg_gen_shl_i64(cpu_T0, cpu_T0, count);
1770 tcg_gen_shri_i64(cpu_tmp0, cpu_tmp0, 32);
1771 tcg_gen_shri_i64(cpu_T0, cpu_T0, 32);
1776 tcg_gen_subi_tl(cpu_tmp0, count, 1);
1778 tcg_gen_shr_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
1780 tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
1781 tcg_gen_shr_tl(cpu_T0, cpu_T0, count);
1782 tcg_gen_shl_tl(cpu_T1, cpu_T1, cpu_tmp4);
1784 tcg_gen_shl_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
1786 /* Only needed if count > 16, for Intel behaviour. */
1787 tcg_gen_subfi_tl(cpu_tmp4, 33, count);
1788 tcg_gen_shr_tl(cpu_tmp4, cpu_T1, cpu_tmp4);
1789 tcg_gen_or_tl(cpu_tmp0, cpu_tmp0, cpu_tmp4);
1792 tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
1793 tcg_gen_shl_tl(cpu_T0, cpu_T0, count);
1794 tcg_gen_shr_tl(cpu_T1, cpu_T1, cpu_tmp4);
1796 tcg_gen_movi_tl(cpu_tmp4, 0);
1797 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T1, count, cpu_tmp4,
1799 tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_T1);
1804 gen_op_st_rm_T0_A0(s, ot, op1);
1806 gen_shift_flags(s, ot, cpu_T0, cpu_tmp0, count, is_right);
1807 tcg_temp_free(count);
1810 static void gen_shift(DisasContext *s1, int op, TCGMemOp ot, int d, int s)
1813 gen_op_mov_v_reg(ot, cpu_T1, s);
1816 gen_rot_rm_T1(s1, ot, d, 0);
1819 gen_rot_rm_T1(s1, ot, d, 1);
1823 gen_shift_rm_T1(s1, ot, d, 0, 0);
1826 gen_shift_rm_T1(s1, ot, d, 1, 0);
1829 gen_shift_rm_T1(s1, ot, d, 1, 1);
1832 gen_rotc_rm_T1(s1, ot, d, 0);
1835 gen_rotc_rm_T1(s1, ot, d, 1);
1840 static void gen_shifti(DisasContext *s1, int op, TCGMemOp ot, int d, int c)
1844 gen_rot_rm_im(s1, ot, d, c, 0);
1847 gen_rot_rm_im(s1, ot, d, c, 1);
1851 gen_shift_rm_im(s1, ot, d, c, 0, 0);
1854 gen_shift_rm_im(s1, ot, d, c, 1, 0);
1857 gen_shift_rm_im(s1, ot, d, c, 1, 1);
1860 /* currently not optimized */
1861 tcg_gen_movi_tl(cpu_T1, c);
1862 gen_shift(s1, op, ot, d, OR_TMP1);
1867 /* Decompose an address. */
1869 typedef struct AddressParts {
1877 static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
1880 int def_seg, base, index, scale, mod, rm;
1889 mod = (modrm >> 6) & 3;
1891 base = rm | REX_B(s);
1894 /* Normally filtered out earlier, but including this path
1895 simplifies multi-byte nop, as well as bndcl, bndcu, bndcn. */
1904 int code = cpu_ldub_code(env, s->pc++);
1905 scale = (code >> 6) & 3;
1906 index = ((code >> 3) & 7) | REX_X(s);
1908 index = -1; /* no index */
1910 base = (code & 7) | REX_B(s);
1916 if ((base & 7) == 5) {
1918 disp = (int32_t)cpu_ldl_code(env, s->pc);
1920 if (CODE64(s) && !havesib) {
1922 disp += s->pc + s->rip_offset;
1927 disp = (int8_t)cpu_ldub_code(env, s->pc++);
1931 disp = (int32_t)cpu_ldl_code(env, s->pc);
1936 /* For correct popl handling with esp. */
1937 if (base == R_ESP && s->popl_esp_hack) {
1938 disp += s->popl_esp_hack;
1940 if (base == R_EBP || base == R_ESP) {
1949 disp = cpu_lduw_code(env, s->pc);
1953 } else if (mod == 1) {
1954 disp = (int8_t)cpu_ldub_code(env, s->pc++);
1956 disp = (int16_t)cpu_lduw_code(env, s->pc);
2001 return (AddressParts){ def_seg, base, index, scale, disp };
2004 /* Compute the address, with a minimum number of TCG ops. */
2005 static TCGv gen_lea_modrm_1(AddressParts a)
2012 ea = cpu_regs[a.index];
2014 tcg_gen_shli_tl(cpu_A0, cpu_regs[a.index], a.scale);
2018 tcg_gen_add_tl(cpu_A0, ea, cpu_regs[a.base]);
2021 } else if (a.base >= 0) {
2022 ea = cpu_regs[a.base];
2024 if (TCGV_IS_UNUSED(ea)) {
2025 tcg_gen_movi_tl(cpu_A0, a.disp);
2027 } else if (a.disp != 0) {
2028 tcg_gen_addi_tl(cpu_A0, ea, a.disp);
2035 static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
2037 AddressParts a = gen_lea_modrm_0(env, s, modrm);
2038 TCGv ea = gen_lea_modrm_1(a);
2039 gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);
2042 static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
2044 (void)gen_lea_modrm_0(env, s, modrm);
2047 /* Used for BNDCL, BNDCU, BNDCN. */
2048 static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm,
2049 TCGCond cond, TCGv_i64 bndv)
2051 TCGv ea = gen_lea_modrm_1(gen_lea_modrm_0(env, s, modrm));
2053 tcg_gen_extu_tl_i64(cpu_tmp1_i64, ea);
2055 tcg_gen_ext32u_i64(cpu_tmp1_i64, cpu_tmp1_i64);
2057 tcg_gen_setcond_i64(cond, cpu_tmp1_i64, cpu_tmp1_i64, bndv);
2058 tcg_gen_extrl_i64_i32(cpu_tmp2_i32, cpu_tmp1_i64);
2059 gen_helper_bndck(cpu_env, cpu_tmp2_i32);
2062 /* used for LEA and MOV AX, mem */
2063 static void gen_add_A0_ds_seg(DisasContext *s)
2065 gen_lea_v_seg(s, s->aflag, cpu_A0, R_DS, s->override);
2068 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg == OR_TMP0. */
2070 static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
2071 TCGMemOp ot, int reg, int is_store)
2075 mod = (modrm >> 6) & 3;
2076 rm = (modrm & 7) | REX_B(s);
2080 gen_op_mov_v_reg(ot, cpu_T0, reg);
2081 gen_op_mov_reg_v(ot, rm, cpu_T0);
2083 gen_op_mov_v_reg(ot, cpu_T0, rm);
2085 gen_op_mov_reg_v(ot, reg, cpu_T0);
2088 gen_lea_modrm(env, s, modrm);
2091 gen_op_mov_v_reg(ot, cpu_T0, reg);
2092 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
2094 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
2096 gen_op_mov_reg_v(ot, reg, cpu_T0);
2101 static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, TCGMemOp ot)
2107 ret = cpu_ldub_code(env, s->pc);
2111 ret = cpu_lduw_code(env, s->pc);
2115 #ifdef TARGET_X86_64
2118 ret = cpu_ldl_code(env, s->pc);
2127 static inline int insn_const_size(TCGMemOp ot)
2136 static inline bool use_goto_tb(DisasContext *s, target_ulong pc)
2138 #ifndef CONFIG_USER_ONLY
2139 return (pc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) ||
2140 (pc & TARGET_PAGE_MASK) == (s->pc_start & TARGET_PAGE_MASK);
2146 static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
2148 target_ulong pc = s->cs_base + eip;
2150 if (use_goto_tb(s, pc)) {
2151 /* jump to same page: we can use a direct jump */
2152 tcg_gen_goto_tb(tb_num);
2154 tcg_gen_exit_tb((uintptr_t)s->tb + tb_num);
2156 /* jump to another page: currently not optimized */
2162 static inline void gen_jcc(DisasContext *s, int b,
2163 target_ulong val, target_ulong next_eip)
2168 l1 = gen_new_label();
2171 gen_goto_tb(s, 0, next_eip);
2174 gen_goto_tb(s, 1, val);
2175 s->is_jmp = DISAS_TB_JUMP;
2177 l1 = gen_new_label();
2178 l2 = gen_new_label();
2181 gen_jmp_im(next_eip);
2191 static void gen_cmovcc1(CPUX86State *env, DisasContext *s, TCGMemOp ot, int b,
2196 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
2198 cc = gen_prepare_cc(s, b, cpu_T1);
2199 if (cc.mask != -1) {
2200 TCGv t0 = tcg_temp_new();
2201 tcg_gen_andi_tl(t0, cc.reg, cc.mask);
2205 cc.reg2 = tcg_const_tl(cc.imm);
2208 tcg_gen_movcond_tl(cc.cond, cpu_T0, cc.reg, cc.reg2,
2209 cpu_T0, cpu_regs[reg]);
2210 gen_op_mov_reg_v(ot, reg, cpu_T0);
2212 if (cc.mask != -1) {
2213 tcg_temp_free(cc.reg);
2216 tcg_temp_free(cc.reg2);
2220 static inline void gen_op_movl_T0_seg(int seg_reg)
2222 tcg_gen_ld32u_tl(cpu_T0, cpu_env,
2223 offsetof(CPUX86State,segs[seg_reg].selector));
2226 static inline void gen_op_movl_seg_T0_vm(int seg_reg)
2228 tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
2229 tcg_gen_st32_tl(cpu_T0, cpu_env,
2230 offsetof(CPUX86State,segs[seg_reg].selector));
2231 tcg_gen_shli_tl(cpu_seg_base[seg_reg], cpu_T0, 4);
2234 /* move T0 to seg_reg and compute if the CPU state may change. Never
2235 call this function with seg_reg == R_CS */
2236 static void gen_movl_seg_T0(DisasContext *s, int seg_reg)
2238 if (s->pe && !s->vm86) {
2239 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
2240 gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32);
2241 /* abort translation because the addseg value may change or
2242 because ss32 may change. For R_SS, translation must always
2243 stop, since special handling is needed to disable hardware
2244 interrupts for the next instruction */
2245 if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
2246 s->is_jmp = DISAS_TB_JUMP;
2248 gen_op_movl_seg_T0_vm(seg_reg);
2249 if (seg_reg == R_SS)
2250 s->is_jmp = DISAS_TB_JUMP;
2254 static inline int svm_is_rep(int prefixes)
2256 return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
2260 gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
2261 uint32_t type, uint64_t param)
2263 /* no SVM activated; fast case */
2264 if (likely(!(s->flags & HF_SVMI_MASK)))
2266 gen_update_cc_op(s);
2267 gen_jmp_im(pc_start - s->cs_base);
2268 gen_helper_svm_check_intercept_param(cpu_env, tcg_const_i32(type),
2269 tcg_const_i64(param));
2273 gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
2275 gen_svm_check_intercept_param(s, pc_start, type, 0);
2278 static inline void gen_stack_update(DisasContext *s, int addend)
2280 gen_op_add_reg_im(mo_stacksize(s), R_ESP, addend);
2283 /* Generate a push. It depends on ss32, addseg and dflag. */
2284 static void gen_push_v(DisasContext *s, TCGv val)
2286 TCGMemOp d_ot = mo_pushpop(s, s->dflag);
2287 TCGMemOp a_ot = mo_stacksize(s);
2288 int size = 1 << d_ot;
2289 TCGv new_esp = cpu_A0;
2291 tcg_gen_subi_tl(cpu_A0, cpu_regs[R_ESP], size);
2296 tcg_gen_mov_tl(new_esp, cpu_A0);
2298 gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
2301 gen_op_st_v(s, d_ot, val, cpu_A0);
2302 gen_op_mov_reg_v(a_ot, R_ESP, new_esp);
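/* Illustrative note: the store above is issued before ESP is written back, so
 * a faulting stack access leaves the guest ESP unchanged; the same ordering
 * concern is what makes the two-step pop below necessary for precise
 * exceptions. */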
2305 /* a two-step pop is necessary for precise exceptions */
2306 static TCGMemOp gen_pop_T0(DisasContext *s)
2308 TCGMemOp d_ot = mo_pushpop(s, s->dflag);
2310 gen_lea_v_seg(s, mo_stacksize(s), cpu_regs[R_ESP], R_SS, -1);
2311 gen_op_ld_v(s, d_ot, cpu_T0, cpu_A0);
2316 static inline void gen_pop_update(DisasContext *s, TCGMemOp ot)
2318 gen_stack_update(s, 1 << ot);
2321 static inline void gen_stack_A0(DisasContext *s)
2323 gen_lea_v_seg(s, s->ss32 ? MO_32 : MO_16, cpu_regs[R_ESP], R_SS, -1);
2326 static void gen_pusha(DisasContext *s)
2328 TCGMemOp s_ot = s->ss32 ? MO_32 : MO_16;
2329 TCGMemOp d_ot = s->dflag;
2330 int size = 1 << d_ot;
2333 for (i = 0; i < 8; i++) {
2334 tcg_gen_addi_tl(cpu_A0, cpu_regs[R_ESP], (i - 8) * size);
2335 gen_lea_v_seg(s, s_ot, cpu_A0, R_SS, -1);
2336 gen_op_st_v(s, d_ot, cpu_regs[7 - i], cpu_A0);
2339 gen_stack_update(s, -8 * size);
2342 static void gen_popa(DisasContext *s)
2344 TCGMemOp s_ot = s->ss32 ? MO_32 : MO_16;
2345 TCGMemOp d_ot = s->dflag;
2346 int size = 1 << d_ot;
2349 for (i = 0; i < 8; i++) {
2350 /* ESP is not reloaded */
2351 if (7 - i == R_ESP) {
2354 tcg_gen_addi_tl(cpu_A0, cpu_regs[R_ESP], i * size);
2355 gen_lea_v_seg(s, s_ot, cpu_A0, R_SS, -1);
2356 gen_op_ld_v(s, d_ot, cpu_T0, cpu_A0);
2357 gen_op_mov_reg_v(d_ot, 7 - i, cpu_T0);
2360 gen_stack_update(s, 8 * size);
2363 static void gen_enter(DisasContext *s, int esp_addend, int level)
2365 TCGMemOp d_ot = mo_pushpop(s, s->dflag);
2366 TCGMemOp a_ot = CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16;
2367 int size = 1 << d_ot;
2369 /* Push BP; compute FrameTemp into T1. */
2370 tcg_gen_subi_tl(cpu_T1, cpu_regs[R_ESP], size);
2371 gen_lea_v_seg(s, a_ot, cpu_T1, R_SS, -1);
2372 gen_op_st_v(s, d_ot, cpu_regs[R_EBP], cpu_A0);
2378 /* Copy level-1 pointers from the previous frame. */
2379 for (i = 1; i < level; ++i) {
2380 tcg_gen_subi_tl(cpu_A0, cpu_regs[R_EBP], size * i);
2381 gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
2382 gen_op_ld_v(s, d_ot, cpu_tmp0, cpu_A0);
2384 tcg_gen_subi_tl(cpu_A0, cpu_T1, size * i);
2385 gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
2386 gen_op_st_v(s, d_ot, cpu_tmp0, cpu_A0);
2389 /* Push the current FrameTemp as the last level. */
2390 tcg_gen_subi_tl(cpu_A0, cpu_T1, size * level);
2391 gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
2392 gen_op_st_v(s, d_ot, cpu_T1, cpu_A0);
2395 /* Copy the FrameTemp value to EBP. */
2396 gen_op_mov_reg_v(a_ot, R_EBP, cpu_T1);
2398 /* Compute the final value of ESP. */
2399 tcg_gen_subi_tl(cpu_T1, cpu_T1, esp_addend + size * level);
2400 gen_op_mov_reg_v(a_ot, R_ESP, cpu_T1);
2403 static void gen_leave(DisasContext *s)
2405 TCGMemOp d_ot = mo_pushpop(s, s->dflag);
2406 TCGMemOp a_ot = mo_stacksize(s);
2408 gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1);
2409 gen_op_ld_v(s, d_ot, cpu_T0, cpu_A0);
2411 tcg_gen_addi_tl(cpu_T1, cpu_regs[R_EBP], 1 << d_ot);
2413 gen_op_mov_reg_v(d_ot, R_EBP, cpu_T0);
2414 gen_op_mov_reg_v(a_ot, R_ESP, cpu_T1);
2417 static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
2419 gen_update_cc_op(s);
2420 gen_jmp_im(cur_eip);
2421 gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
2422 s->is_jmp = DISAS_TB_JUMP;
2425 /* Generate #UD for the current instruction. The assumption here is that
2426 the instruction is known, but it isn't allowed in the current cpu mode. */
2427 static void gen_illegal_opcode(DisasContext *s)
2429 gen_exception(s, EXCP06_ILLOP, s->pc_start - s->cs_base);
2432 /* Similarly, except that the assumption here is that we don't decode
2433 the instruction at all -- either a missing opcode, an unimplemented
2434 feature, or just a bogus instruction stream. */
2435 static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
2437 gen_illegal_opcode(s);
2439 if (qemu_loglevel_mask(LOG_UNIMP)) {
2440 target_ulong pc = s->pc_start, end = s->pc;
2442 qemu_log("ILLOPC: " TARGET_FMT_lx ":", pc);
2443 for (; pc < end; ++pc) {
2444 qemu_log(" %02x", cpu_ldub_code(env, pc));
2451 /* an interrupt is different from an exception because of the
2453 static void gen_interrupt(DisasContext *s, int intno,
2454 target_ulong cur_eip, target_ulong next_eip)
2456 gen_update_cc_op(s);
2457 gen_jmp_im(cur_eip);
2458 gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno),
2459 tcg_const_i32(next_eip - cur_eip));
2460 s->is_jmp = DISAS_TB_JUMP;
2463 static void gen_debug(DisasContext *s, target_ulong cur_eip)
2465 gen_update_cc_op(s);
2466 gen_jmp_im(cur_eip);
2467 gen_helper_debug(cpu_env);
2468 s->is_jmp = DISAS_TB_JUMP;
2471 static void gen_set_hflag(DisasContext *s, uint32_t mask)
2473 if ((s->flags & mask) == 0) {
2474 TCGv_i32 t = tcg_temp_new_i32();
2475 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags));
2476 tcg_gen_ori_i32(t, t, mask);
2477 tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags));
2478 tcg_temp_free_i32(t);
2483 static void gen_reset_hflag(DisasContext *s, uint32_t mask)
2485 if (s->flags & mask) {
2486 TCGv_i32 t = tcg_temp_new_i32();
2487 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags));
2488 tcg_gen_andi_i32(t, t, ~mask);
2489 tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags));
2490 tcg_temp_free_i32(t);
2495 /* Clear BND registers during legacy branches. */
2496 static void gen_bnd_jmp(DisasContext *s)
2498 /* Clear the registers only if BND prefix is missing, MPX is enabled,
2499 and if the BNDREGs are known to be in use (non-zero) already.
2500 The helper itself will check BNDPRESERVE at runtime. */
2501 if ((s->prefix & PREFIX_REPNZ) == 0
2502 && (s->flags & HF_MPX_EN_MASK) != 0
2503 && (s->flags & HF_MPX_IU_MASK) != 0) {
2504 gen_helper_bnd_jmp(cpu_env);
2508 /* Generate an end of block. Trace exception is also generated if needed.
2509 If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.
2510 If RECHECK_TF, emit a rechecking helper for #DB, ignoring the state of
2511 S->TF. This is used by the syscall/sysret insns. */
2512 static void gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf)
2514 gen_update_cc_op(s);
2516 /* If several instructions disable interrupts, only the first does it. */
2517 if (inhibit && !(s->flags & HF_INHIBIT_IRQ_MASK)) {
2518 gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
2520 gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
2523 if (s->tb->flags & HF_RF_MASK) {
2524 gen_helper_reset_rf(cpu_env);
2526 if (s->singlestep_enabled) {
2527 gen_helper_debug(cpu_env);
2528 } else if (recheck_tf) {
2529 gen_helper_rechecking_single_step(cpu_env);
2532 gen_helper_single_step(cpu_env);
2536 s->is_jmp = DISAS_TB_JUMP;
2540 If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set. */
2541 static void gen_eob_inhibit_irq(DisasContext *s, bool inhibit)
2543 gen_eob_worker(s, inhibit, false);
2546 /* End of block, resetting the inhibit irq flag. */
2547 static void gen_eob(DisasContext *s)
2549 gen_eob_worker(s, false, false);
2552 /* generate a jump to eip. No segment change must happen beforehand, as a
2553 direct jump to the next block (block chaining) may occur */
2554 static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
2556 gen_update_cc_op(s);
2557 set_cc_op(s, CC_OP_DYNAMIC);
2559 gen_goto_tb(s, tb_num, eip);
2560 s->is_jmp = DISAS_TB_JUMP;
2567 static void gen_jmp(DisasContext *s, target_ulong eip)
2569 gen_jmp_tb(s, eip, 0);
2572 static inline void gen_ldq_env_A0(DisasContext *s, int offset)
2574 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
2575 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
2578 static inline void gen_stq_env_A0(DisasContext *s, int offset)
2580 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
2581 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
2584 static inline void gen_ldo_env_A0(DisasContext *s, int offset)
2586 int mem_index = s->mem_index;
2587 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
2588 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
2589 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2590 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
2591 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
2594 static inline void gen_sto_env_A0(DisasContext *s, int offset)
2596 int mem_index = s->mem_index;
2597 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
2598 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
2599 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2600 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
2601 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
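/* Illustrative note: the "o" (octaword) helpers above move a 128-bit XMM
 * value as two little-endian 64-bit memory operations, ZMM_Q(0) at A0 and
 * ZMM_Q(1) at A0 + 8. */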
2604 static inline void gen_op_movo(int d_offset, int s_offset)
2606 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(0)));
2607 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(0)));
2608 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(1)));
2609 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(1)));
2612 static inline void gen_op_movq(int d_offset, int s_offset)
2614 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2615 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2618 static inline void gen_op_movl(int d_offset, int s_offset)
2620 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
2621 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
2624 static inline void gen_op_movq_env_0(int d_offset)
2626 tcg_gen_movi_i64(cpu_tmp1_i64, 0);
2627 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
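/* Helper pointer types for the SSE/MMX decode tables.  The name suffix
   describes the signature: the first character is the result operand
   ('0' = none, 'i' = TCGv_i32, 'l' = TCGv_i64) and the remaining characters
   are the inputs ('e' = cpu_env, 'p' = register pointer, 'i' = TCGv_i32,
   'l' = TCGv_i64, 't' = target-sized TCGv). */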
2630 typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
2631 typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
2632 typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val);
2633 typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val);
2634 typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
2635 typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2637 typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
2638 typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2641 #define SSE_SPECIAL ((void *)1)
2642 #define SSE_DUMMY ((void *)2)
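/* Sentinel values used in the decode tables instead of real helpers:
   SSE_SPECIAL marks opcodes handled by dedicated code in gen_sse() (the
   "sse_fn_epp == SSE_SPECIAL" paths), while SSE_DUMMY marks entries
   (emms/femms and the 3DNow! escape) that only need the table lookup to
   succeed before they are dispatched by their own handling below. */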
2644 #define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2645 #define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2646 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
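/* sse_op_table1 is indexed by the second opcode byte; each row holds up to
   four variants selected by b1, i.e. by the mandatory prefix: [0] none,
   [1] 0x66, [2] 0xF3, [3] 0xF2 (see the prefix checks at the top of
   gen_sse()).  The per-row comments list the mnemonics in that order. */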
2648 static const SSEFunc_0_epp sse_op_table1[256][4] = {
2649 /* 3DNow! extensions */
2650 [0x0e] = { SSE_DUMMY }, /* femms */
2651 [0x0f] = { SSE_DUMMY }, /* pf... */
2652 /* pure SSE operations */
2653 [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2654 [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2655 [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
2656 [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
2657 [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
2658 [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
2659 [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
2660 [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */
2662 [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2663 [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2664 [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
2665 [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
2666 [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttss2si, cvttsd2si */
2667 [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtss2si, cvtsd2si */
2668 [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
2669 [0x2f] = { gen_helper_comiss, gen_helper_comisd },
2670 [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
2671 [0x51] = SSE_FOP(sqrt),
2672 [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
2673 [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
2674 [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
2675 [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
2676 [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
2677 [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
2678 [0x58] = SSE_FOP(add),
2679 [0x59] = SSE_FOP(mul),
2680 [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
2681 gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
2682 [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
2683 [0x5c] = SSE_FOP(sub),
2684 [0x5d] = SSE_FOP(min),
2685 [0x5e] = SSE_FOP(div),
2686 [0x5f] = SSE_FOP(max),
2688 [0xc2] = SSE_FOP(cmpeq),
2689 [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
2690 (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */
2692 /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX. */
2693 [0x38] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2694 [0x3a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2696 /* MMX ops and their SSE extensions */
2697 [0x60] = MMX_OP2(punpcklbw),
2698 [0x61] = MMX_OP2(punpcklwd),
2699 [0x62] = MMX_OP2(punpckldq),
2700 [0x63] = MMX_OP2(packsswb),
2701 [0x64] = MMX_OP2(pcmpgtb),
2702 [0x65] = MMX_OP2(pcmpgtw),
2703 [0x66] = MMX_OP2(pcmpgtl),
2704 [0x67] = MMX_OP2(packuswb),
2705 [0x68] = MMX_OP2(punpckhbw),
2706 [0x69] = MMX_OP2(punpckhwd),
2707 [0x6a] = MMX_OP2(punpckhdq),
2708 [0x6b] = MMX_OP2(packssdw),
2709 [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
2710 [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
2711 [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
2712 [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2713 [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
2714 (SSEFunc_0_epp)gen_helper_pshufd_xmm,
2715 (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
2716 (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
2717 [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
2718 [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
2719 [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
2720 [0x74] = MMX_OP2(pcmpeqb),
2721 [0x75] = MMX_OP2(pcmpeqw),
2722 [0x76] = MMX_OP2(pcmpeql),
2723 [0x77] = { SSE_DUMMY }, /* emms */
2724 [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
2725 [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
2726 [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
2727 [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
2728 [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, movq */
2729 [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2730 [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
2731 [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
2732 [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
2733 [0xd1] = MMX_OP2(psrlw),
2734 [0xd2] = MMX_OP2(psrld),
2735 [0xd3] = MMX_OP2(psrlq),
2736 [0xd4] = MMX_OP2(paddq),
2737 [0xd5] = MMX_OP2(pmullw),
2738 [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movq2dq, movdq2q */
2739 [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
2740 [0xd8] = MMX_OP2(psubusb),
2741 [0xd9] = MMX_OP2(psubusw),
2742 [0xda] = MMX_OP2(pminub),
2743 [0xdb] = MMX_OP2(pand),
2744 [0xdc] = MMX_OP2(paddusb),
2745 [0xdd] = MMX_OP2(paddusw),
2746 [0xde] = MMX_OP2(pmaxub),
2747 [0xdf] = MMX_OP2(pandn),
2748 [0xe0] = MMX_OP2(pavgb),
2749 [0xe1] = MMX_OP2(psraw),
2750 [0xe2] = MMX_OP2(psrad),
2751 [0xe3] = MMX_OP2(pavgw),
2752 [0xe4] = MMX_OP2(pmulhuw),
2753 [0xe5] = MMX_OP2(pmulhw),
2754 [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
2755 [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
2756 [0xe8] = MMX_OP2(psubsb),
2757 [0xe9] = MMX_OP2(psubsw),
2758 [0xea] = MMX_OP2(pminsw),
2759 [0xeb] = MMX_OP2(por),
2760 [0xec] = MMX_OP2(paddsb),
2761 [0xed] = MMX_OP2(paddsw),
2762 [0xee] = MMX_OP2(pmaxsw),
2763 [0xef] = MMX_OP2(pxor),
2764 [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
2765 [0xf1] = MMX_OP2(psllw),
2766 [0xf2] = MMX_OP2(pslld),
2767 [0xf3] = MMX_OP2(psllq),
2768 [0xf4] = MMX_OP2(pmuludq),
2769 [0xf5] = MMX_OP2(pmaddwd),
2770 [0xf6] = MMX_OP2(psadbw),
2771 [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
2772 (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
2773 [0xf8] = MMX_OP2(psubb),
2774 [0xf9] = MMX_OP2(psubw),
2775 [0xfa] = MMX_OP2(psubl),
2776 [0xfb] = MMX_OP2(psubq),
2777 [0xfc] = MMX_OP2(paddb),
2778 [0xfd] = MMX_OP2(paddw),
2779 [0xfe] = MMX_OP2(paddl),
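/* sse_op_table2 covers the immediate-count shifts (0F 71/72/73 /r, ib).
   Rows come in groups of eight: +0 for the word shifts (0x71), +8 for the
   dword shifts (0x72), +16 for the qword/dqword shifts (0x73), with the
   ModRM reg field selecting the entry within a group; column [0] is the MMX
   helper and column [1] the XMM helper. */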
2782 static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
2783 [0 + 2] = MMX_OP2(psrlw),
2784 [0 + 4] = MMX_OP2(psraw),
2785 [0 + 6] = MMX_OP2(psllw),
2786 [8 + 2] = MMX_OP2(psrld),
2787 [8 + 4] = MMX_OP2(psrad),
2788 [8 + 6] = MMX_OP2(pslld),
2789 [16 + 2] = MMX_OP2(psrlq),
2790 [16 + 3] = { NULL, gen_helper_psrldq_xmm },
2791 [16 + 6] = MMX_OP2(psllq),
2792 [16 + 7] = { NULL, gen_helper_pslldq_xmm },
2795 static const SSEFunc_0_epi sse_op_table3ai[] = {
2796 gen_helper_cvtsi2ss,
2800 #ifdef TARGET_X86_64
2801 static const SSEFunc_0_epl sse_op_table3aq[] = {
2802 gen_helper_cvtsq2ss,
2807 static const SSEFunc_i_ep sse_op_table3bi[] = {
2808 gen_helper_cvttss2si,
2809 gen_helper_cvtss2si,
2810 gen_helper_cvttsd2si,
2814 #ifdef TARGET_X86_64
2815 static const SSEFunc_l_ep sse_op_table3bq[] = {
2816 gen_helper_cvttss2sq,
2817 gen_helper_cvtss2sq,
2818 gen_helper_cvttsd2sq,
2823 static const SSEFunc_0_epp sse_op_table4[8][4] = {
2834 static const SSEFunc_0_epp sse_op_table5[256] = {
2835 [0x0c] = gen_helper_pi2fw,
2836 [0x0d] = gen_helper_pi2fd,
2837 [0x1c] = gen_helper_pf2iw,
2838 [0x1d] = gen_helper_pf2id,
2839 [0x8a] = gen_helper_pfnacc,
2840 [0x8e] = gen_helper_pfpnacc,
2841 [0x90] = gen_helper_pfcmpge,
2842 [0x94] = gen_helper_pfmin,
2843 [0x96] = gen_helper_pfrcp,
2844 [0x97] = gen_helper_pfrsqrt,
2845 [0x9a] = gen_helper_pfsub,
2846 [0x9e] = gen_helper_pfadd,
2847 [0xa0] = gen_helper_pfcmpgt,
2848 [0xa4] = gen_helper_pfmax,
2849 [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
2850 [0xa7] = gen_helper_movq, /* pfrsqit1 */
2851 [0xaa] = gen_helper_pfsubr,
2852 [0xae] = gen_helper_pfacc,
2853 [0xb0] = gen_helper_pfcmpeq,
2854 [0xb4] = gen_helper_pfmul,
2855 [0xb6] = gen_helper_movq, /* pfrcpit2 */
2856 [0xb7] = gen_helper_pmulhrw_mmx,
2857 [0xbb] = gen_helper_pswapd,
2858 [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
2861 struct SSEOpHelper_epp {
2862 SSEFunc_0_epp op[2];
2866 struct SSEOpHelper_eppi {
2867 SSEFunc_0_eppi op[2];
2871 #define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
2872 #define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
2873 #define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
2874 #define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
2875 #define PCLMULQDQ_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, \
2876 CPUID_EXT_PCLMULQDQ }
2877 #define AESNI_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_AES }
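/* These macros pair each helper with the CPUID feature bit that gen_sse()
   checks (via ext_mask) before accepting the opcode.  For the 0F 38 / 0F 3A
   groups only the 0x66-prefixed XMM form has a helper, so op[0] (the MMX
   column) stays NULL except for the SSSE3 operations, which also exist on
   MMX registers. */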
2879 static const struct SSEOpHelper_epp sse_op_table6[256] = {
2880 [0x00] = SSSE3_OP(pshufb),
2881 [0x01] = SSSE3_OP(phaddw),
2882 [0x02] = SSSE3_OP(phaddd),
2883 [0x03] = SSSE3_OP(phaddsw),
2884 [0x04] = SSSE3_OP(pmaddubsw),
2885 [0x05] = SSSE3_OP(phsubw),
2886 [0x06] = SSSE3_OP(phsubd),
2887 [0x07] = SSSE3_OP(phsubsw),
2888 [0x08] = SSSE3_OP(psignb),
2889 [0x09] = SSSE3_OP(psignw),
2890 [0x0a] = SSSE3_OP(psignd),
2891 [0x0b] = SSSE3_OP(pmulhrsw),
2892 [0x10] = SSE41_OP(pblendvb),
2893 [0x14] = SSE41_OP(blendvps),
2894 [0x15] = SSE41_OP(blendvpd),
2895 [0x17] = SSE41_OP(ptest),
2896 [0x1c] = SSSE3_OP(pabsb),
2897 [0x1d] = SSSE3_OP(pabsw),
2898 [0x1e] = SSSE3_OP(pabsd),
2899 [0x20] = SSE41_OP(pmovsxbw),
2900 [0x21] = SSE41_OP(pmovsxbd),
2901 [0x22] = SSE41_OP(pmovsxbq),
2902 [0x23] = SSE41_OP(pmovsxwd),
2903 [0x24] = SSE41_OP(pmovsxwq),
2904 [0x25] = SSE41_OP(pmovsxdq),
2905 [0x28] = SSE41_OP(pmuldq),
2906 [0x29] = SSE41_OP(pcmpeqq),
2907 [0x2a] = SSE41_SPECIAL, /* movntdqa */
2908 [0x2b] = SSE41_OP(packusdw),
2909 [0x30] = SSE41_OP(pmovzxbw),
2910 [0x31] = SSE41_OP(pmovzxbd),
2911 [0x32] = SSE41_OP(pmovzxbq),
2912 [0x33] = SSE41_OP(pmovzxwd),
2913 [0x34] = SSE41_OP(pmovzxwq),
2914 [0x35] = SSE41_OP(pmovzxdq),
2915 [0x37] = SSE42_OP(pcmpgtq),
2916 [0x38] = SSE41_OP(pminsb),
2917 [0x39] = SSE41_OP(pminsd),
2918 [0x3a] = SSE41_OP(pminuw),
2919 [0x3b] = SSE41_OP(pminud),
2920 [0x3c] = SSE41_OP(pmaxsb),
2921 [0x3d] = SSE41_OP(pmaxsd),
2922 [0x3e] = SSE41_OP(pmaxuw),
2923 [0x3f] = SSE41_OP(pmaxud),
2924 [0x40] = SSE41_OP(pmulld),
2925 [0x41] = SSE41_OP(phminposuw),
2926 [0xdb] = AESNI_OP(aesimc),
2927 [0xdc] = AESNI_OP(aesenc),
2928 [0xdd] = AESNI_OP(aesenclast),
2929 [0xde] = AESNI_OP(aesdec),
2930 [0xdf] = AESNI_OP(aesdeclast),
2933 static const struct SSEOpHelper_eppi sse_op_table7[256] = {
2934 [0x08] = SSE41_OP(roundps),
2935 [0x09] = SSE41_OP(roundpd),
2936 [0x0a] = SSE41_OP(roundss),
2937 [0x0b] = SSE41_OP(roundsd),
2938 [0x0c] = SSE41_OP(blendps),
2939 [0x0d] = SSE41_OP(blendpd),
2940 [0x0e] = SSE41_OP(pblendw),
2941 [0x0f] = SSSE3_OP(palignr),
2942 [0x14] = SSE41_SPECIAL, /* pextrb */
2943 [0x15] = SSE41_SPECIAL, /* pextrw */
2944 [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
2945 [0x17] = SSE41_SPECIAL, /* extractps */
2946 [0x20] = SSE41_SPECIAL, /* pinsrb */
2947 [0x21] = SSE41_SPECIAL, /* insertps */
2948 [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
2949 [0x40] = SSE41_OP(dpps),
2950 [0x41] = SSE41_OP(dppd),
2951 [0x42] = SSE41_OP(mpsadbw),
2952 [0x44] = PCLMULQDQ_OP(pclmulqdq),
2953 [0x60] = SSE42_OP(pcmpestrm),
2954 [0x61] = SSE42_OP(pcmpestri),
2955 [0x62] = SSE42_OP(pcmpistrm),
2956 [0x63] = SSE42_OP(pcmpistri),
2957 [0xdf] = AESNI_OP(aeskeygenassist),
2960 static void gen_sse(CPUX86State *env, DisasContext *s, int b,
2961 target_ulong pc_start, int rex_r)
2963 int b1, op1_offset, op2_offset, is_xmm, val;
2964 int modrm, mod, rm, reg;
2965 SSEFunc_0_epp sse_fn_epp;
2966 SSEFunc_0_eppi sse_fn_eppi;
2967 SSEFunc_0_ppi sse_fn_ppi;
2968 SSEFunc_0_eppt sse_fn_eppt;
2972 if (s->prefix & PREFIX_DATA)
2974 else if (s->prefix & PREFIX_REPZ)
2976 else if (s->prefix & PREFIX_REPNZ)
2980 sse_fn_epp = sse_op_table1[b][b1];
2984 if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
2994 /* simple MMX/SSE operation */
2995 if (s->flags & HF_TS_MASK) {
2996 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
2999 if (s->flags & HF_EM_MASK) {
3001 gen_illegal_opcode(s);
3005 && !(s->flags & HF_OSFXSR_MASK)
3006 && ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))) {
3010 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) {
3011 /* If we were fully decoding this we might use illegal_op. */
3015 gen_helper_emms(cpu_env);
3020 gen_helper_emms(cpu_env);
3023 /* prepare MMX state (XXX: optimize by storing fptt and fptags in
3024 the static cpu state) */
3026 gen_helper_enter_mmx(cpu_env);
3029 modrm = cpu_ldub_code(env, s->pc++);
3030 reg = ((modrm >> 3) & 7);
3033 mod = (modrm >> 6) & 3;
3034 if (sse_fn_epp == SSE_SPECIAL) {
3037 case 0x0e7: /* movntq */
3041 gen_lea_modrm(env, s, modrm);
3042 gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
3044 case 0x1e7: /* movntdq */
3045 case 0x02b: /* movntps */
3046 case 0x12b: /* movntpd */
3049 gen_lea_modrm(env, s, modrm);
3050 gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3052 case 0x3f0: /* lddqu */
3055 gen_lea_modrm(env, s, modrm);
3056 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3058 case 0x22b: /* movntss */
3059 case 0x32b: /* movntsd */
3062 gen_lea_modrm(env, s, modrm);
3064 gen_stq_env_A0(s, offsetof(CPUX86State,
3065 xmm_regs[reg].ZMM_Q(0)));
3067 tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
3068 xmm_regs[reg].ZMM_L(0)));
3069 gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
3072 case 0x6e: /* movd mm, ea */
3073 #ifdef TARGET_X86_64
3074 if (s->dflag == MO_64) {
3075 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
3076 tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
3080 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
3081 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3082 offsetof(CPUX86State,fpregs[reg].mmx));
3083 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
3084 gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
3087 case 0x16e: /* movd xmm, ea */
3088 #ifdef TARGET_X86_64
3089 if (s->dflag == MO_64) {
3090 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
3091 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3092 offsetof(CPUX86State,xmm_regs[reg]));
3093 gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T0);
3097 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
3098 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3099 offsetof(CPUX86State,xmm_regs[reg]));
3100 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
3101 gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
3104 case 0x6f: /* movq mm, ea */
3106 gen_lea_modrm(env, s, modrm);
3107 gen_ldq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
3110 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
3111 offsetof(CPUX86State,fpregs[rm].mmx));
3112 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
3113 offsetof(CPUX86State,fpregs[reg].mmx));
3116 case 0x010: /* movups */
3117 case 0x110: /* movupd */
3118 case 0x028: /* movaps */
3119 case 0x128: /* movapd */
3120 case 0x16f: /* movdqa xmm, ea */
3121 case 0x26f: /* movdqu xmm, ea */
3123 gen_lea_modrm(env, s, modrm);
3124 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3126 rm = (modrm & 7) | REX_B(s);
3127 gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
3128 offsetof(CPUX86State,xmm_regs[rm]));
3131 case 0x210: /* movss xmm, ea */
3133 gen_lea_modrm(env, s, modrm);
3134 gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
3135 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
3136 tcg_gen_movi_tl(cpu_T0, 0);
3137 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)));
3138 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
3139 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
3141 rm = (modrm & 7) | REX_B(s);
3142 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
3143 offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)));
3146 case 0x310: /* movsd xmm, ea */
3148 gen_lea_modrm(env, s, modrm);
3149 gen_ldq_env_A0(s, offsetof(CPUX86State,
3150 xmm_regs[reg].ZMM_Q(0)));
3151 tcg_gen_movi_tl(cpu_T0, 0);
3152 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
3153 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
3155 rm = (modrm & 7) | REX_B(s);
3156 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
3157 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
3160 case 0x012: /* movlps */
3161 case 0x112: /* movlpd */
3163 gen_lea_modrm(env, s, modrm);
3164 gen_ldq_env_A0(s, offsetof(CPUX86State,
3165 xmm_regs[reg].ZMM_Q(0)));
3168 rm = (modrm & 7) | REX_B(s);
3169 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
3170 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(1)));
3173 case 0x212: /* movsldup */
3175 gen_lea_modrm(env, s, modrm);
3176 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3178 rm = (modrm & 7) | REX_B(s);
3179 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
3180 offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)));
3181 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)),
3182 offsetof(CPUX86State,xmm_regs[rm].ZMM_L(2)));
3184 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)),
3185 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
3186 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)),
3187 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
3189 case 0x312: /* movddup */
3191 gen_lea_modrm(env, s, modrm);
3192 gen_ldq_env_A0(s, offsetof(CPUX86State,
3193 xmm_regs[reg].ZMM_Q(0)));
3195 rm = (modrm & 7) | REX_B(s);
3196 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
3197 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
3199 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)),
3200 offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
3202 case 0x016: /* movhps */
3203 case 0x116: /* movhpd */
3205 gen_lea_modrm(env, s, modrm);
3206 gen_ldq_env_A0(s, offsetof(CPUX86State,
3207 xmm_regs[reg].ZMM_Q(1)));
3210 rm = (modrm & 7) | REX_B(s);
3211 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)),
3212 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
3215 case 0x216: /* movshdup */
3217 gen_lea_modrm(env, s, modrm);
3218 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3220 rm = (modrm & 7) | REX_B(s);
3221 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)),
3222 offsetof(CPUX86State,xmm_regs[rm].ZMM_L(1)));
3223 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)),
3224 offsetof(CPUX86State,xmm_regs[rm].ZMM_L(3)));
3226 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
3227 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)));
3228 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)),
3229 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
3234 int bit_index, field_length;
3236 if (b1 == 1 && reg != 0)
3238 field_length = cpu_ldub_code(env, s->pc++) & 0x3F;
3239 bit_index = cpu_ldub_code(env, s->pc++) & 0x3F;
3240 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3241 offsetof(CPUX86State,xmm_regs[reg]));
3243 gen_helper_extrq_i(cpu_env, cpu_ptr0,
3244 tcg_const_i32(bit_index),
3245 tcg_const_i32(field_length));
3247 gen_helper_insertq_i(cpu_env, cpu_ptr0,
3248 tcg_const_i32(bit_index),
3249 tcg_const_i32(field_length));
3252 case 0x7e: /* movd ea, mm */
3253 #ifdef TARGET_X86_64
3254 if (s->dflag == MO_64) {
3255 tcg_gen_ld_i64(cpu_T0, cpu_env,
3256 offsetof(CPUX86State,fpregs[reg].mmx));
3257 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
3261 tcg_gen_ld32u_tl(cpu_T0, cpu_env,
3262 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
3263 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
3266 case 0x17e: /* movd ea, xmm */
3267 #ifdef TARGET_X86_64
3268 if (s->dflag == MO_64) {
3269 tcg_gen_ld_i64(cpu_T0, cpu_env,
3270 offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
3271 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
3275 tcg_gen_ld32u_tl(cpu_T0, cpu_env,
3276 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
3277 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
3280 case 0x27e: /* movq xmm, ea */
3282 gen_lea_modrm(env, s, modrm);
3283 gen_ldq_env_A0(s, offsetof(CPUX86State,
3284 xmm_regs[reg].ZMM_Q(0)));
3286 rm = (modrm & 7) | REX_B(s);
3287 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
3288 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
3290 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)));
3292 case 0x7f: /* movq ea, mm */
3294 gen_lea_modrm(env, s, modrm);
3295 gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
3298 gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
3299 offsetof(CPUX86State,fpregs[reg].mmx));
3302 case 0x011: /* movups */
3303 case 0x111: /* movupd */
3304 case 0x029: /* movaps */
3305 case 0x129: /* movapd */
3306 case 0x17f: /* movdqa ea, xmm */
3307 case 0x27f: /* movdqu ea, xmm */
3309 gen_lea_modrm(env, s, modrm);
3310 gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3312 rm = (modrm & 7) | REX_B(s);
3313 gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
3314 offsetof(CPUX86State,xmm_regs[reg]));
3317 case 0x211: /* movss ea, xmm */
3319 gen_lea_modrm(env, s, modrm);
3320 tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
3321 gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
3323 rm = (modrm & 7) | REX_B(s);
3324 gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)),
3325 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
3328 case 0x311: /* movsd ea, xmm */
3330 gen_lea_modrm(env, s, modrm);
3331 gen_stq_env_A0(s, offsetof(CPUX86State,
3332 xmm_regs[reg].ZMM_Q(0)));
3334 rm = (modrm & 7) | REX_B(s);
3335 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)),
3336 offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
3339 case 0x013: /* movlps */
3340 case 0x113: /* movlpd */
3342 gen_lea_modrm(env, s, modrm);
3343 gen_stq_env_A0(s, offsetof(CPUX86State,
3344 xmm_regs[reg].ZMM_Q(0)));
3349 case 0x017: /* movhps */
3350 case 0x117: /* movhpd */
3352 gen_lea_modrm(env, s, modrm);
3353 gen_stq_env_A0(s, offsetof(CPUX86State,
3354 xmm_regs[reg].ZMM_Q(1)));
3359 case 0x71: /* shift mm, im */
3362 case 0x171: /* shift xmm, im */
3368 val = cpu_ldub_code(env, s->pc++);
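/* The shift count is the imm8 following ModRM; it is staged into
   xmm_t0/mmx_t0 and the helper is taken from sse_op_table2, indexed as
   ((b - 1) & 3) * 8 + reg.  The "& 3" discards the prefix bits of b, so
   0x71/0x171 map to group 0 (word), 0x72 to 1 (dword) and 0x73 to 2
   (qword).  E.g. PSRLW xmm, imm8 (66 0F 71 /2): group 0, reg 2, and
   b1 == 1 selects gen_helper_psrlw_xmm. */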
3370 tcg_gen_movi_tl(cpu_T0, val);
3371 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
3372 tcg_gen_movi_tl(cpu_T0, 0);
3373 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(1)));
3374 op1_offset = offsetof(CPUX86State,xmm_t0);
3376 tcg_gen_movi_tl(cpu_T0, val);
3377 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
3378 tcg_gen_movi_tl(cpu_T0, 0);
3379 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
3380 op1_offset = offsetof(CPUX86State,mmx_t0);
3382 sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 +
3383 (((modrm >> 3)) & 7)][b1];
3388 rm = (modrm & 7) | REX_B(s);
3389 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3392 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3394 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3395 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
3396 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
3398 case 0x050: /* movmskps */
3399 rm = (modrm & 7) | REX_B(s);
3400 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3401 offsetof(CPUX86State,xmm_regs[rm]));
3402 gen_helper_movmskps(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3403 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
3405 case 0x150: /* movmskpd */
3406 rm = (modrm & 7) | REX_B(s);
3407 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3408 offsetof(CPUX86State,xmm_regs[rm]));
3409 gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3410 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
3412 case 0x02a: /* cvtpi2ps */
3413 case 0x12a: /* cvtpi2pd */
3414 gen_helper_enter_mmx(cpu_env);
3416 gen_lea_modrm(env, s, modrm);
3417 op2_offset = offsetof(CPUX86State,mmx_t0);
3418 gen_ldq_env_A0(s, op2_offset);
3421 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3423 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3424 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3425 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3428 gen_helper_cvtpi2ps(cpu_env, cpu_ptr0, cpu_ptr1);
3432 gen_helper_cvtpi2pd(cpu_env, cpu_ptr0, cpu_ptr1);
3436 case 0x22a: /* cvtsi2ss */
3437 case 0x32a: /* cvtsi2sd */
3438 ot = mo_64_32(s->dflag);
3439 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3440 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3441 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3443 SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1];
3444 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
3445 sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32);
3447 #ifdef TARGET_X86_64
3448 SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1];
3449 sse_fn_epl(cpu_env, cpu_ptr0, cpu_T0);
3455 case 0x02c: /* cvttps2pi */
3456 case 0x12c: /* cvttpd2pi */
3457 case 0x02d: /* cvtps2pi */
3458 case 0x12d: /* cvtpd2pi */
3459 gen_helper_enter_mmx(cpu_env);
3461 gen_lea_modrm(env, s, modrm);
3462 op2_offset = offsetof(CPUX86State,xmm_t0);
3463 gen_ldo_env_A0(s, op2_offset);
3465 rm = (modrm & 7) | REX_B(s);
3466 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3468 op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
3469 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3470 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3473 gen_helper_cvttps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3476 gen_helper_cvttpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3479 gen_helper_cvtps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3482 gen_helper_cvtpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3486 case 0x22c: /* cvttss2si */
3487 case 0x32c: /* cvttsd2si */
3488 case 0x22d: /* cvtss2si */
3489 case 0x32d: /* cvtsd2si */
3490 ot = mo_64_32(s->dflag);
3492 gen_lea_modrm(env, s, modrm);
3494 gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_Q(0)));
3496 gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
3497 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
3499 op2_offset = offsetof(CPUX86State,xmm_t0);
3501 rm = (modrm & 7) | REX_B(s);
3502 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3504 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3506 SSEFunc_i_ep sse_fn_i_ep =
3507 sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
3508 sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3509 tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
3511 #ifdef TARGET_X86_64
3512 SSEFunc_l_ep sse_fn_l_ep =
3513 sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
3514 sse_fn_l_ep(cpu_T0, cpu_env, cpu_ptr0);
3519 gen_op_mov_reg_v(ot, reg, cpu_T0);
3521 case 0xc4: /* pinsrw */
3524 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
3525 val = cpu_ldub_code(env, s->pc++);
3528 tcg_gen_st16_tl(cpu_T0, cpu_env,
3529 offsetof(CPUX86State,xmm_regs[reg].ZMM_W(val)));
3532 tcg_gen_st16_tl(cpu_T0, cpu_env,
3533 offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
3536 case 0xc5: /* pextrw */
3540 ot = mo_64_32(s->dflag);
3541 val = cpu_ldub_code(env, s->pc++);
3544 rm = (modrm & 7) | REX_B(s);
3545 tcg_gen_ld16u_tl(cpu_T0, cpu_env,
3546 offsetof(CPUX86State,xmm_regs[rm].ZMM_W(val)));
3550 tcg_gen_ld16u_tl(cpu_T0, cpu_env,
3551 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
3553 reg = ((modrm >> 3) & 7) | rex_r;
3554 gen_op_mov_reg_v(ot, reg, cpu_T0);
3556 case 0x1d6: /* movq ea, xmm */
3558 gen_lea_modrm(env, s, modrm);
3559 gen_stq_env_A0(s, offsetof(CPUX86State,
3560 xmm_regs[reg].ZMM_Q(0)));
3562 rm = (modrm & 7) | REX_B(s);
3563 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)),
3564 offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
3565 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(1)));
3568 case 0x2d6: /* movq2dq */
3569 gen_helper_enter_mmx(cpu_env);
3571 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
3572 offsetof(CPUX86State,fpregs[rm].mmx));
3573 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)));
3575 case 0x3d6: /* movdq2q */
3576 gen_helper_enter_mmx(cpu_env);
3577 rm = (modrm & 7) | REX_B(s);
3578 gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
3579 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
3581 case 0xd7: /* pmovmskb */
3586 rm = (modrm & 7) | REX_B(s);
3587 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
3588 gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3591 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
3592 gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3594 reg = ((modrm >> 3) & 7) | rex_r;
3595 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
3601 if ((b & 0xf0) == 0xf0) {
3604 modrm = cpu_ldub_code(env, s->pc++);
3606 reg = ((modrm >> 3) & 7) | rex_r;
3607 mod = (modrm >> 6) & 3;
3612 sse_fn_epp = sse_op_table6[b].op[b1];
3616 if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
3620 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3622 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3624 op2_offset = offsetof(CPUX86State,xmm_t0);
3625 gen_lea_modrm(env, s, modrm);
3627 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3628 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3629 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3630 gen_ldq_env_A0(s, op2_offset +
3631 offsetof(ZMMReg, ZMM_Q(0)));
3633 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3634 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3635 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
3636 s->mem_index, MO_LEUL);
3637 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
3638 offsetof(ZMMReg, ZMM_L(0)));
3640 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3641 tcg_gen_qemu_ld_tl(cpu_tmp0, cpu_A0,
3642 s->mem_index, MO_LEUW);
3643 tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
3644 offsetof(ZMMReg, ZMM_W(0)));
3646 case 0x2a: /* movntdqa */
3647 gen_ldo_env_A0(s, op1_offset);
3650 gen_ldo_env_A0(s, op2_offset);
3654 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3656 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3658 op2_offset = offsetof(CPUX86State,mmx_t0);
3659 gen_lea_modrm(env, s, modrm);
3660 gen_ldq_env_A0(s, op2_offset);
3663 if (sse_fn_epp == SSE_SPECIAL) {
3667 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3668 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3669 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
3672 set_cc_op(s, CC_OP_EFLAGS);
3679 /* Various integer extensions at 0f 38 f[0-f]. */
3680 b = modrm | (b1 << 8);
3681 modrm = cpu_ldub_code(env, s->pc++);
3682 reg = ((modrm >> 3) & 7) | rex_r;
3685 case 0x3f0: /* crc32 Gd,Eb */
3686 case 0x3f1: /* crc32 Gd,Ey */
3688 if (!(s->cpuid_ext_features & CPUID_EXT_SSE42)) {
3691 if ((b & 0xff) == 0xf0) {
3693 } else if (s->dflag != MO_64) {
3694 ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
3699 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[reg]);
3700 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3701 gen_helper_crc32(cpu_T0, cpu_tmp2_i32,
3702 cpu_T0, tcg_const_i32(8 << ot));
3704 ot = mo_64_32(s->dflag);
3705 gen_op_mov_reg_v(ot, reg, cpu_T0);
3708 case 0x1f0: /* crc32 or movbe */
3710 /* For these insns, the f3 prefix is supposed to have priority
3711 over the 66 prefix, but that's not what we implement above
3713 if (s->prefix & PREFIX_REPNZ) {
3717 case 0x0f0: /* movbe Gy,My */
3718 case 0x0f1: /* movbe My,Gy */
3719 if (!(s->cpuid_ext_features & CPUID_EXT_MOVBE)) {
3722 if (s->dflag != MO_64) {
3723 ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
3728 gen_lea_modrm(env, s, modrm);
3730 tcg_gen_qemu_ld_tl(cpu_T0, cpu_A0,
3731 s->mem_index, ot | MO_BE);
3732 gen_op_mov_reg_v(ot, reg, cpu_T0);
3734 tcg_gen_qemu_st_tl(cpu_regs[reg], cpu_A0,
3735 s->mem_index, ot | MO_BE);
3739 case 0x0f2: /* andn Gy, By, Ey */
3740 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
3741 || !(s->prefix & PREFIX_VEX)
3745 ot = mo_64_32(s->dflag);
3746 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3747 tcg_gen_andc_tl(cpu_T0, cpu_regs[s->vex_v], cpu_T0);
3748 gen_op_mov_reg_v(ot, reg, cpu_T0);
3749 gen_op_update1_cc();
3750 set_cc_op(s, CC_OP_LOGICB + ot);
3753 case 0x0f7: /* bextr Gy, Ey, By */
3754 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
3755 || !(s->prefix & PREFIX_VEX)
3759 ot = mo_64_32(s->dflag);
3763 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
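/* BEXTR Gy, Ey, By: the control register By packs the starting bit position
   in bits [7:0] and the field length in bits [15:8]; the result is
   (Ey >> START) & ((1 << LEN) - 1), with out-of-range START or LEN handled
   by the clamping below. */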
3764 /* Extract START, and shift the operand.
3765 Shifts larger than operand size get zeros. */
3766 tcg_gen_ext8u_tl(cpu_A0, cpu_regs[s->vex_v]);
3767 tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_A0);
3769 bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
3770 zero = tcg_const_tl(0);
3771 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_T0, cpu_A0, bound,
3773 tcg_temp_free(zero);
3775 /* Extract the LEN into a mask. Lengths larger than
3776 operand size get all ones. */
3777 tcg_gen_extract_tl(cpu_A0, cpu_regs[s->vex_v], 8, 8);
3778 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_A0, cpu_A0, bound,
3780 tcg_temp_free(bound);
3781 tcg_gen_movi_tl(cpu_T1, 1);
3782 tcg_gen_shl_tl(cpu_T1, cpu_T1, cpu_A0);
3783 tcg_gen_subi_tl(cpu_T1, cpu_T1, 1);
3784 tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);
3786 gen_op_mov_reg_v(ot, reg, cpu_T0);
3787 gen_op_update1_cc();
3788 set_cc_op(s, CC_OP_LOGICB + ot);
3792 case 0x0f5: /* bzhi Gy, Ey, By */
3793 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3794 || !(s->prefix & PREFIX_VEX)
3798 ot = mo_64_32(s->dflag);
3799 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3800 tcg_gen_ext8u_tl(cpu_T1, cpu_regs[s->vex_v]);
3802 TCGv bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
3803 /* Note that since we're using BMILG (in order to get O
3804 cleared) we need to store the inverse into C. */
3805 tcg_gen_setcond_tl(TCG_COND_LT, cpu_cc_src,
3807 tcg_gen_movcond_tl(TCG_COND_GT, cpu_T1, cpu_T1,
3808 bound, bound, cpu_T1);
3809 tcg_temp_free(bound);
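/* -1 << n has ones at and above bit position n, so the andc clears exactly
   those bits, leaving bits [n-1:0] of the source. */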
3811 tcg_gen_movi_tl(cpu_A0, -1);
3812 tcg_gen_shl_tl(cpu_A0, cpu_A0, cpu_T1);
3813 tcg_gen_andc_tl(cpu_T0, cpu_T0, cpu_A0);
3814 gen_op_mov_reg_v(ot, reg, cpu_T0);
3815 gen_op_update1_cc();
3816 set_cc_op(s, CC_OP_BMILGB + ot);
3819 case 0x3f6: /* mulx By, Gy, rdx, Ey */
3820 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3821 || !(s->prefix & PREFIX_VEX)
3825 ot = mo_64_32(s->dflag);
3826 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
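/* MULX Gy, By, Ey: unsigned multiply of EDX by the r/m operand, with no
   flags modified; the low half of the product goes to the vvvv destination
   (s->vex_v) and the high half to the ModRM reg destination. */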
3829 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
3830 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EDX]);
3831 tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
3832 cpu_tmp2_i32, cpu_tmp3_i32);
3833 tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], cpu_tmp2_i32);
3834 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp3_i32);
3836 #ifdef TARGET_X86_64
3838 tcg_gen_mulu2_i64(cpu_T0, cpu_T1,
3839 cpu_T0, cpu_regs[R_EDX]);
3840 tcg_gen_mov_i64(cpu_regs[s->vex_v], cpu_T0);
3841 tcg_gen_mov_i64(cpu_regs[reg], cpu_T1);
3847 case 0x3f5: /* pdep Gy, By, Ey */
3848 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3849 || !(s->prefix & PREFIX_VEX)
3853 ot = mo_64_32(s->dflag);
3854 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3855 /* Note that by zero-extending the mask operand, we
3856 automatically handle zero-extending the result. */
3858 tcg_gen_mov_tl(cpu_T1, cpu_regs[s->vex_v]);
3860 tcg_gen_ext32u_tl(cpu_T1, cpu_regs[s->vex_v]);
3862 gen_helper_pdep(cpu_regs[reg], cpu_T0, cpu_T1);
3865 case 0x2f5: /* pext Gy, By, Ey */
3866 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3867 || !(s->prefix & PREFIX_VEX)
3871 ot = mo_64_32(s->dflag);
3872 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3873 /* Note that by zero-extending the mask operand, we
3874 automatically handle zero-extending the result. */
3876 tcg_gen_mov_tl(cpu_T1, cpu_regs[s->vex_v]);
3878 tcg_gen_ext32u_tl(cpu_T1, cpu_regs[s->vex_v]);
3880 gen_helper_pext(cpu_regs[reg], cpu_T0, cpu_T1);
3883 case 0x1f6: /* adcx Gy, Ey */
3884 case 0x2f6: /* adox Gy, Ey */
3885 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_ADX)) {
3888 TCGv carry_in, carry_out, zero;
3891 ot = mo_64_32(s->dflag);
3892 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3894 /* Re-use the carry-out from a previous round. */
3895 TCGV_UNUSED(carry_in);
3896 carry_out = (b == 0x1f6 ? cpu_cc_dst : cpu_cc_src2);
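/* ADCX propagates a carry through CF only and ADOX through OF only, leaving
   all other flags untouched.  The CC_OP_ADCX/ADOX/ADCOX states keep the live
   carry in cpu_cc_dst (CF) and cpu_cc_src2 (OF), so a chain of adcx/adox
   (e.g. in multi-precision arithmetic) can pass the carry straight through
   these temporaries instead of materializing EFLAGS at every step. */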
3900 carry_in = cpu_cc_dst;
3901 end_op = CC_OP_ADCX;
3903 end_op = CC_OP_ADCOX;
3908 end_op = CC_OP_ADCOX;
3910 carry_in = cpu_cc_src2;
3911 end_op = CC_OP_ADOX;
3915 end_op = CC_OP_ADCOX;
3916 carry_in = carry_out;
3919 end_op = (b == 0x1f6 ? CC_OP_ADCX : CC_OP_ADOX);
3922 /* If we can't reuse carry-out, get it out of EFLAGS. */
3923 if (TCGV_IS_UNUSED(carry_in)) {
3924 if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) {
3925 gen_compute_eflags(s);
3927 carry_in = cpu_tmp0;
3928 tcg_gen_extract_tl(carry_in, cpu_cc_src,
3929 ctz32(b == 0x1f6 ? CC_C : CC_O), 1);
3933 #ifdef TARGET_X86_64
3935 /* If we know TL is 64-bit, and we want a 32-bit
3936 result, just do everything in 64-bit arithmetic. */
3937 tcg_gen_ext32u_i64(cpu_regs[reg], cpu_regs[reg]);
3938 tcg_gen_ext32u_i64(cpu_T0, cpu_T0);
3939 tcg_gen_add_i64(cpu_T0, cpu_T0, cpu_regs[reg]);
3940 tcg_gen_add_i64(cpu_T0, cpu_T0, carry_in);
3941 tcg_gen_ext32u_i64(cpu_regs[reg], cpu_T0);
3942 tcg_gen_shri_i64(carry_out, cpu_T0, 32);
3946 /* Otherwise compute the carry-out in two steps. */
3947 zero = tcg_const_tl(0);
3948 tcg_gen_add2_tl(cpu_T0, carry_out,
3951 tcg_gen_add2_tl(cpu_regs[reg], carry_out,
3952 cpu_regs[reg], carry_out,
3954 tcg_temp_free(zero);
3957 set_cc_op(s, end_op);
3961 case 0x1f7: /* shlx Gy, Ey, By */
3962 case 0x2f7: /* sarx Gy, Ey, By */
3963 case 0x3f7: /* shrx Gy, Ey, By */
3964 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3965 || !(s->prefix & PREFIX_VEX)
3969 ot = mo_64_32(s->dflag);
3970 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3972 tcg_gen_andi_tl(cpu_T1, cpu_regs[s->vex_v], 63);
3974 tcg_gen_andi_tl(cpu_T1, cpu_regs[s->vex_v], 31);
3977 tcg_gen_shl_tl(cpu_T0, cpu_T0, cpu_T1);
3978 } else if (b == 0x2f7) {
3980 tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
3982 tcg_gen_sar_tl(cpu_T0, cpu_T0, cpu_T1);
3985 tcg_gen_ext32u_tl(cpu_T0, cpu_T0);
3987 tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_T1);
3989 gen_op_mov_reg_v(ot, reg, cpu_T0);
3995 case 0x3f3: /* Group 17 */
3996 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
3997 || !(s->prefix & PREFIX_VEX)
4001 ot = mo_64_32(s->dflag);
4002 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4005 case 1: /* blsr By,Ey */
4006 tcg_gen_neg_tl(cpu_T1, cpu_T0);
4007 tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);
4008 gen_op_mov_reg_v(ot, s->vex_v, cpu_T0);
4009 gen_op_update2_cc();
4010 set_cc_op(s, CC_OP_BMILGB + ot);
4013 case 2: /* blsmsk By,Ey */
4014 tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
4015 tcg_gen_subi_tl(cpu_T0, cpu_T0, 1);
4016 tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_cc_src);
4017 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
4018 set_cc_op(s, CC_OP_BMILGB + ot);
4021 case 3: /* blsi By, Ey */
4022 tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
4023 tcg_gen_subi_tl(cpu_T0, cpu_T0, 1);
4024 tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_cc_src);
4025 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
4026 set_cc_op(s, CC_OP_BMILGB + ot);
4042 modrm = cpu_ldub_code(env, s->pc++);
4044 reg = ((modrm >> 3) & 7) | rex_r;
4045 mod = (modrm >> 6) & 3;
4050 sse_fn_eppi = sse_op_table7[b].op[b1];
4054 if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
4057 if (sse_fn_eppi == SSE_SPECIAL) {
4058 ot = mo_64_32(s->dflag);
4059 rm = (modrm & 7) | REX_B(s);
4061 gen_lea_modrm(env, s, modrm);
4062 reg = ((modrm >> 3) & 7) | rex_r;
4063 val = cpu_ldub_code(env, s->pc++);
4065 case 0x14: /* pextrb */
4066 tcg_gen_ld8u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
4067 xmm_regs[reg].ZMM_B(val & 15)));
4069 gen_op_mov_reg_v(ot, rm, cpu_T0);
4071 tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
4072 s->mem_index, MO_UB);
4075 case 0x15: /* pextrw */
4076 tcg_gen_ld16u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
4077 xmm_regs[reg].ZMM_W(val & 7)));
4079 gen_op_mov_reg_v(ot, rm, cpu_T0);
4081 tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
4082 s->mem_index, MO_LEUW);
4086 if (ot == MO_32) { /* pextrd */
4087 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4088 offsetof(CPUX86State,
4089 xmm_regs[reg].ZMM_L(val & 3)));
4091 tcg_gen_extu_i32_tl(cpu_regs[rm], cpu_tmp2_i32);
4093 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
4094 s->mem_index, MO_LEUL);
4096 } else { /* pextrq */
4097 #ifdef TARGET_X86_64
4098 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
4099 offsetof(CPUX86State,
4100 xmm_regs[reg].ZMM_Q(val & 1)));
4102 tcg_gen_mov_i64(cpu_regs[rm], cpu_tmp1_i64);
4104 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
4105 s->mem_index, MO_LEQ);
4112 case 0x17: /* extractps */
4113 tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
4114 xmm_regs[reg].ZMM_L(val & 3)));
4116 gen_op_mov_reg_v(ot, rm, cpu_T0);
4118 tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
4119 s->mem_index, MO_LEUL);
4122 case 0x20: /* pinsrb */
4124 gen_op_mov_v_reg(MO_32, cpu_T0, rm);
4126 tcg_gen_qemu_ld_tl(cpu_T0, cpu_A0,
4127 s->mem_index, MO_UB);
4129 tcg_gen_st8_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
4130 xmm_regs[reg].ZMM_B(val & 15)));
4132 case 0x21: /* insertps */
4134 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4135 offsetof(CPUX86State,xmm_regs[rm]
4136 .ZMM_L((val >> 6) & 3)));
4138 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
4139 s->mem_index, MO_LEUL);
4141 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4142 offsetof(CPUX86State,xmm_regs[reg]
4143 .ZMM_L((val >> 4) & 3)));
4145 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4146 cpu_env, offsetof(CPUX86State,
4147 xmm_regs[reg].ZMM_L(0)));
4149 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4150 cpu_env, offsetof(CPUX86State,
4151 xmm_regs[reg].ZMM_L(1)));
4153 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4154 cpu_env, offsetof(CPUX86State,
4155 xmm_regs[reg].ZMM_L(2)));
4157 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4158 cpu_env, offsetof(CPUX86State,
4159 xmm_regs[reg].ZMM_L(3)));
4162 if (ot == MO_32) { /* pinsrd */
4164 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[rm]);
4166 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
4167 s->mem_index, MO_LEUL);
4169 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4170 offsetof(CPUX86State,
4171 xmm_regs[reg].ZMM_L(val & 3)));
4172 } else { /* pinsrq */
4173 #ifdef TARGET_X86_64
4175 gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
4177 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
4178 s->mem_index, MO_LEQ);
4180 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
4181 offsetof(CPUX86State,
4182 xmm_regs[reg].ZMM_Q(val & 1)));
4193 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4195 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
4197 op2_offset = offsetof(CPUX86State,xmm_t0);
4198 gen_lea_modrm(env, s, modrm);
4199 gen_ldo_env_A0(s, op2_offset);
4202 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4204 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4206 op2_offset = offsetof(CPUX86State,mmx_t0);
4207 gen_lea_modrm(env, s, modrm);
4208 gen_ldq_env_A0(s, op2_offset);
4211 val = cpu_ldub_code(env, s->pc++);
4213 if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
4214 set_cc_op(s, CC_OP_EFLAGS);
4216 if (s->dflag == MO_64) {
4217 /* The helper must use entire 64-bit gp registers */
4222 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4223 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4224 sse_fn_eppi(cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4228 /* Various integer extensions at 0f 3a f[0-f]. */
4229 b = modrm | (b1 << 8);
4230 modrm = cpu_ldub_code(env, s->pc++);
4231 reg = ((modrm >> 3) & 7) | rex_r;
4234 case 0x3f0: /* rorx Gy,Ey, Ib */
4235 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4236 || !(s->prefix & PREFIX_VEX)
4240 ot = mo_64_32(s->dflag);
4241 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4242 b = cpu_ldub_code(env, s->pc++);
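/* RORX Gy, Ey, Ib: rotate right by an immediate without updating any flags.
   The 32-bit form rotates within 32 bits and then zero-extends the result,
   which is what the i32 path below implements. */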
4244 tcg_gen_rotri_tl(cpu_T0, cpu_T0, b & 63);
4246 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
4247 tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, b & 31);
4248 tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
4250 gen_op_mov_reg_v(ot, reg, cpu_T0);
4260 gen_unknown_opcode(env, s);
4264 /* generic MMX or SSE operation */
4266 case 0x70: /* pshufx insn */
4267 case 0xc6: /* pshufx insn */
4268 case 0xc2: /* compare insns */
4275 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4279 gen_lea_modrm(env, s, modrm);
4280 op2_offset = offsetof(CPUX86State,xmm_t0);
4286 /* Most sse scalar operations. */
4289 } else if (b1 == 3) {
4294 case 0x2e: /* ucomis[sd] */
4295 case 0x2f: /* comis[sd] */
4307 gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
4308 tcg_gen_st32_tl(cpu_T0, cpu_env,
4309 offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
4313 gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_D(0)));
4316 /* 128 bit access */
4317 gen_ldo_env_A0(s, op2_offset);
4321 rm = (modrm & 7) | REX_B(s);
4322 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
4325 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4327 gen_lea_modrm(env, s, modrm);
4328 op2_offset = offsetof(CPUX86State,mmx_t0);
4329 gen_ldq_env_A0(s, op2_offset);
4332 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4336 case 0x0f: /* 3DNow! data insns */
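/* 3DNow! encodes the actual operation in a suffix byte that follows the
   ModRM byte and any displacement; sse_op_table5 maps that byte to the
   corresponding MMX helper (unlisted suffixes are left NULL). */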
4337 val = cpu_ldub_code(env, s->pc++);
4338 sse_fn_epp = sse_op_table5[val];
4342 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) {
4345 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4346 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4347 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4349 case 0x70: /* pshufx insn */
4350 case 0xc6: /* pshufx insn */
4351 val = cpu_ldub_code(env, s->pc++);
4352 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4353 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4354 /* XXX: introduce a new table? */
4355 sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp;
4356 sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4360 val = cpu_ldub_code(env, s->pc++);
4363 sse_fn_epp = sse_op_table4[val][b1];
4365 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4366 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4367 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4370 /* maskmov: we must prepare A0 */
4373 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EDI]);
4374 gen_extu(s->aflag, cpu_A0);
4375 gen_add_A0_ds_seg(s);
4377 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4378 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4379 /* XXX: introduce a new table? */
4380 sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp;
4381 sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0);
4384 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4385 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4386 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4389 if (b == 0x2e || b == 0x2f) {
4390 set_cc_op(s, CC_OP_EFLAGS);
4395 /* convert one instruction. s->is_jmp is set if the translation must
4396 be stopped. Return the next pc value */
4397 static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
4398 target_ulong pc_start)
4402 TCGMemOp ot, aflag, dflag;
4403 int modrm, reg, rm, mod, op, opreg, val;
4404 target_ulong next_eip, tval;
4407 s->pc_start = s->pc = pc_start;
4412 #ifdef TARGET_X86_64
4417 s->rip_offset = 0; /* for relative ip address */
4421 b = cpu_ldub_code(env, s->pc);
4423 /* Collect prefixes. */
4426 prefixes |= PREFIX_REPZ;
4429 prefixes |= PREFIX_REPNZ;
4432 prefixes |= PREFIX_LOCK;
4453 prefixes |= PREFIX_DATA;
4456 prefixes |= PREFIX_ADR;
4458 #ifdef TARGET_X86_64
4462 rex_w = (b >> 3) & 1;
4463 rex_r = (b & 0x4) << 1;
4464 s->rex_x = (b & 0x2) << 2;
4465 REX_B(s) = (b & 0x1) << 3;
4466 x86_64_hregs = 1; /* select uniform byte register addressing */
4471 case 0xc5: /* 2-byte VEX */
4472 case 0xc4: /* 3-byte VEX */
4473 /* VEX prefixes cannot be used except in 32-bit mode.
4474 Otherwise the instruction is LES or LDS. */
4475 if (s->code32 && !s->vm86) {
4476 static const int pp_prefix[4] = {
4477 0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ
4479 int vex3, vex2 = cpu_ldub_code(env, s->pc);
4481 if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
4482 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
4483 otherwise the instruction is LES or LDS. */
4488 /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
4489 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ
4490 | PREFIX_LOCK | PREFIX_DATA)) {
4493 #ifdef TARGET_X86_64
4498 rex_r = (~vex2 >> 4) & 8;
4501 b = cpu_ldub_code(env, s->pc++);
4503 #ifdef TARGET_X86_64
4504 s->rex_x = (~vex2 >> 3) & 8;
4505 s->rex_b = (~vex2 >> 2) & 8;
4507 vex3 = cpu_ldub_code(env, s->pc++);
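/* Three-byte VEX (c4): the first payload byte holds the inverted R/X/B bits
   and, in its low five bits, the leading-opcode map select handled by the
   switch below; the second payload byte (vex3) carries W, the inverted vvvv
   register, L and the pp prefix encoding. */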
4508 rex_w = (vex3 >> 7) & 1;
4509 switch (vex2 & 0x1f) {
4510 case 0x01: /* Implied 0f leading opcode bytes. */
4511 b = cpu_ldub_code(env, s->pc++) | 0x100;
4513 case 0x02: /* Implied 0f 38 leading opcode bytes. */
4516 case 0x03: /* Implied 0f 3a leading opcode bytes. */
4519 default: /* Reserved for future use. */
4523 s->vex_v = (~vex3 >> 3) & 0xf;
4524 s->vex_l = (vex3 >> 2) & 1;
4525 prefixes |= pp_prefix[vex3 & 3] | PREFIX_VEX;
4530 /* Post-process prefixes. */
4532 /* In 64-bit mode, the default data size is 32-bit. Select 64-bit
4533 data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
4534 over 0x66 if both are present. */
4535 dflag = (rex_w > 0 ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32);
4536 /* In 64-bit mode, 0x67 selects 32-bit addressing. */
4537 aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64);
4539 /* In 16/32-bit mode, 0x66 selects the opposite data size. */
4540 if (s->code32 ^ ((prefixes & PREFIX_DATA) != 0)) {
4545 /* In 16/32-bit mode, 0x67 selects the opposite addressing. */
4546 if (s->code32 ^ ((prefixes & PREFIX_ADR) != 0)) {
4553 s->prefix = prefixes;
4557 /* now check op code */
4561 /**************************/
4562 /* extended op code */
4563 b = cpu_ldub_code(env, s->pc++) | 0x100;
4566 /**************************/
4581 ot = mo_b_d(b, dflag);
4584 case 0: /* OP Ev, Gv */
4585 modrm = cpu_ldub_code(env, s->pc++);
4586 reg = ((modrm >> 3) & 7) | rex_r;
4587 mod = (modrm >> 6) & 3;
4588 rm = (modrm & 7) | REX_B(s);
4590 gen_lea_modrm(env, s, modrm);
4592 } else if (op == OP_XORL && rm == reg) {
4594 /* xor reg, reg optimisation */
4595 set_cc_op(s, CC_OP_CLR);
4596 tcg_gen_movi_tl(cpu_T0, 0);
4597 gen_op_mov_reg_v(ot, reg, cpu_T0);
4602 gen_op_mov_v_reg(ot, cpu_T1, reg);
4603 gen_op(s, op, ot, opreg);
4605 case 1: /* OP Gv, Ev */
4606 modrm = cpu_ldub_code(env, s->pc++);
4607 mod = (modrm >> 6) & 3;
4608 reg = ((modrm >> 3) & 7) | rex_r;
4609 rm = (modrm & 7) | REX_B(s);
4611 gen_lea_modrm(env, s, modrm);
4612 gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
4613 } else if (op == OP_XORL && rm == reg) {
4616 gen_op_mov_v_reg(ot, cpu_T1, rm);
4618 gen_op(s, op, ot, reg);
4620 case 2: /* OP A, Iv */
4621 val = insn_get(env, s, ot);
4622 tcg_gen_movi_tl(cpu_T1, val);
4623 gen_op(s, op, ot, OR_EAX);
4632 case 0x80: /* GRP1 */
4638 ot = mo_b_d(b, dflag);
4640 modrm = cpu_ldub_code(env, s->pc++);
4641 mod = (modrm >> 6) & 3;
4642 rm = (modrm & 7) | REX_B(s);
4643 op = (modrm >> 3) & 7;
4649 s->rip_offset = insn_const_size(ot);
4650 gen_lea_modrm(env, s, modrm);
4661 val = insn_get(env, s, ot);
4664 val = (int8_t)insn_get(env, s, MO_8);
4667 tcg_gen_movi_tl(cpu_T1, val);
4668 gen_op(s, op, ot, opreg);
4672 /**************************/
4673 /* inc, dec, and other misc arith */
4674 case 0x40 ... 0x47: /* inc Gv */
4676 gen_inc(s, ot, OR_EAX + (b & 7), 1);
4678 case 0x48 ... 0x4f: /* dec Gv */
4680 gen_inc(s, ot, OR_EAX + (b & 7), -1);
4682 case 0xf6: /* GRP3 */
4684 ot = mo_b_d(b, dflag);
4686 modrm = cpu_ldub_code(env, s->pc++);
4687 mod = (modrm >> 6) & 3;
4688 rm = (modrm & 7) | REX_B(s);
4689 op = (modrm >> 3) & 7;
4692 s->rip_offset = insn_const_size(ot);
4694 gen_lea_modrm(env, s, modrm);
4695 /* For those below that handle locked memory, don't load here. */
4696 if (!(s->prefix & PREFIX_LOCK)
4698 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
4701 gen_op_mov_v_reg(ot, cpu_T0, rm);
4706 val = insn_get(env, s, ot);
4707 tcg_gen_movi_tl(cpu_T1, val);
4708 gen_op_testl_T0_T1_cc();
4709 set_cc_op(s, CC_OP_LOGICB + ot);
4712 if (s->prefix & PREFIX_LOCK) {
4716 tcg_gen_movi_tl(cpu_T0, ~0);
4717 tcg_gen_atomic_xor_fetch_tl(cpu_T0, cpu_A0, cpu_T0,
4718 s->mem_index, ot | MO_LE);
4720 tcg_gen_not_tl(cpu_T0, cpu_T0);
4722 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
4724 gen_op_mov_reg_v(ot, rm, cpu_T0);
4729 if (s->prefix & PREFIX_LOCK) {
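/* There is no single atomic TCG op for a locked NEG, so it is implemented
   as a compare-and-swap retry loop: read the old value, try to store its
   negation with cmpxchg, and loop if the memory word changed under us. */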
4731 TCGv a0, t0, t1, t2;
4736 a0 = tcg_temp_local_new();
4737 t0 = tcg_temp_local_new();
4738 label1 = gen_new_label();
4740 tcg_gen_mov_tl(a0, cpu_A0);
4741 tcg_gen_mov_tl(t0, cpu_T0);
4743 gen_set_label(label1);
4744 t1 = tcg_temp_new();
4745 t2 = tcg_temp_new();
4746 tcg_gen_mov_tl(t2, t0);
4747 tcg_gen_neg_tl(t1, t0);
4748 tcg_gen_atomic_cmpxchg_tl(t0, a0, t0, t1,
4749 s->mem_index, ot | MO_LE);
4751 tcg_gen_brcond_tl(TCG_COND_NE, t0, t2, label1);
4755 tcg_gen_mov_tl(cpu_T0, t0);
4758 tcg_gen_neg_tl(cpu_T0, cpu_T0);
4760 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
4762 gen_op_mov_reg_v(ot, rm, cpu_T0);
4765 gen_op_update_neg_cc();
4766 set_cc_op(s, CC_OP_SUBB + ot);
4771 gen_op_mov_v_reg(MO_8, cpu_T1, R_EAX);
4772 tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
4773 tcg_gen_ext8u_tl(cpu_T1, cpu_T1);
4774 /* XXX: use 32 bit mul which could be faster */
4775 tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
4776 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
4777 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
4778 tcg_gen_andi_tl(cpu_cc_src, cpu_T0, 0xff00);
4779 set_cc_op(s, CC_OP_MULB);
4782 gen_op_mov_v_reg(MO_16, cpu_T1, R_EAX);
4783 tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
4784 tcg_gen_ext16u_tl(cpu_T1, cpu_T1);
4785 /* XXX: use 32 bit mul which could be faster */
4786 tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
4787 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
4788 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
4789 tcg_gen_shri_tl(cpu_T0, cpu_T0, 16);
4790 gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
4791 tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
4792 set_cc_op(s, CC_OP_MULW);
4796 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
4797 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
4798 tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
4799 cpu_tmp2_i32, cpu_tmp3_i32);
4800 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
4801 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
4802 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4803 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
4804 set_cc_op(s, CC_OP_MULL);
4806 #ifdef TARGET_X86_64
4808 tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
4809 cpu_T0, cpu_regs[R_EAX]);
4810 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4811 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
4812 set_cc_op(s, CC_OP_MULQ);
4820 gen_op_mov_v_reg(MO_8, cpu_T1, R_EAX);
4821 tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
4822 tcg_gen_ext8s_tl(cpu_T1, cpu_T1);
4823 /* XXX: use 32 bit mul which could be faster */
4824 tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
4825 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
4826 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
4827 tcg_gen_ext8s_tl(cpu_tmp0, cpu_T0);
4828 tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
4829 set_cc_op(s, CC_OP_MULB);
4832 gen_op_mov_v_reg(MO_16, cpu_T1, R_EAX);
4833 tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
4834 tcg_gen_ext16s_tl(cpu_T1, cpu_T1);
4835 /* XXX: use 32 bit mul which could be faster */
4836 tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
4837 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
4838 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
4839 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T0);
4840 tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
4841 tcg_gen_shri_tl(cpu_T0, cpu_T0, 16);
4842 gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
4843 set_cc_op(s, CC_OP_MULW);
4847 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
4848 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
4849 tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
4850 cpu_tmp2_i32, cpu_tmp3_i32);
4851 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
4852 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
4853 tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
4854 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4855 tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
4856 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
4857 set_cc_op(s, CC_OP_MULL);
4859 #ifdef TARGET_X86_64
4861 tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
4862 cpu_T0, cpu_regs[R_EAX]);
4863 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4864 tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
4865 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
4866 set_cc_op(s, CC_OP_MULQ);
4874 gen_helper_divb_AL(cpu_env, cpu_T0);
4877 gen_helper_divw_AX(cpu_env, cpu_T0);
4881 gen_helper_divl_EAX(cpu_env, cpu_T0);
4883 #ifdef TARGET_X86_64
4885 gen_helper_divq_EAX(cpu_env, cpu_T0);
4893 gen_helper_idivb_AL(cpu_env, cpu_T0);
4896 gen_helper_idivw_AX(cpu_env, cpu_T0);
4900 gen_helper_idivl_EAX(cpu_env, cpu_T0);
4902 #ifdef TARGET_X86_64
4904 gen_helper_idivq_EAX(cpu_env, cpu_T0);
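/* GRP4 (0xfe) and GRP5 (0xff): the reg field of the ModRM byte selects
   the operation (inc, dec, call, lcall, jmp, ljmp, push); GRP4 only
   allows inc and dec. */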
4914 case 0xfe: /* GRP4 */
4915 case 0xff: /* GRP5 */
4916 ot = mo_b_d(b, dflag);
4918 modrm = cpu_ldub_code(env, s->pc++);
4919 mod = (modrm >> 6) & 3;
4920 rm = (modrm & 7) | REX_B(s);
4921 op = (modrm >> 3) & 7;
4922 if (op >= 2 && b == 0xfe) {
4926 if (op == 2 || op == 4) {
4927 /* operand size for jumps is 64 bit */
4929 } else if (op == 3 || op == 5) {
4930 ot = dflag != MO_16 ? MO_32 + (rex_w == 1) : MO_16;
4931 } else if (op == 6) {
4932 /* default push size is 64 bit */
4933 ot = mo_pushpop(s, dflag);
4937 gen_lea_modrm(env, s, modrm);
4938 if (op >= 2 && op != 3 && op != 5)
4939 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
4941 gen_op_mov_v_reg(ot, cpu_T0, rm);
4945 case 0: /* inc Ev */
4950 gen_inc(s, ot, opreg, 1);
4952 case 1: /* dec Ev */
4957 gen_inc(s, ot, opreg, -1);
4959 case 2: /* call Ev */
4960 /* XXX: optimize if memory (no 'and' is necessary) */
4961 if (dflag == MO_16) {
4962 tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
4964 next_eip = s->pc - s->cs_base;
4965 tcg_gen_movi_tl(cpu_T1, next_eip);
4966 gen_push_v(s, cpu_T1);
4967 gen_op_jmp_v(cpu_T0);
4971 case 3: /* lcall Ev */
4972 gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
4973 gen_add_A0_im(s, 1 << ot);
4974 gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
4976 if (s->pe && !s->vm86) {
4977 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
4978 gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T1,
4979 tcg_const_i32(dflag - 1),
4980 tcg_const_tl(s->pc - s->cs_base));
4982 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
4983 gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T1,
4984 tcg_const_i32(dflag - 1),
4985 tcg_const_i32(s->pc - s->cs_base));
4989 case 4: /* jmp Ev */
4990 if (dflag == MO_16) {
4991 tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
4993 gen_op_jmp_v(cpu_T0);
4997 case 5: /* ljmp Ev */
4998 gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
4999 gen_add_A0_im(s, 1 << ot);
5000 gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
5002 if (s->pe && !s->vm86) {
5003 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
5004 gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T1,
5005 tcg_const_tl(s->pc - s->cs_base));
5007 gen_op_movl_seg_T0_vm(R_CS);
5008 gen_op_jmp_v(cpu_T1);
5012 case 6: /* push Ev */
5013 gen_push_v(s, cpu_T0);
5020 case 0x84: /* test Ev, Gv */
5022 ot = mo_b_d(b, dflag);
5024 modrm = cpu_ldub_code(env, s->pc++);
5025 reg = ((modrm >> 3) & 7) | rex_r;
5027 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5028 gen_op_mov_v_reg(ot, cpu_T1, reg);
5029 gen_op_testl_T0_T1_cc();
5030 set_cc_op(s, CC_OP_LOGICB + ot);
5033 case 0xa8: /* test eAX, Iv */
5035 ot = mo_b_d(b, dflag);
5036 val = insn_get(env, s, ot);
5038 gen_op_mov_v_reg(ot, cpu_T0, OR_EAX);
5039 tcg_gen_movi_tl(cpu_T1, val);
5040 gen_op_testl_T0_T1_cc();
5041 set_cc_op(s, CC_OP_LOGICB + ot);
5044 case 0x98: /* CWDE/CBW */
5046 #ifdef TARGET_X86_64
5048 gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
5049 tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
5050 gen_op_mov_reg_v(MO_64, R_EAX, cpu_T0);
5054 gen_op_mov_v_reg(MO_16, cpu_T0, R_EAX);
5055 tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
5056 gen_op_mov_reg_v(MO_32, R_EAX, cpu_T0);
5059 gen_op_mov_v_reg(MO_8, cpu_T0, R_EAX);
5060 tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
5061 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
5067 case 0x99: /* CDQ/CWD */
5069 #ifdef TARGET_X86_64
5071 gen_op_mov_v_reg(MO_64, cpu_T0, R_EAX);
5072 tcg_gen_sari_tl(cpu_T0, cpu_T0, 63);
5073 gen_op_mov_reg_v(MO_64, R_EDX, cpu_T0);
5077 gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
5078 tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
5079 tcg_gen_sari_tl(cpu_T0, cpu_T0, 31);
5080 gen_op_mov_reg_v(MO_32, R_EDX, cpu_T0);
5083 gen_op_mov_v_reg(MO_16, cpu_T0, R_EAX);
5084 tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
5085 tcg_gen_sari_tl(cpu_T0, cpu_T0, 15);
5086 gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
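/* Two-operand and immediate IMUL forms (0x0f 0xaf, 0x69, 0x6b): the
   second operand comes from Ev or from an immediate, the result goes to
   Gv, and overflow is detected the same way as for the one-operand
   forms. */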
5092 case 0x1af: /* imul Gv, Ev */
5093 case 0x69: /* imul Gv, Ev, I */
5096 modrm = cpu_ldub_code(env, s->pc++);
5097 reg = ((modrm >> 3) & 7) | rex_r;
5099 s->rip_offset = insn_const_size(ot);
5102 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5104 val = insn_get(env, s, ot);
5105 tcg_gen_movi_tl(cpu_T1, val);
5106 } else if (b == 0x6b) {
5107 val = (int8_t)insn_get(env, s, MO_8);
5108 tcg_gen_movi_tl(cpu_T1, val);
5110 gen_op_mov_v_reg(ot, cpu_T1, reg);
5113 #ifdef TARGET_X86_64
5115 tcg_gen_muls2_i64(cpu_regs[reg], cpu_T1, cpu_T0, cpu_T1);
5116 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
5117 tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
5118 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T1);
5122 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
5123 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
5124 tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
5125 cpu_tmp2_i32, cpu_tmp3_i32);
5126 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
5127 tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
5128 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
5129 tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
5130 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
5133 tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
5134 tcg_gen_ext16s_tl(cpu_T1, cpu_T1);
5135 /* XXX: use 32 bit mul which could be faster */
5136 tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
5137 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
5138 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T0);
5139 tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
5140 gen_op_mov_reg_v(ot, reg, cpu_T0);
5143 set_cc_op(s, CC_OP_MULB + ot);
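/* XADD: the destination receives the sum and the source register
   receives the old destination value.  With a LOCK prefix the memory
   form uses an atomic fetch-and-add so the read-modify-write cycle is a
   single atomic operation. */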
5146 case 0x1c1: /* xadd Ev, Gv */
5147 ot = mo_b_d(b, dflag);
5148 modrm = cpu_ldub_code(env, s->pc++);
5149 reg = ((modrm >> 3) & 7) | rex_r;
5150 mod = (modrm >> 6) & 3;
5151 gen_op_mov_v_reg(ot, cpu_T0, reg);
5153 rm = (modrm & 7) | REX_B(s);
5154 gen_op_mov_v_reg(ot, cpu_T1, rm);
5155 tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
5156 gen_op_mov_reg_v(ot, reg, cpu_T1);
5157 gen_op_mov_reg_v(ot, rm, cpu_T0);
5159 gen_lea_modrm(env, s, modrm);
5160 if (s->prefix & PREFIX_LOCK) {
5161 tcg_gen_atomic_fetch_add_tl(cpu_T1, cpu_A0, cpu_T0,
5162 s->mem_index, ot | MO_LE);
5163 tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
5165 gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
5166 tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
5167 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
5169 gen_op_mov_reg_v(ot, reg, cpu_T1);
5171 gen_op_update2_cc();
5172 set_cc_op(s, CC_OP_ADDB + ot);
5175 case 0x1b1: /* cmpxchg Ev, Gv */
5177 TCGv oldv, newv, cmpv;
5179 ot = mo_b_d(b, dflag);
5180 modrm = cpu_ldub_code(env, s->pc++);
5181 reg = ((modrm >> 3) & 7) | rex_r;
5182 mod = (modrm >> 6) & 3;
5183 oldv = tcg_temp_new();
5184 newv = tcg_temp_new();
5185 cmpv = tcg_temp_new();
5186 gen_op_mov_v_reg(ot, newv, reg);
5187 tcg_gen_mov_tl(cmpv, cpu_regs[R_EAX]);
5189 if (s->prefix & PREFIX_LOCK) {
5193 gen_lea_modrm(env, s, modrm);
5194 tcg_gen_atomic_cmpxchg_tl(oldv, cpu_A0, cmpv, newv,
5195 s->mem_index, ot | MO_LE);
5196 gen_op_mov_reg_v(ot, R_EAX, oldv);
5199 rm = (modrm & 7) | REX_B(s);
5200 gen_op_mov_v_reg(ot, oldv, rm);
5202 gen_lea_modrm(env, s, modrm);
5203 gen_op_ld_v(s, ot, oldv, cpu_A0);
5204 rm = 0; /* avoid warning */
5208 /* store value = (old == cmp ? new : old); */
5209 tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv);
5211 gen_op_mov_reg_v(ot, R_EAX, oldv);
5212 gen_op_mov_reg_v(ot, rm, newv);
5214 /* Perform an unconditional store cycle like physical cpu;
5215 must be before changing accumulator to ensure
5216 idempotency if the store faults and the instruction is restarted */
5218 gen_op_st_v(s, ot, newv, cpu_A0);
5219 gen_op_mov_reg_v(ot, R_EAX, oldv);
5222 tcg_gen_mov_tl(cpu_cc_src, oldv);
5223 tcg_gen_mov_tl(cpu_cc_srcT, cmpv);
5224 tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv);
5225 set_cc_op(s, CC_OP_SUBB + ot);
5226 tcg_temp_free(oldv);
5227 tcg_temp_free(newv);
5228 tcg_temp_free(cmpv);
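/* CMPXCHG8B/CMPXCHG16B are handled by helpers; the locked variant is
   used only when a LOCK prefix is present and we are translating for a
   parallel (multi-threaded TCG) context. */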
5231 case 0x1c7: /* cmpxchg8b */
5232 modrm = cpu_ldub_code(env, s->pc++);
5233 mod = (modrm >> 6) & 3;
5234 if ((mod == 3) || ((modrm & 0x38) != 0x8))
5236 #ifdef TARGET_X86_64
5237 if (dflag == MO_64) {
5238 if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
5240 gen_lea_modrm(env, s, modrm);
5241 if ((s->prefix & PREFIX_LOCK) && parallel_cpus) {
5242 gen_helper_cmpxchg16b(cpu_env, cpu_A0);
5244 gen_helper_cmpxchg16b_unlocked(cpu_env, cpu_A0);
5249 if (!(s->cpuid_features & CPUID_CX8))
5251 gen_lea_modrm(env, s, modrm);
5252 if ((s->prefix & PREFIX_LOCK) && parallel_cpus) {
5253 gen_helper_cmpxchg8b(cpu_env, cpu_A0);
5255 gen_helper_cmpxchg8b_unlocked(cpu_env, cpu_A0);
5258 set_cc_op(s, CC_OP_EFLAGS);
5261 /**************************/
5263 case 0x50 ... 0x57: /* push */
5264 gen_op_mov_v_reg(MO_32, cpu_T0, (b & 7) | REX_B(s));
5265 gen_push_v(s, cpu_T0);
5267 case 0x58 ... 0x5f: /* pop */
5269 /* NOTE: order is important for pop %sp */
5270 gen_pop_update(s, ot);
5271 gen_op_mov_reg_v(ot, (b & 7) | REX_B(s), cpu_T0);
5273 case 0x60: /* pusha */
5278 case 0x61: /* popa */
5283 case 0x68: /* push Iv */
5285 ot = mo_pushpop(s, dflag);
5287 val = insn_get(env, s, ot);
5289 val = (int8_t)insn_get(env, s, MO_8);
5290 tcg_gen_movi_tl(cpu_T0, val);
5291 gen_push_v(s, cpu_T0);
5293 case 0x8f: /* pop Ev */
5294 modrm = cpu_ldub_code(env, s->pc++);
5295 mod = (modrm >> 6) & 3;
5298 /* NOTE: order is important for pop %sp */
5299 gen_pop_update(s, ot);
5300 rm = (modrm & 7) | REX_B(s);
5301 gen_op_mov_reg_v(ot, rm, cpu_T0);
5303 /* NOTE: order is important too for MMU exceptions */
5304 s->popl_esp_hack = 1 << ot;
5305 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5306 s->popl_esp_hack = 0;
5307 gen_pop_update(s, ot);
5310 case 0xc8: /* enter */
5313 val = cpu_lduw_code(env, s->pc);
5315 level = cpu_ldub_code(env, s->pc++);
5316 gen_enter(s, val, level);
5319 case 0xc9: /* leave */
5322 case 0x06: /* push es */
5323 case 0x0e: /* push cs */
5324 case 0x16: /* push ss */
5325 case 0x1e: /* push ds */
5328 gen_op_movl_T0_seg(b >> 3);
5329 gen_push_v(s, cpu_T0);
5331 case 0x1a0: /* push fs */
5332 case 0x1a8: /* push gs */
5333 gen_op_movl_T0_seg((b >> 3) & 7);
5334 gen_push_v(s, cpu_T0);
5336 case 0x07: /* pop es */
5337 case 0x17: /* pop ss */
5338 case 0x1f: /* pop ds */
5343 gen_movl_seg_T0(s, reg);
5344 gen_pop_update(s, ot);
5345 /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. */
5347 gen_jmp_im(s->pc - s->cs_base);
5350 gen_eob_inhibit_irq(s, true);
5356 case 0x1a1: /* pop fs */
5357 case 0x1a9: /* pop gs */
5359 gen_movl_seg_T0(s, (b >> 3) & 7);
5360 gen_pop_update(s, ot);
5362 gen_jmp_im(s->pc - s->cs_base);
5367 /**************************/
5370 case 0x89: /* mov Gv, Ev */
5371 ot = mo_b_d(b, dflag);
5372 modrm = cpu_ldub_code(env, s->pc++);
5373 reg = ((modrm >> 3) & 7) | rex_r;
5375 /* generate a generic store */
5376 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
5379 case 0xc7: /* mov Ev, Iv */
5380 ot = mo_b_d(b, dflag);
5381 modrm = cpu_ldub_code(env, s->pc++);
5382 mod = (modrm >> 6) & 3;
5384 s->rip_offset = insn_const_size(ot);
5385 gen_lea_modrm(env, s, modrm);
5387 val = insn_get(env, s, ot);
5388 tcg_gen_movi_tl(cpu_T0, val);
5390 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
5392 gen_op_mov_reg_v(ot, (modrm & 7) | REX_B(s), cpu_T0);
5396 case 0x8b: /* mov Ev, Gv */
5397 ot = mo_b_d(b, dflag);
5398 modrm = cpu_ldub_code(env, s->pc++);
5399 reg = ((modrm >> 3) & 7) | rex_r;
5401 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5402 gen_op_mov_reg_v(ot, reg, cpu_T0);
5404 case 0x8e: /* mov seg, Gv */
5405 modrm = cpu_ldub_code(env, s->pc++);
5406 reg = (modrm >> 3) & 7;
5407 if (reg >= 6 || reg == R_CS)
5409 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
5410 gen_movl_seg_T0(s, reg);
5411 /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. */
5413 gen_jmp_im(s->pc - s->cs_base);
5416 gen_eob_inhibit_irq(s, true);
5422 case 0x8c: /* mov Gv, seg */
5423 modrm = cpu_ldub_code(env, s->pc++);
5424 reg = (modrm >> 3) & 7;
5425 mod = (modrm >> 6) & 3;
5428 gen_op_movl_T0_seg(reg);
5429 ot = mod == 3 ? dflag : MO_16;
5430 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5433 case 0x1b6: /* movzbS Gv, Eb */
5434 case 0x1b7: /* movzwS Gv, Eb */
5435 case 0x1be: /* movsbS Gv, Eb */
5436 case 0x1bf: /* movswS Gv, Eb */
5441 /* d_ot is the size of destination */
5443 /* ot is the size of source */
5444 ot = (b & 1) + MO_8;
5445 /* s_ot is the sign+size of source */
5446 s_ot = b & 8 ? MO_SIGN | ot : ot;
5448 modrm = cpu_ldub_code(env, s->pc++);
5449 reg = ((modrm >> 3) & 7) | rex_r;
5450 mod = (modrm >> 6) & 3;
5451 rm = (modrm & 7) | REX_B(s);
5454 if (s_ot == MO_SB && byte_reg_is_xH(rm)) {
5455 tcg_gen_sextract_tl(cpu_T0, cpu_regs[rm - 4], 8, 8);
5457 gen_op_mov_v_reg(ot, cpu_T0, rm);
5460 tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
5463 tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
5466 tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
5470 tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
5474 gen_op_mov_reg_v(d_ot, reg, cpu_T0);
5476 gen_lea_modrm(env, s, modrm);
5477 gen_op_ld_v(s, s_ot, cpu_T0, cpu_A0);
5478 gen_op_mov_reg_v(d_ot, reg, cpu_T0);
5483 case 0x8d: /* lea */
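/* LEA only computes the effective address: gen_lea_v_seg is called with
   no default or override segment, so no segment base is added and
   nothing is loaded from memory. */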
5484 modrm = cpu_ldub_code(env, s->pc++);
5485 mod = (modrm >> 6) & 3;
5488 reg = ((modrm >> 3) & 7) | rex_r;
5490 AddressParts a = gen_lea_modrm_0(env, s, modrm);
5491 TCGv ea = gen_lea_modrm_1(a);
5492 gen_lea_v_seg(s, s->aflag, ea, -1, -1);
5493 gen_op_mov_reg_v(dflag, reg, cpu_A0);
5497 case 0xa0: /* mov EAX, Ov */
5499 case 0xa2: /* mov Ov, EAX */
5502 target_ulong offset_addr;
5504 ot = mo_b_d(b, dflag);
5506 #ifdef TARGET_X86_64
5508 offset_addr = cpu_ldq_code(env, s->pc);
5513 offset_addr = insn_get(env, s, s->aflag);
5516 tcg_gen_movi_tl(cpu_A0, offset_addr);
5517 gen_add_A0_ds_seg(s);
5519 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
5520 gen_op_mov_reg_v(ot, R_EAX, cpu_T0);
5522 gen_op_mov_v_reg(ot, cpu_T0, R_EAX);
5523 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
5527 case 0xd7: /* xlat */
5528 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EBX]);
5529 tcg_gen_ext8u_tl(cpu_T0, cpu_regs[R_EAX]);
5530 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T0);
5531 gen_extu(s->aflag, cpu_A0);
5532 gen_add_A0_ds_seg(s);
5533 gen_op_ld_v(s, MO_8, cpu_T0, cpu_A0);
5534 gen_op_mov_reg_v(MO_8, R_EAX, cpu_T0);
5536 case 0xb0 ... 0xb7: /* mov R, Ib */
5537 val = insn_get(env, s, MO_8);
5538 tcg_gen_movi_tl(cpu_T0, val);
5539 gen_op_mov_reg_v(MO_8, (b & 7) | REX_B(s), cpu_T0);
5541 case 0xb8 ... 0xbf: /* mov R, Iv */
5542 #ifdef TARGET_X86_64
5543 if (dflag == MO_64) {
5546 tmp = cpu_ldq_code(env, s->pc);
5548 reg = (b & 7) | REX_B(s);
5549 tcg_gen_movi_tl(cpu_T0, tmp);
5550 gen_op_mov_reg_v(MO_64, reg, cpu_T0);
5555 val = insn_get(env, s, ot);
5556 reg = (b & 7) | REX_B(s);
5557 tcg_gen_movi_tl(cpu_T0, val);
5558 gen_op_mov_reg_v(ot, reg, cpu_T0);
5562 case 0x91 ... 0x97: /* xchg R, EAX */
5565 reg = (b & 7) | REX_B(s);
5569 case 0x87: /* xchg Ev, Gv */
5570 ot = mo_b_d(b, dflag);
5571 modrm = cpu_ldub_code(env, s->pc++);
5572 reg = ((modrm >> 3) & 7) | rex_r;
5573 mod = (modrm >> 6) & 3;
5575 rm = (modrm & 7) | REX_B(s);
5577 gen_op_mov_v_reg(ot, cpu_T0, reg);
5578 gen_op_mov_v_reg(ot, cpu_T1, rm);
5579 gen_op_mov_reg_v(ot, rm, cpu_T0);
5580 gen_op_mov_reg_v(ot, reg, cpu_T1);
5582 gen_lea_modrm(env, s, modrm);
5583 gen_op_mov_v_reg(ot, cpu_T0, reg);
5584 /* for xchg, lock is implicit */
5585 tcg_gen_atomic_xchg_tl(cpu_T1, cpu_A0, cpu_T0,
5586 s->mem_index, ot | MO_LE);
5587 gen_op_mov_reg_v(ot, reg, cpu_T1);
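/* LES/LDS/LSS/LFS/LGS load a far pointer: the offset is fetched into T1
   first, then the 16-bit selector, which is loaded into the segment
   register before the offset is written back, so a faulting selector
   load leaves the destination register unchanged. */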
5590 case 0xc4: /* les Gv */
5591 /* In CODE64 this is VEX3; see above. */
5594 case 0xc5: /* lds Gv */
5595 /* In CODE64 this is VEX2; see above. */
5598 case 0x1b2: /* lss Gv */
5601 case 0x1b4: /* lfs Gv */
5604 case 0x1b5: /* lgs Gv */
5607 ot = dflag != MO_16 ? MO_32 : MO_16;
5608 modrm = cpu_ldub_code(env, s->pc++);
5609 reg = ((modrm >> 3) & 7) | rex_r;
5610 mod = (modrm >> 6) & 3;
5613 gen_lea_modrm(env, s, modrm);
5614 gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
5615 gen_add_A0_im(s, 1 << ot);
5616 /* load the segment first to handle exceptions properly */
5617 gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
5618 gen_movl_seg_T0(s, op);
5619 /* then put the data */
5620 gen_op_mov_reg_v(ot, reg, cpu_T1);
5622 gen_jmp_im(s->pc - s->cs_base);
5627 /************************/
5635 ot = mo_b_d(b, dflag);
5636 modrm = cpu_ldub_code(env, s->pc++);
5637 mod = (modrm >> 6) & 3;
5638 op = (modrm >> 3) & 7;
5644 gen_lea_modrm(env, s, modrm);
5647 opreg = (modrm & 7) | REX_B(s);
5652 gen_shift(s, op, ot, opreg, OR_ECX);
5655 shift = cpu_ldub_code(env, s->pc++);
5657 gen_shifti(s, op, ot, opreg, shift);
5672 case 0x1a4: /* shld imm */
5676 case 0x1a5: /* shld cl */
5680 case 0x1ac: /* shrd imm */
5684 case 0x1ad: /* shrd cl */
5689 modrm = cpu_ldub_code(env, s->pc++);
5690 mod = (modrm >> 6) & 3;
5691 rm = (modrm & 7) | REX_B(s);
5692 reg = ((modrm >> 3) & 7) | rex_r;
5694 gen_lea_modrm(env, s, modrm);
5699 gen_op_mov_v_reg(ot, cpu_T1, reg);
5702 TCGv imm = tcg_const_tl(cpu_ldub_code(env, s->pc++));
5703 gen_shiftd_rm_T1(s, ot, opreg, op, imm);
5706 gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
5710 /************************/
5713 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
5714 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5715 /* XXX: what to do if illegal op ? */
5716 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
5719 modrm = cpu_ldub_code(env, s->pc++);
5720 mod = (modrm >> 6) & 3;
5722 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
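/* The low three opcode bits and the ModRM reg field are packed into a
   single 6-bit x87 operation index; the switches below decode it. */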
5725 gen_lea_modrm(env, s, modrm);
5727 case 0x00 ... 0x07: /* fxxxs */
5728 case 0x10 ... 0x17: /* fixxxl */
5729 case 0x20 ... 0x27: /* fxxxl */
5730 case 0x30 ... 0x37: /* fixxx */
5737 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5738 s->mem_index, MO_LEUL);
5739 gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32);
5742 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5743 s->mem_index, MO_LEUL);
5744 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
5747 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
5748 s->mem_index, MO_LEQ);
5749 gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64);
5753 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5754 s->mem_index, MO_LESW);
5755 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
5759 gen_helper_fp_arith_ST0_FT0(op1);
5761 /* fcomp needs pop */
5762 gen_helper_fpop(cpu_env);
5766 case 0x08: /* flds */
5767 case 0x0a: /* fsts */
5768 case 0x0b: /* fstps */
5769 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5770 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5771 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
5776 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5777 s->mem_index, MO_LEUL);
5778 gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32);
5781 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5782 s->mem_index, MO_LEUL);
5783 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
5786 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
5787 s->mem_index, MO_LEQ);
5788 gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64);
5792 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5793 s->mem_index, MO_LESW);
5794 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
5799 /* XXX: the corresponding CPUID bit must be tested ! */
5802 gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env);
5803 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5804 s->mem_index, MO_LEUL);
5807 gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env);
5808 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
5809 s->mem_index, MO_LEQ);
5813 gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env);
5814 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5815 s->mem_index, MO_LEUW);
5818 gen_helper_fpop(cpu_env);
5823 gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env);
5824 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5825 s->mem_index, MO_LEUL);
5828 gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env);
5829 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5830 s->mem_index, MO_LEUL);
5833 gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env);
5834 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
5835 s->mem_index, MO_LEQ);
5839 gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env);
5840 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5841 s->mem_index, MO_LEUW);
5845 gen_helper_fpop(cpu_env);
5849 case 0x0c: /* fldenv mem */
5850 gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
5852 case 0x0d: /* fldcw mem */
5853 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5854 s->mem_index, MO_LEUW);
5855 gen_helper_fldcw(cpu_env, cpu_tmp2_i32);
5857 case 0x0e: /* fnstenv mem */
5858 gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
5860 case 0x0f: /* fnstcw mem */
5861 gen_helper_fnstcw(cpu_tmp2_i32, cpu_env);
5862 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5863 s->mem_index, MO_LEUW);
5865 case 0x1d: /* fldt mem */
5866 gen_helper_fldt_ST0(cpu_env, cpu_A0);
5868 case 0x1f: /* fstpt mem */
5869 gen_helper_fstt_ST0(cpu_env, cpu_A0);
5870 gen_helper_fpop(cpu_env);
5872 case 0x2c: /* frstor mem */
5873 gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
5875 case 0x2e: /* fnsave mem */
5876 gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
5878 case 0x2f: /* fnstsw mem */
5879 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
5880 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5881 s->mem_index, MO_LEUW);
5883 case 0x3c: /* fbld */
5884 gen_helper_fbld_ST0(cpu_env, cpu_A0);
5886 case 0x3e: /* fbstp */
5887 gen_helper_fbst_ST0(cpu_env, cpu_A0);
5888 gen_helper_fpop(cpu_env);
5890 case 0x3d: /* fildll */
5891 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
5892 gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64);
5894 case 0x3f: /* fistpll */
5895 gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env);
5896 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
5897 gen_helper_fpop(cpu_env);
5903 /* register float ops */
5907 case 0x08: /* fld sti */
5908 gen_helper_fpush(cpu_env);
5909 gen_helper_fmov_ST0_STN(cpu_env,
5910 tcg_const_i32((opreg + 1) & 7));
5912 case 0x09: /* fxchg sti */
5913 case 0x29: /* fxchg4 sti, undocumented op */
5914 case 0x39: /* fxchg7 sti, undocumented op */
5915 gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
5917 case 0x0a: /* grp d9/2 */
5920 /* check exceptions (FreeBSD FPU probe) */
5921 gen_helper_fwait(cpu_env);
5927 case 0x0c: /* grp d9/4 */
5930 gen_helper_fchs_ST0(cpu_env);
5933 gen_helper_fabs_ST0(cpu_env);
5936 gen_helper_fldz_FT0(cpu_env);
5937 gen_helper_fcom_ST0_FT0(cpu_env);
5940 gen_helper_fxam_ST0(cpu_env);
5946 case 0x0d: /* grp d9/5 */
5950 gen_helper_fpush(cpu_env);
5951 gen_helper_fld1_ST0(cpu_env);
5954 gen_helper_fpush(cpu_env);
5955 gen_helper_fldl2t_ST0(cpu_env);
5958 gen_helper_fpush(cpu_env);
5959 gen_helper_fldl2e_ST0(cpu_env);
5962 gen_helper_fpush(cpu_env);
5963 gen_helper_fldpi_ST0(cpu_env);
5966 gen_helper_fpush(cpu_env);
5967 gen_helper_fldlg2_ST0(cpu_env);
5970 gen_helper_fpush(cpu_env);
5971 gen_helper_fldln2_ST0(cpu_env);
5974 gen_helper_fpush(cpu_env);
5975 gen_helper_fldz_ST0(cpu_env);
5982 case 0x0e: /* grp d9/6 */
5985 gen_helper_f2xm1(cpu_env);
5988 gen_helper_fyl2x(cpu_env);
5991 gen_helper_fptan(cpu_env);
5993 case 3: /* fpatan */
5994 gen_helper_fpatan(cpu_env);
5996 case 4: /* fxtract */
5997 gen_helper_fxtract(cpu_env);
5999 case 5: /* fprem1 */
6000 gen_helper_fprem1(cpu_env);
6002 case 6: /* fdecstp */
6003 gen_helper_fdecstp(cpu_env);
6006 case 7: /* fincstp */
6007 gen_helper_fincstp(cpu_env);
6011 case 0x0f: /* grp d9/7 */
6014 gen_helper_fprem(cpu_env);
6016 case 1: /* fyl2xp1 */
6017 gen_helper_fyl2xp1(cpu_env);
6020 gen_helper_fsqrt(cpu_env);
6022 case 3: /* fsincos */
6023 gen_helper_fsincos(cpu_env);
6025 case 5: /* fscale */
6026 gen_helper_fscale(cpu_env);
6028 case 4: /* frndint */
6029 gen_helper_frndint(cpu_env);
6032 gen_helper_fsin(cpu_env);
6036 gen_helper_fcos(cpu_env);
6040 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
6041 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
6042 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
6048 gen_helper_fp_arith_STN_ST0(op1, opreg);
6050 gen_helper_fpop(cpu_env);
6052 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6053 gen_helper_fp_arith_ST0_FT0(op1);
6057 case 0x02: /* fcom */
6058 case 0x22: /* fcom2, undocumented op */
6059 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6060 gen_helper_fcom_ST0_FT0(cpu_env);
6062 case 0x03: /* fcomp */
6063 case 0x23: /* fcomp3, undocumented op */
6064 case 0x32: /* fcomp5, undocumented op */
6065 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6066 gen_helper_fcom_ST0_FT0(cpu_env);
6067 gen_helper_fpop(cpu_env);
6069 case 0x15: /* da/5 */
6071 case 1: /* fucompp */
6072 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
6073 gen_helper_fucom_ST0_FT0(cpu_env);
6074 gen_helper_fpop(cpu_env);
6075 gen_helper_fpop(cpu_env);
6083 case 0: /* feni (287 only, just do nop here) */
6085 case 1: /* fdisi (287 only, just do nop here) */
6088 gen_helper_fclex(cpu_env);
6090 case 3: /* fninit */
6091 gen_helper_fninit(cpu_env);
6093 case 4: /* fsetpm (287 only, just do nop here) */
6099 case 0x1d: /* fucomi */
6100 if (!(s->cpuid_features & CPUID_CMOV)) {
6103 gen_update_cc_op(s);
6104 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6105 gen_helper_fucomi_ST0_FT0(cpu_env);
6106 set_cc_op(s, CC_OP_EFLAGS);
6108 case 0x1e: /* fcomi */
6109 if (!(s->cpuid_features & CPUID_CMOV)) {
6112 gen_update_cc_op(s);
6113 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6114 gen_helper_fcomi_ST0_FT0(cpu_env);
6115 set_cc_op(s, CC_OP_EFLAGS);
6117 case 0x28: /* ffree sti */
6118 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
6120 case 0x2a: /* fst sti */
6121 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
6123 case 0x2b: /* fstp sti */
6124 case 0x0b: /* fstp1 sti, undocumented op */
6125 case 0x3a: /* fstp8 sti, undocumented op */
6126 case 0x3b: /* fstp9 sti, undocumented op */
6127 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
6128 gen_helper_fpop(cpu_env);
6130 case 0x2c: /* fucom st(i) */
6131 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6132 gen_helper_fucom_ST0_FT0(cpu_env);
6134 case 0x2d: /* fucomp st(i) */
6135 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6136 gen_helper_fucom_ST0_FT0(cpu_env);
6137 gen_helper_fpop(cpu_env);
6139 case 0x33: /* de/3 */
6141 case 1: /* fcompp */
6142 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
6143 gen_helper_fcom_ST0_FT0(cpu_env);
6144 gen_helper_fpop(cpu_env);
6145 gen_helper_fpop(cpu_env);
6151 case 0x38: /* ffreep sti, undocumented op */
6152 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
6153 gen_helper_fpop(cpu_env);
6155 case 0x3c: /* df/4 */
6158 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
6159 tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
6160 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
6166 case 0x3d: /* fucomip */
6167 if (!(s->cpuid_features & CPUID_CMOV)) {
6170 gen_update_cc_op(s);
6171 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6172 gen_helper_fucomi_ST0_FT0(cpu_env);
6173 gen_helper_fpop(cpu_env);
6174 set_cc_op(s, CC_OP_EFLAGS);
6176 case 0x3e: /* fcomip */
6177 if (!(s->cpuid_features & CPUID_CMOV)) {
6180 gen_update_cc_op(s);
6181 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6182 gen_helper_fcomi_ST0_FT0(cpu_env);
6183 gen_helper_fpop(cpu_env);
6184 set_cc_op(s, CC_OP_EFLAGS);
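/* FCMOVcc: the ST0 := ST(i) move is skipped, via gen_jcc1_noeob, when
   the tested condition is not satisfied; no end of block is needed. */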
6186 case 0x10 ... 0x13: /* fcmovxx */
6191 static const uint8_t fcmov_cc[8] = {
6198 if (!(s->cpuid_features & CPUID_CMOV)) {
6201 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
6202 l1 = gen_new_label();
6203 gen_jcc1_noeob(s, op1, l1);
6204 gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
6213 /************************/
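/* string ops: with a REP/REPNE prefix the whole loop is generated by the
   gen_repz_* helpers, otherwise a single iteration is emitted. */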
6216 case 0xa4: /* movsS */
6218 ot = mo_b_d(b, dflag);
6219 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6220 gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6226 case 0xaa: /* stosS */
6228 ot = mo_b_d(b, dflag);
6229 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6230 gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6235 case 0xac: /* lodsS */
6237 ot = mo_b_d(b, dflag);
6238 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6239 gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6244 case 0xae: /* scasS */
6246 ot = mo_b_d(b, dflag);
6247 if (prefixes & PREFIX_REPNZ) {
6248 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6249 } else if (prefixes & PREFIX_REPZ) {
6250 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6256 case 0xa6: /* cmpsS */
6258 ot = mo_b_d(b, dflag);
6259 if (prefixes & PREFIX_REPNZ) {
6260 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6261 } else if (prefixes & PREFIX_REPZ) {
6262 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6267 case 0x6c: /* insS */
6269 ot = mo_b_d32(b, dflag);
6270 tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
6271 gen_check_io(s, ot, pc_start - s->cs_base,
6272 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
6273 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6274 gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6277 if (s->tb->cflags & CF_USE_ICOUNT) {
6278 gen_jmp(s, s->pc - s->cs_base);
6282 case 0x6e: /* outsS */
6284 ot = mo_b_d32(b, dflag);
6285 tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
6286 gen_check_io(s, ot, pc_start - s->cs_base,
6287 svm_is_rep(prefixes) | 4);
6288 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6289 gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6292 if (s->tb->cflags & CF_USE_ICOUNT) {
6293 gen_jmp(s, s->pc - s->cs_base);
6298 /************************/
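/* port I/O: gen_check_io validates I/O permissions (CPL/IOPL or the TSS
   I/O bitmap) and generates the SVM IOIO intercept check. */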
6303 ot = mo_b_d32(b, dflag);
6304 val = cpu_ldub_code(env, s->pc++);
6305 tcg_gen_movi_tl(cpu_T0, val);
6306 gen_check_io(s, ot, pc_start - s->cs_base,
6307 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6308 if (s->tb->cflags & CF_USE_ICOUNT) {
6311 tcg_gen_movi_i32(cpu_tmp2_i32, val);
6312 gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32);
6313 gen_op_mov_reg_v(ot, R_EAX, cpu_T1);
6314 gen_bpt_io(s, cpu_tmp2_i32, ot);
6315 if (s->tb->cflags & CF_USE_ICOUNT) {
6317 gen_jmp(s, s->pc - s->cs_base);
6322 ot = mo_b_d32(b, dflag);
6323 val = cpu_ldub_code(env, s->pc++);
6324 tcg_gen_movi_tl(cpu_T0, val);
6325 gen_check_io(s, ot, pc_start - s->cs_base,
6326 svm_is_rep(prefixes));
6327 gen_op_mov_v_reg(ot, cpu_T1, R_EAX);
6329 if (s->tb->cflags & CF_USE_ICOUNT) {
6332 tcg_gen_movi_i32(cpu_tmp2_i32, val);
6333 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
6334 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6335 gen_bpt_io(s, cpu_tmp2_i32, ot);
6336 if (s->tb->cflags & CF_USE_ICOUNT) {
6338 gen_jmp(s, s->pc - s->cs_base);
6343 ot = mo_b_d32(b, dflag);
6344 tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
6345 gen_check_io(s, ot, pc_start - s->cs_base,
6346 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6347 if (s->tb->cflags & CF_USE_ICOUNT) {
6350 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
6351 gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32);
6352 gen_op_mov_reg_v(ot, R_EAX, cpu_T1);
6353 gen_bpt_io(s, cpu_tmp2_i32, ot);
6354 if (s->tb->cflags & CF_USE_ICOUNT) {
6356 gen_jmp(s, s->pc - s->cs_base);
6361 ot = mo_b_d32(b, dflag);
6362 tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
6363 gen_check_io(s, ot, pc_start - s->cs_base,
6364 svm_is_rep(prefixes));
6365 gen_op_mov_v_reg(ot, cpu_T1, R_EAX);
6367 if (s->tb->cflags & CF_USE_ICOUNT) {
6370 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
6371 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
6372 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6373 gen_bpt_io(s, cpu_tmp2_i32, ot);
6374 if (s->tb->cflags & CF_USE_ICOUNT) {
6376 gen_jmp(s, s->pc - s->cs_base);
6380 /************************/
6382 case 0xc2: /* ret im */
6383 val = cpu_ldsw_code(env, s->pc);
6386 gen_stack_update(s, val + (1 << ot));
6387 /* Note that gen_pop_T0 uses a zero-extending load. */
6388 gen_op_jmp_v(cpu_T0);
6392 case 0xc3: /* ret */
6394 gen_pop_update(s, ot);
6395 /* Note that gen_pop_T0 uses a zero-extending load. */
6396 gen_op_jmp_v(cpu_T0);
6400 case 0xca: /* lret im */
6401 val = cpu_ldsw_code(env, s->pc);
6404 if (s->pe && !s->vm86) {
6405 gen_update_cc_op(s);
6406 gen_jmp_im(pc_start - s->cs_base);
6407 gen_helper_lret_protected(cpu_env, tcg_const_i32(dflag - 1),
6408 tcg_const_i32(val));
6412 gen_op_ld_v(s, dflag, cpu_T0, cpu_A0);
6413 /* NOTE: keeping EIP updated is not a problem in case of exception */
6415 gen_op_jmp_v(cpu_T0);
6417 gen_add_A0_im(s, 1 << dflag);
6418 gen_op_ld_v(s, dflag, cpu_T0, cpu_A0);
6419 gen_op_movl_seg_T0_vm(R_CS);
6420 /* add stack offset */
6421 gen_stack_update(s, val + (2 << dflag));
6425 case 0xcb: /* lret */
6428 case 0xcf: /* iret */
6429 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
6432 gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
6433 set_cc_op(s, CC_OP_EFLAGS);
6434 } else if (s->vm86) {
6436 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6438 gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
6439 set_cc_op(s, CC_OP_EFLAGS);
6442 gen_helper_iret_protected(cpu_env, tcg_const_i32(dflag - 1),
6443 tcg_const_i32(s->pc - s->cs_base));
6444 set_cc_op(s, CC_OP_EFLAGS);
6448 case 0xe8: /* call im */
6450 if (dflag != MO_16) {
6451 tval = (int32_t)insn_get(env, s, MO_32);
6453 tval = (int16_t)insn_get(env, s, MO_16);
6455 next_eip = s->pc - s->cs_base;
6457 if (dflag == MO_16) {
6459 } else if (!CODE64(s)) {
6462 tcg_gen_movi_tl(cpu_T0, next_eip);
6463 gen_push_v(s, cpu_T0);
6468 case 0x9a: /* lcall im */
6470 unsigned int selector, offset;
6475 offset = insn_get(env, s, ot);
6476 selector = insn_get(env, s, MO_16);
6478 tcg_gen_movi_tl(cpu_T0, selector);
6479 tcg_gen_movi_tl(cpu_T1, offset);
6482 case 0xe9: /* jmp im */
6483 if (dflag != MO_16) {
6484 tval = (int32_t)insn_get(env, s, MO_32);
6486 tval = (int16_t)insn_get(env, s, MO_16);
6488 tval += s->pc - s->cs_base;
6489 if (dflag == MO_16) {
6491 } else if (!CODE64(s)) {
6497 case 0xea: /* ljmp im */
6499 unsigned int selector, offset;
6504 offset = insn_get(env, s, ot);
6505 selector = insn_get(env, s, MO_16);
6507 tcg_gen_movi_tl(cpu_T0, selector);
6508 tcg_gen_movi_tl(cpu_T1, offset);
6511 case 0xeb: /* jmp Jb */
6512 tval = (int8_t)insn_get(env, s, MO_8);
6513 tval += s->pc - s->cs_base;
6514 if (dflag == MO_16) {
6519 case 0x70 ... 0x7f: /* jcc Jb */
6520 tval = (int8_t)insn_get(env, s, MO_8);
6522 case 0x180 ... 0x18f: /* jcc Jv */
6523 if (dflag != MO_16) {
6524 tval = (int32_t)insn_get(env, s, MO_32);
6526 tval = (int16_t)insn_get(env, s, MO_16);
6529 next_eip = s->pc - s->cs_base;
6531 if (dflag == MO_16) {
6535 gen_jcc(s, b, tval, next_eip);
6538 case 0x190 ... 0x19f: /* setcc Gv */
6539 modrm = cpu_ldub_code(env, s->pc++);
6540 gen_setcc1(s, b, cpu_T0);
6541 gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
6543 case 0x140 ... 0x14f: /* cmov Gv, Ev */
6544 if (!(s->cpuid_features & CPUID_CMOV)) {
6548 modrm = cpu_ldub_code(env, s->pc++);
6549 reg = ((modrm >> 3) & 7) | rex_r;
6550 gen_cmovcc1(env, s, ot, b, modrm, reg);
6553 /************************/
6555 case 0x9c: /* pushf */
6556 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
6557 if (s->vm86 && s->iopl != 3) {
6558 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6560 gen_update_cc_op(s);
6561 gen_helper_read_eflags(cpu_T0, cpu_env);
6562 gen_push_v(s, cpu_T0);
6565 case 0x9d: /* popf */
6566 gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
6567 if (s->vm86 && s->iopl != 3) {
6568 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6572 if (dflag != MO_16) {
6573 gen_helper_write_eflags(cpu_env, cpu_T0,
6574 tcg_const_i32((TF_MASK | AC_MASK |
6579 gen_helper_write_eflags(cpu_env, cpu_T0,
6580 tcg_const_i32((TF_MASK | AC_MASK |
6582 IF_MASK | IOPL_MASK)
6586 if (s->cpl <= s->iopl) {
6587 if (dflag != MO_16) {
6588 gen_helper_write_eflags(cpu_env, cpu_T0,
6589 tcg_const_i32((TF_MASK |
6595 gen_helper_write_eflags(cpu_env, cpu_T0,
6596 tcg_const_i32((TF_MASK |
6604 if (dflag != MO_16) {
6605 gen_helper_write_eflags(cpu_env, cpu_T0,
6606 tcg_const_i32((TF_MASK | AC_MASK |
6607 ID_MASK | NT_MASK)));
6609 gen_helper_write_eflags(cpu_env, cpu_T0,
6610 tcg_const_i32((TF_MASK | AC_MASK |
6616 gen_pop_update(s, ot);
6617 set_cc_op(s, CC_OP_EFLAGS);
6618 /* abort translation because TF/AC flag may change */
6619 gen_jmp_im(s->pc - s->cs_base);
6623 case 0x9e: /* sahf */
6624 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6626 gen_op_mov_v_reg(MO_8, cpu_T0, R_AH);
6627 gen_compute_eflags(s);
6628 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
6629 tcg_gen_andi_tl(cpu_T0, cpu_T0, CC_S | CC_Z | CC_A | CC_P | CC_C);
6630 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T0);
6632 case 0x9f: /* lahf */
6633 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6635 gen_compute_eflags(s);
6636 /* Note: gen_compute_eflags() only gives the condition codes */
6637 tcg_gen_ori_tl(cpu_T0, cpu_cc_src, 0x02);
6638 gen_op_mov_reg_v(MO_8, R_AH, cpu_T0);
6640 case 0xf5: /* cmc */
6641 gen_compute_eflags(s);
6642 tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6644 case 0xf8: /* clc */
6645 gen_compute_eflags(s);
6646 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
6648 case 0xf9: /* stc */
6649 gen_compute_eflags(s);
6650 tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6652 case 0xfc: /* cld */
6653 tcg_gen_movi_i32(cpu_tmp2_i32, 1);
6654 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
6656 case 0xfd: /* std */
6657 tcg_gen_movi_i32(cpu_tmp2_i32, -1);
6658 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
6661 /************************/
6662 /* bit operations */
6663 case 0x1ba: /* bt/bts/btr/btc Gv, im */
6665 modrm = cpu_ldub_code(env, s->pc++);
6666 op = (modrm >> 3) & 7;
6667 mod = (modrm >> 6) & 3;
6668 rm = (modrm & 7) | REX_B(s);
6671 gen_lea_modrm(env, s, modrm);
6672 if (!(s->prefix & PREFIX_LOCK)) {
6673 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
6676 gen_op_mov_v_reg(ot, cpu_T0, rm);
6679 val = cpu_ldub_code(env, s->pc++);
6680 tcg_gen_movi_tl(cpu_T1, val);
6685 case 0x1a3: /* bt Gv, Ev */
6688 case 0x1ab: /* bts */
6691 case 0x1b3: /* btr */
6694 case 0x1bb: /* btc */
6698 modrm = cpu_ldub_code(env, s->pc++);
6699 reg = ((modrm >> 3) & 7) | rex_r;
6700 mod = (modrm >> 6) & 3;
6701 rm = (modrm & 7) | REX_B(s);
6702 gen_op_mov_v_reg(MO_32, cpu_T1, reg);
6704 AddressParts a = gen_lea_modrm_0(env, s, modrm);
6705 /* specific case: the address must be displaced by the bit index's signed element offset */
6706 gen_exts(ot, cpu_T1);
6707 tcg_gen_sari_tl(cpu_tmp0, cpu_T1, 3 + ot);
6708 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
6709 tcg_gen_add_tl(cpu_A0, gen_lea_modrm_1(a), cpu_tmp0);
6710 gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
6711 if (!(s->prefix & PREFIX_LOCK)) {
6712 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
6715 gen_op_mov_v_reg(ot, cpu_T0, rm);
6718 tcg_gen_andi_tl(cpu_T1, cpu_T1, (1 << (3 + ot)) - 1);
6719 tcg_gen_movi_tl(cpu_tmp0, 1);
6720 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T1);
6721 if (s->prefix & PREFIX_LOCK) {
6724 /* Needs no atomic ops; we suppressed the normal
6725 memory load for LOCK above so do it now. */
6726 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
6729 tcg_gen_atomic_fetch_or_tl(cpu_T0, cpu_A0, cpu_tmp0,
6730 s->mem_index, ot | MO_LE);
6733 tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
6734 tcg_gen_atomic_fetch_and_tl(cpu_T0, cpu_A0, cpu_tmp0,
6735 s->mem_index, ot | MO_LE);
6739 tcg_gen_atomic_fetch_xor_tl(cpu_T0, cpu_A0, cpu_tmp0,
6740 s->mem_index, ot | MO_LE);
6743 tcg_gen_shr_tl(cpu_tmp4, cpu_T0, cpu_T1);
6745 tcg_gen_shr_tl(cpu_tmp4, cpu_T0, cpu_T1);
6748 /* Data already loaded; nothing to do. */
6751 tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_tmp0);
6754 tcg_gen_andc_tl(cpu_T0, cpu_T0, cpu_tmp0);
6758 tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_tmp0);
6763 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
6765 gen_op_mov_reg_v(ot, rm, cpu_T0);
6770 /* Delay all CC updates until after the store above. Note that
6771 C is the result of the test, Z is unchanged, and the others
6772 are all undefined. */
6774 case CC_OP_MULB ... CC_OP_MULQ:
6775 case CC_OP_ADDB ... CC_OP_ADDQ:
6776 case CC_OP_ADCB ... CC_OP_ADCQ:
6777 case CC_OP_SUBB ... CC_OP_SUBQ:
6778 case CC_OP_SBBB ... CC_OP_SBBQ:
6779 case CC_OP_LOGICB ... CC_OP_LOGICQ:
6780 case CC_OP_INCB ... CC_OP_INCQ:
6781 case CC_OP_DECB ... CC_OP_DECQ:
6782 case CC_OP_SHLB ... CC_OP_SHLQ:
6783 case CC_OP_SARB ... CC_OP_SARQ:
6784 case CC_OP_BMILGB ... CC_OP_BMILGQ:
6785 /* Z was going to be computed from the non-zero status of CC_DST.
6786 We can get that same Z value (and the new C value) by leaving
6787 CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the same width. */
6789 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
6790 set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
6793 /* Otherwise, generate EFLAGS and replace the C bit. */
6794 gen_compute_eflags(s);
6795 tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, cpu_tmp4,
6800 case 0x1bc: /* bsf / tzcnt */
6801 case 0x1bd: /* bsr / lzcnt */
6803 modrm = cpu_ldub_code(env, s->pc++);
6804 reg = ((modrm >> 3) & 7) | rex_r;
6805 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
6806 gen_extu(ot, cpu_T0);
6808 /* Note that lzcnt and tzcnt are in different extensions. */
6809 if ((prefixes & PREFIX_REPZ)
6811 ? s->cpuid_ext3_features & CPUID_EXT3_ABM
6812 : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
6814 /* For lzcnt/tzcnt, the C bit is defined in terms of the input. */
6815 tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
6817 /* For lzcnt, reduce the target_ulong result by the
6818 number of zeros that we expect to find at the top. */
6819 tcg_gen_clzi_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS);
6820 tcg_gen_subi_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - size);
6822 /* For tzcnt, a zero input must return the operand size. */
6823 tcg_gen_ctzi_tl(cpu_T0, cpu_T0, size);
6825 /* For lzcnt/tzcnt, the Z bit is defined in terms of the result. */
6826 gen_op_update1_cc();
6827 set_cc_op(s, CC_OP_BMILGB + ot);
6829 /* For bsr/bsf, only the Z bit is defined and it is related
6830 to the input and not the result. */
6831 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
6832 set_cc_op(s, CC_OP_LOGICB + ot);
6834 /* ??? The manual says that the output is undefined when the
6835 input is zero, but real hardware leaves it unchanged, and
6836 real programs appear to depend on that. Accomplish this
6837 by passing the output as the value to return upon zero. */
6839 /* For bsr, return the bit index of the first 1 bit,
6840 not the count of leading zeros. */
6841 tcg_gen_xori_tl(cpu_T1, cpu_regs[reg], TARGET_LONG_BITS - 1);
6842 tcg_gen_clz_tl(cpu_T0, cpu_T0, cpu_T1);
6843 tcg_gen_xori_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - 1);
6845 tcg_gen_ctz_tl(cpu_T0, cpu_T0, cpu_regs[reg]);
6848 gen_op_mov_reg_v(ot, reg, cpu_T0);
6850 /************************/
6852 case 0x27: /* daa */
6855 gen_update_cc_op(s);
6856 gen_helper_daa(cpu_env);
6857 set_cc_op(s, CC_OP_EFLAGS);
6859 case 0x2f: /* das */
6862 gen_update_cc_op(s);
6863 gen_helper_das(cpu_env);
6864 set_cc_op(s, CC_OP_EFLAGS);
6866 case 0x37: /* aaa */
6869 gen_update_cc_op(s);
6870 gen_helper_aaa(cpu_env);
6871 set_cc_op(s, CC_OP_EFLAGS);
6873 case 0x3f: /* aas */
6876 gen_update_cc_op(s);
6877 gen_helper_aas(cpu_env);
6878 set_cc_op(s, CC_OP_EFLAGS);
6880 case 0xd4: /* aam */
6883 val = cpu_ldub_code(env, s->pc++);
6885 gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
6887 gen_helper_aam(cpu_env, tcg_const_i32(val));
6888 set_cc_op(s, CC_OP_LOGICB);
6891 case 0xd5: /* aad */
6894 val = cpu_ldub_code(env, s->pc++);
6895 gen_helper_aad(cpu_env, tcg_const_i32(val));
6896 set_cc_op(s, CC_OP_LOGICB);
6898 /************************/
6900 case 0x90: /* nop */
6901 /* XXX: correct lock test for all insn */
6902 if (prefixes & PREFIX_LOCK) {
6905 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
6907 goto do_xchg_reg_eax;
6909 if (prefixes & PREFIX_REPZ) {
6910 gen_update_cc_op(s);
6911 gen_jmp_im(pc_start - s->cs_base);
6912 gen_helper_pause(cpu_env, tcg_const_i32(s->pc - pc_start));
6913 s->is_jmp = DISAS_TB_JUMP;
6916 case 0x9b: /* fwait */
6917 if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
6918 (HF_MP_MASK | HF_TS_MASK)) {
6919 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
6921 gen_helper_fwait(cpu_env);
6924 case 0xcc: /* int3 */
6925 gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
6927 case 0xcd: /* int N */
6928 val = cpu_ldub_code(env, s->pc++);
6929 if (s->vm86 && s->iopl != 3) {
6930 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6932 gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
6935 case 0xce: /* into */
6938 gen_update_cc_op(s);
6939 gen_jmp_im(pc_start - s->cs_base);
6940 gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start));
6943 case 0xf1: /* icebp (undocumented, exits to external debugger) */
6944 gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
6946 gen_debug(s, pc_start - s->cs_base);
6949 tb_flush(CPU(x86_env_get_cpu(env)));
6950 qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
6954 case 0xfa: /* cli */
6956 if (s->cpl <= s->iopl) {
6957 gen_helper_cli(cpu_env);
6959 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6963 gen_helper_cli(cpu_env);
6965 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6969 case 0xfb: /* sti */
6970 if (s->vm86 ? s->iopl == 3 : s->cpl <= s->iopl) {
6971 gen_helper_sti(cpu_env);
6972 /* interrupts are recognized only after the insn following STI */
6973 gen_jmp_im(s->pc - s->cs_base);
6974 gen_eob_inhibit_irq(s, true);
6976 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6979 case 0x62: /* bound */
6983 modrm = cpu_ldub_code(env, s->pc++);
6984 reg = (modrm >> 3) & 7;
6985 mod = (modrm >> 6) & 3;
6988 gen_op_mov_v_reg(ot, cpu_T0, reg);
6989 gen_lea_modrm(env, s, modrm);
6990 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
6992 gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32);
6994 gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32);
6997 case 0x1c8 ... 0x1cf: /* bswap reg */
6998 reg = (b & 7) | REX_B(s);
6999 #ifdef TARGET_X86_64
7000 if (dflag == MO_64) {
7001 gen_op_mov_v_reg(MO_64, cpu_T0, reg);
7002 tcg_gen_bswap64_i64(cpu_T0, cpu_T0);
7003 gen_op_mov_reg_v(MO_64, reg, cpu_T0);
7007 gen_op_mov_v_reg(MO_32, cpu_T0, reg);
7008 tcg_gen_ext32u_tl(cpu_T0, cpu_T0);
7009 tcg_gen_bswap32_tl(cpu_T0, cpu_T0);
7010 gen_op_mov_reg_v(MO_32, reg, cpu_T0);
7013 case 0xd6: /* salc */
7016 gen_compute_eflags_c(s, cpu_T0);
7017 tcg_gen_neg_tl(cpu_T0, cpu_T0);
7018 gen_op_mov_reg_v(MO_8, R_EAX, cpu_T0);
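/* LOOPNZ/LOOPZ/LOOP/JECXZ: all but JECXZ decrement the count register,
   then branch on ECX (and, for the first two, also on ZF) using the
   three labels allocated below; the address size prefix selects
   CX/ECX/RCX. */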
7020 case 0xe0: /* loopnz */
7021 case 0xe1: /* loopz */
7022 case 0xe2: /* loop */
7023 case 0xe3: /* jecxz */
7025 TCGLabel *l1, *l2, *l3;
7027 tval = (int8_t)insn_get(env, s, MO_8);
7028 next_eip = s->pc - s->cs_base;
7030 if (dflag == MO_16) {
7034 l1 = gen_new_label();
7035 l2 = gen_new_label();
7036 l3 = gen_new_label();
7039 case 0: /* loopnz */
7041 gen_op_add_reg_im(s->aflag, R_ECX, -1);
7042 gen_op_jz_ecx(s->aflag, l3);
7043 gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
7046 gen_op_add_reg_im(s->aflag, R_ECX, -1);
7047 gen_op_jnz_ecx(s->aflag, l1);
7051 gen_op_jz_ecx(s->aflag, l1);
7056 gen_jmp_im(next_eip);
7065 case 0x130: /* wrmsr */
7066 case 0x132: /* rdmsr */
7068 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7070 gen_update_cc_op(s);
7071 gen_jmp_im(pc_start - s->cs_base);
7073 gen_helper_rdmsr(cpu_env);
7075 gen_helper_wrmsr(cpu_env);
7079 case 0x131: /* rdtsc */
7080 gen_update_cc_op(s);
7081 gen_jmp_im(pc_start - s->cs_base);
7082 if (s->tb->cflags & CF_USE_ICOUNT) {
7085 gen_helper_rdtsc(cpu_env);
7086 if (s->tb->cflags & CF_USE_ICOUNT) {
7088 gen_jmp(s, s->pc - s->cs_base);
7091 case 0x133: /* rdpmc */
7092 gen_update_cc_op(s);
7093 gen_jmp_im(pc_start - s->cs_base);
7094 gen_helper_rdpmc(cpu_env);
7096 case 0x134: /* sysenter */
7097 /* For Intel, SYSENTER is valid even in 64-bit mode */
7098 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
7101 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7103 gen_helper_sysenter(cpu_env);
7107 case 0x135: /* sysexit */
7108 /* For Intel, SYSEXIT is valid even in 64-bit mode */
7109 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
7112 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7114 gen_helper_sysexit(cpu_env, tcg_const_i32(dflag - 1));
7118 #ifdef TARGET_X86_64
7119 case 0x105: /* syscall */
7120 /* XXX: is it usable in real mode ? */
7121 gen_update_cc_op(s);
7122 gen_jmp_im(pc_start - s->cs_base);
7123 gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start));
7124 /* TF handling for the syscall insn is different. The TF bit is checked
7125 after the syscall insn completes. This allows #DB to not be
7126 generated after one has entered CPL0 if TF is set in FMASK. */
7127 gen_eob_worker(s, false, true);
7129 case 0x107: /* sysret */
7131 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7133 gen_helper_sysret(cpu_env, tcg_const_i32(dflag - 1));
7134 /* condition codes are modified only in long mode */
7136 set_cc_op(s, CC_OP_EFLAGS);
7138 /* TF handling for the sysret insn is different. The TF bit is
7139 checked after the sysret insn completes. This allows #DB to be
7140 generated "as if" the syscall insn in userspace has just
7142 gen_eob_worker(s, false, true);
7146 case 0x1a2: /* cpuid */
7147 gen_update_cc_op(s);
7148 gen_jmp_im(pc_start - s->cs_base);
7149 gen_helper_cpuid(cpu_env);
7151 case 0xf4: /* hlt */
7153 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7155 gen_update_cc_op(s);
7156 gen_jmp_im(pc_start - s->cs_base);
7157 gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
7158 s->is_jmp = DISAS_TB_JUMP;
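/* Group 0F 00 (sldt/lldt/str/ltr/verr/verw): only valid in protected
   mode; the reg field of the ModRM byte selects the operation, and the
   loads (lldt/ltr) additionally require CPL 0. */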
7162 modrm = cpu_ldub_code(env, s->pc++);
7163 mod = (modrm >> 6) & 3;
7164 op = (modrm >> 3) & 7;
7167 if (!s->pe || s->vm86)
7169 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
7170 tcg_gen_ld32u_tl(cpu_T0, cpu_env,
7171 offsetof(CPUX86State, ldt.selector));
7172 ot = mod == 3 ? dflag : MO_16;
7173 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
7176 if (!s->pe || s->vm86)
7179 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7181 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
7182 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7183 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
7184 gen_helper_lldt(cpu_env, cpu_tmp2_i32);
7188 if (!s->pe || s->vm86)
7190 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
7191 tcg_gen_ld32u_tl(cpu_T0, cpu_env,
7192 offsetof(CPUX86State, tr.selector));
7193 ot = mod == 3 ? dflag : MO_16;
7194 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
7197 if (!s->pe || s->vm86)
7200 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7202 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
7203 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7204 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
7205 gen_helper_ltr(cpu_env, cpu_tmp2_i32);
7210 if (!s->pe || s->vm86)
7212 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7213 gen_update_cc_op(s);
7215 gen_helper_verr(cpu_env, cpu_T0);
7217 gen_helper_verw(cpu_env, cpu_T0);
7219 set_cc_op(s, CC_OP_EFLAGS);
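/* Group 0F 01: memory forms select sgdt/sidt/lgdt/lidt/smsw/lmsw/invlpg
   through the CASE_MODRM macros; specific register encodings select
   monitor/mwait, clac/stac, xgetbv/xsetbv, rdpkru/wrpkru, the SVM
   instructions, swapgs and rdtscp. */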
7227 modrm = cpu_ldub_code(env, s->pc++);
7229 CASE_MODRM_MEM_OP(0): /* sgdt */
7230 gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
7231 gen_lea_modrm(env, s, modrm);
7232 tcg_gen_ld32u_tl(cpu_T0,
7233 cpu_env, offsetof(CPUX86State, gdt.limit));
7234 gen_op_st_v(s, MO_16, cpu_T0, cpu_A0);
7235 gen_add_A0_im(s, 2);
7236 tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base));
7237 if (dflag == MO_16) {
7238 tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
7240 gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
7243 case 0xc8: /* monitor */
7244 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) {
7247 gen_update_cc_op(s);
7248 gen_jmp_im(pc_start - s->cs_base);
7249 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EAX]);
7250 gen_extu(s->aflag, cpu_A0);
7251 gen_add_A0_ds_seg(s);
7252 gen_helper_monitor(cpu_env, cpu_A0);
7255 case 0xc9: /* mwait */
7256 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) {
7259 gen_update_cc_op(s);
7260 gen_jmp_im(pc_start - s->cs_base);
7261 gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
7265 case 0xca: /* clac */
7266 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
7270 gen_helper_clac(cpu_env);
7271 gen_jmp_im(s->pc - s->cs_base);
7275 case 0xcb: /* stac */
7276 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
7280 gen_helper_stac(cpu_env);
7281 gen_jmp_im(s->pc - s->cs_base);
7285 CASE_MODRM_MEM_OP(1): /* sidt */
7286 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
7287 gen_lea_modrm(env, s, modrm);
7288 tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.limit));
7289 gen_op_st_v(s, MO_16, cpu_T0, cpu_A0);
7290 gen_add_A0_im(s, 2);
7291 tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.base));
7292 if (dflag == MO_16) {
7293 tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
7295 gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
7298 case 0xd0: /* xgetbv */
7299 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
7300 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
7301 | PREFIX_REPZ | PREFIX_REPNZ))) {
7304 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
7305 gen_helper_xgetbv(cpu_tmp1_i64, cpu_env, cpu_tmp2_i32);
7306 tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_tmp1_i64);
7309 case 0xd1: /* xsetbv */
7310 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
7311 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
7312 | PREFIX_REPZ | PREFIX_REPNZ))) {
7316 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7319 tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
7321 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
7322 gen_helper_xsetbv(cpu_env, cpu_tmp2_i32, cpu_tmp1_i64);
7323 /* End TB because translation flags may change. */
7324 gen_jmp_im(s->pc - s->cs_base);
7328 case 0xd8: /* VMRUN */
7329 if (!(s->flags & HF_SVME_MASK) || !s->pe) {
7333 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7336 gen_update_cc_op(s);
7337 gen_jmp_im(pc_start - s->cs_base);
7338 gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag - 1),
7339 tcg_const_i32(s->pc - pc_start));
7341 s->is_jmp = DISAS_TB_JUMP;
7344 case 0xd9: /* VMMCALL */
7345 if (!(s->flags & HF_SVME_MASK)) {
7348 gen_update_cc_op(s);
7349 gen_jmp_im(pc_start - s->cs_base);
7350 gen_helper_vmmcall(cpu_env);
7353 case 0xda: /* VMLOAD */
7354 if (!(s->flags & HF_SVME_MASK) || !s->pe) {
7358 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7361 gen_update_cc_op(s);
7362 gen_jmp_im(pc_start - s->cs_base);
7363 gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag - 1));
7366 case 0xdb: /* VMSAVE */
7367 if (!(s->flags & HF_SVME_MASK) || !s->pe) {
7371 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7374 gen_update_cc_op(s);
7375 gen_jmp_im(pc_start - s->cs_base);
7376 gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag - 1));
7379 case 0xdc: /* STGI */
7380 if ((!(s->flags & HF_SVME_MASK)
7381 && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
7386 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7389 gen_update_cc_op(s);
7390 gen_jmp_im(pc_start - s->cs_base);
7391 gen_helper_stgi(cpu_env);
7394 case 0xdd: /* CLGI */
7395 if (!(s->flags & HF_SVME_MASK) || !s->pe) {
7399 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7402 gen_update_cc_op(s);
7403 gen_jmp_im(pc_start - s->cs_base);
7404 gen_helper_clgi(cpu_env);
7407 case 0xde: /* SKINIT */
7408 if ((!(s->flags & HF_SVME_MASK)
7409 && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
7413 gen_update_cc_op(s);
7414 gen_jmp_im(pc_start - s->cs_base);
7415 gen_helper_skinit(cpu_env);
7418 case 0xdf: /* INVLPGA */
7419 if (!(s->flags & HF_SVME_MASK) || !s->pe) {
7423 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7426 gen_update_cc_op(s);
7427 gen_jmp_im(pc_start - s->cs_base);
7428 gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag - 1));
7431 CASE_MODRM_MEM_OP(2): /* lgdt */
7433 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7436 gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_WRITE);
7437 gen_lea_modrm(env, s, modrm);
7438 gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0);
7439 gen_add_A0_im(s, 2);
7440 gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
7441 if (dflag == MO_16) {
7442 tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
7444 tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base));
7445 tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, gdt.limit));
7448 CASE_MODRM_MEM_OP(3): /* lidt */
7450 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7453 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_WRITE);
7454 gen_lea_modrm(env, s, modrm);
7455 gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0);
7456 gen_add_A0_im(s, 2);
7457 gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
7458 if (dflag == MO_16) {
7459 tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
7461 tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.base));
7462 tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, idt.limit));
7465 CASE_MODRM_OP(4): /* smsw */
7466 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
7467 tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, cr[0]));
7469 mod = (modrm >> 6) & 3;
7470 ot = (mod != 3 ? MO_16 : s->dflag);
7474 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
7476 case 0xee: /* rdpkru */
7477 if (prefixes & PREFIX_LOCK) {
7480 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
7481 gen_helper_rdpkru(cpu_tmp1_i64, cpu_env, cpu_tmp2_i32);
7482 tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_tmp1_i64);
7484 case 0xef: /* wrpkru */
7485 if (prefixes & PREFIX_LOCK) {
7488 tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
7490 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
7491 gen_helper_wrpkru(cpu_env, cpu_tmp2_i32, cpu_tmp1_i64);
7493 CASE_MODRM_OP(6): /* lmsw */
7495 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7498 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7499 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7500 gen_helper_lmsw(cpu_env, cpu_T0);
7501 gen_jmp_im(s->pc - s->cs_base);
7505 CASE_MODRM_MEM_OP(7): /* invlpg */
7507 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7510 gen_update_cc_op(s);
7511 gen_jmp_im(pc_start - s->cs_base);
7512 gen_lea_modrm(env, s, modrm);
7513 gen_helper_invlpg(cpu_env, cpu_A0);
7514 gen_jmp_im(s->pc - s->cs_base);
7518 case 0xf8: /* swapgs */
7519 #ifdef TARGET_X86_64
7522 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7524 tcg_gen_mov_tl(cpu_T0, cpu_seg_base[R_GS]);
7525 tcg_gen_ld_tl(cpu_seg_base[R_GS], cpu_env,
7526 offsetof(CPUX86State, kernelgsbase));
7527 tcg_gen_st_tl(cpu_T0, cpu_env,
7528 offsetof(CPUX86State, kernelgsbase));
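/*
 * SWAPGS: exchange the current GS base with the IA32_KERNEL_GS_BASE
 * MSR, implemented as a plain swap between the cpu_seg_base[R_GS]
 * global and env->kernelgsbase, using cpu_T0 as the temporary.
 */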
7535 case 0xf9: /* rdtscp */
7536 if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
7539 gen_update_cc_op(s);
7540 gen_jmp_im(pc_start - s->cs_base);
7541 if (s->tb->cflags & CF_USE_ICOUNT) {
7544 gen_helper_rdtscp(cpu_env);
7545 if (s->tb->cflags & CF_USE_ICOUNT) {
7547 gen_jmp(s, s->pc - s->cs_base);
7556 case 0x108: /* invd */
7557 case 0x109: /* wbinvd */
7559 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7561 gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
7565 case 0x63: /* arpl or movslS (x86_64) */
7566 #ifdef TARGET_X86_64
7569 /* d_ot is the size of the destination */
7572 modrm = cpu_ldub_code(env, s->pc++);
7573 reg = ((modrm >> 3) & 7) | rex_r;
7574 mod = (modrm >> 6) & 3;
7575 rm = (modrm & 7) | REX_B(s);
7578 gen_op_mov_v_reg(MO_32, cpu_T0, rm);
7580 if (d_ot == MO_64) {
7581 tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
7583 gen_op_mov_reg_v(d_ot, reg, cpu_T0);
7585 gen_lea_modrm(env, s, modrm);
7586 gen_op_ld_v(s, MO_32 | MO_SIGN, cpu_T0, cpu_A0);
7587 gen_op_mov_reg_v(d_ot, reg, cpu_T0);
7593 TCGv t0, t1, t2, a0;
7595 if (!s->pe || s->vm86)
7597 t0 = tcg_temp_local_new();
7598 t1 = tcg_temp_local_new();
7599 t2 = tcg_temp_local_new();
7601 modrm = cpu_ldub_code(env, s->pc++);
7602 reg = (modrm >> 3) & 7;
7603 mod = (modrm >> 6) & 3;
7606 gen_lea_modrm(env, s, modrm);
7607 gen_op_ld_v(s, ot, t0, cpu_A0);
7608 a0 = tcg_temp_local_new();
7609 tcg_gen_mov_tl(a0, cpu_A0);
7611 gen_op_mov_v_reg(ot, t0, rm);
7614 gen_op_mov_v_reg(ot, t1, reg);
7615 tcg_gen_andi_tl(cpu_tmp0, t0, 3);
7616 tcg_gen_andi_tl(t1, t1, 3);
7617 tcg_gen_movi_tl(t2, 0);
7618 label1 = gen_new_label();
7619 tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
7620 tcg_gen_andi_tl(t0, t0, ~3);
7621 tcg_gen_or_tl(t0, t0, t1);
7622 tcg_gen_movi_tl(t2, CC_Z);
7623 gen_set_label(label1);
7625 gen_op_st_v(s, ot, t0, a0);
7628 gen_op_mov_reg_v(ot, rm, t0);
7630 gen_compute_eflags(s);
7631 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
7632 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
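/*
 * ARPL: if the destination selector's RPL (low two bits, in t0) is
 * below the source's RPL (t1), raise it to the source RPL and set ZF;
 * otherwise leave it unchanged and clear ZF. t2 carries either 0 or
 * CC_Z and is merged into the computed flags just above.
 */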
7638 case 0x102: /* lar */
7639 case 0x103: /* lsl */
7643 if (!s->pe || s->vm86)
7645 ot = dflag != MO_16 ? MO_32 : MO_16;
7646 modrm = cpu_ldub_code(env, s->pc++);
7647 reg = ((modrm >> 3) & 7) | rex_r;
7648 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7649 t0 = tcg_temp_local_new();
7650 gen_update_cc_op(s);
7652 gen_helper_lar(t0, cpu_env, cpu_T0);
7654 gen_helper_lsl(t0, cpu_env, cpu_T0);
7656 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
7657 label1 = gen_new_label();
7658 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
7659 gen_op_mov_reg_v(ot, reg, t0);
7660 gen_set_label(label1);
7661 set_cc_op(s, CC_OP_EFLAGS);
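/*
 * LAR/LSL: the helper stores its result in t0 and reports success by
 * setting ZF in cpu_cc_src; the conditional branch above skips the
 * write-back when ZF is clear, so an invalid selector leaves the
 * destination register unchanged, as the architecture requires.
 */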
7666 modrm = cpu_ldub_code(env, s->pc++);
7667 mod = (modrm >> 6) & 3;
7668 op = (modrm >> 3) & 7;
7670 case 0: /* prefetchnta */
7671 case 1: /* prefetcht0 */
7672 case 2: /* prefetcht1 */
7673 case 3: /* prefetcht2 */
7676 gen_nop_modrm(env, s, modrm);
7677 /* nothing more to do */
7679 default: /* nop (multi byte) */
7680 gen_nop_modrm(env, s, modrm);
7685 modrm = cpu_ldub_code(env, s->pc++);
7686 if (s->flags & HF_MPX_EN_MASK) {
7687 mod = (modrm >> 6) & 3;
7688 reg = ((modrm >> 3) & 7) | rex_r;
7689 if (prefixes & PREFIX_REPZ) {
7692 || (prefixes & PREFIX_LOCK)
7693 || s->aflag == MO_16) {
7696 gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
7697 } else if (prefixes & PREFIX_REPNZ) {
7700 || (prefixes & PREFIX_LOCK)
7701 || s->aflag == MO_16) {
7704 TCGv_i64 notu = tcg_temp_new_i64();
7705 tcg_gen_not_i64(notu, cpu_bndu[reg]);
7706 gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
7707 tcg_temp_free_i64(notu);
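/*
 * BNDCL/BNDCU: the lower bound is compared directly (LTU faults when
 * the address is below it), while the upper bound is kept in the bnd
 * register in one's-complement form (see BNDMK below), so BNDCU first
 * un-complements it with tcg_gen_not_i64 before the GTU check.
 */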
7708 } else if (prefixes & PREFIX_DATA) {
7709 /* bndmov -- from reg/mem */
7710 if (reg >= 4 || s->aflag == MO_16) {
7714 int reg2 = (modrm & 7) | REX_B(s);
7715 if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
7718 if (s->flags & HF_MPX_IU_MASK) {
7719 tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
7720 tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
7723 gen_lea_modrm(env, s, modrm);
7725 tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0,
7726 s->mem_index, MO_LEQ);
7727 tcg_gen_addi_tl(cpu_A0, cpu_A0, 8);
7728 tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0,
7729 s->mem_index, MO_LEQ);
7731 tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0,
7732 s->mem_index, MO_LEUL);
7733 tcg_gen_addi_tl(cpu_A0, cpu_A0, 4);
7734 tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0,
7735 s->mem_index, MO_LEUL);
7737 /* bnd registers are now in-use */
7738 gen_set_hflag(s, HF_MPX_IU_MASK);
7740 } else if (mod != 3) {
7742 AddressParts a = gen_lea_modrm_0(env, s, modrm);
7744 || (prefixes & PREFIX_LOCK)
7745 || s->aflag == MO_16
7750 tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp);
7752 tcg_gen_movi_tl(cpu_A0, 0);
7754 gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
7756 tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]);
7758 tcg_gen_movi_tl(cpu_T0, 0);
7761 gen_helper_bndldx64(cpu_bndl[reg], cpu_env, cpu_A0, cpu_T0);
7762 tcg_gen_ld_i64(cpu_bndu[reg], cpu_env,
7763 offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
7765 gen_helper_bndldx32(cpu_bndu[reg], cpu_env, cpu_A0, cpu_T0);
7766 tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
7767 tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
7769 gen_set_hflag(s, HF_MPX_IU_MASK);
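/*
 * BNDLDX: in 64-bit mode the helper returns the lower bound and
 * leaves the upper bound in the env->mmx_t0 scratch slot; in 32-bit
 * mode both 32-bit bounds come back packed into one 64-bit value
 * (lower bound in the low half), hence the ext32u/shri split above.
 */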
7772 gen_nop_modrm(env, s, modrm);
7775 modrm = cpu_ldub_code(env, s->pc++);
7776 if (s->flags & HF_MPX_EN_MASK) {
7777 mod = (modrm >> 6) & 3;
7778 reg = ((modrm >> 3) & 7) | rex_r;
7779 if (mod != 3 && (prefixes & PREFIX_REPZ)) {
7782 || (prefixes & PREFIX_LOCK)
7783 || s->aflag == MO_16) {
7786 AddressParts a = gen_lea_modrm_0(env, s, modrm);
7788 tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
7790 tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
7792 } else if (a.base == -1) {
7793 /* no base register: the lower bound is 0 */
7794 tcg_gen_movi_i64(cpu_bndl[reg], 0);
7796 /* rip-relative generates #ud */
7799 tcg_gen_not_tl(cpu_A0, gen_lea_modrm_1(a));
7801 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
7803 tcg_gen_extu_tl_i64(cpu_bndu[reg], cpu_A0);
7804 /* bnd registers are now in-use */
7805 gen_set_hflag(s, HF_MPX_IU_MASK);
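/*
 * BNDMK: the lower bound is taken from the base register (or 0 when
 * there is no base), and the upper bound is the one's complement of
 * the full effective address computed by gen_lea_modrm_1(), i.e. the
 * in-register format that the BNDCU check above relies on.
 */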
7807 } else if (prefixes & PREFIX_REPNZ) {
7810 || (prefixes & PREFIX_LOCK)
7811 || s->aflag == MO_16) {
7814 gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
7815 } else if (prefixes & PREFIX_DATA) {
7816 /* bndmov -- to reg/mem */
7817 if (reg >= 4 || s->aflag == MO_16) {
7821 int reg2 = (modrm & 7) | REX_B(s);
7822 if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
7825 if (s->flags & HF_MPX_IU_MASK) {
7826 tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
7827 tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
7830 gen_lea_modrm(env, s, modrm);
7832 tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0,
7833 s->mem_index, MO_LEQ);
7834 tcg_gen_addi_tl(cpu_A0, cpu_A0, 8);
7835 tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0,
7836 s->mem_index, MO_LEQ);
7838 tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0,
7839 s->mem_index, MO_LEUL);
7840 tcg_gen_addi_tl(cpu_A0, cpu_A0, 4);
7841 tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0,
7842 s->mem_index, MO_LEUL);
7845 } else if (mod != 3) {
7847 AddressParts a = gen_lea_modrm_0(env, s, modrm);
7849 || (prefixes & PREFIX_LOCK)
7850 || s->aflag == MO_16
7855 tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp);
7857 tcg_gen_movi_tl(cpu_A0, 0);
7859 gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
7861 tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]);
7863 tcg_gen_movi_tl(cpu_T0, 0);
7866 gen_helper_bndstx64(cpu_env, cpu_A0, cpu_T0,
7867 cpu_bndl[reg], cpu_bndu[reg]);
7869 gen_helper_bndstx32(cpu_env, cpu_A0, cpu_T0,
7870 cpu_bndl[reg], cpu_bndu[reg]);
7874 gen_nop_modrm(env, s, modrm);
7876 case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
7877 modrm = cpu_ldub_code(env, s->pc++);
7878 gen_nop_modrm(env, s, modrm);
7880 case 0x120: /* mov reg, crN */
7881 case 0x122: /* mov crN, reg */
7883 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7885 modrm = cpu_ldub_code(env, s->pc++);
7886 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7887 * AMD documentation (24594.pdf) and testing of
7888 * Intel 386 and 486 processors all show that the mod bits
7889 * are assumed to be 1's, regardless of actual values.
7891 rm = (modrm & 7) | REX_B(s);
7892 reg = ((modrm >> 3) & 7) | rex_r;
7897 if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
7898 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
7907 gen_update_cc_op(s);
7908 gen_jmp_im(pc_start - s->cs_base);
7910 gen_op_mov_v_reg(ot, cpu_T0, rm);
7911 gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
7913 gen_jmp_im(s->pc - s->cs_base);
7916 gen_helper_read_crN(cpu_T0, cpu_env, tcg_const_i32(reg));
7917 gen_op_mov_reg_v(ot, rm, cpu_T0);
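/*
 * A move to a control register ends the translation block (the write
 * path above jumps to the next instruction), presumably because a CR
 * write can change paging or CPU-mode state that the translator bakes
 * into the TB flags; reads fall through normally.
 */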
7925 case 0x121: /* mov reg, drN */
7926 case 0x123: /* mov drN, reg */
7928 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7930 modrm = cpu_ldub_code(env, s->pc++);
7931 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7932 * AMD documentation (24594.pdf) and testing of
7933 * Intel 386 and 486 processors all show that the mod bits
7934 * are assumed to be 1's, regardless of actual values.
7936 rm = (modrm & 7) | REX_B(s);
7937 reg = ((modrm >> 3) & 7) | rex_r;
7946 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
7947 gen_op_mov_v_reg(ot, cpu_T0, rm);
7948 tcg_gen_movi_i32(cpu_tmp2_i32, reg);
7949 gen_helper_set_dr(cpu_env, cpu_tmp2_i32, cpu_T0);
7950 gen_jmp_im(s->pc - s->cs_base);
7953 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
7954 tcg_gen_movi_i32(cpu_tmp2_i32, reg);
7955 gen_helper_get_dr(cpu_T0, cpu_env, cpu_tmp2_i32);
7956 gen_op_mov_reg_v(ot, rm, cpu_T0);
7960 case 0x106: /* clts */
7962 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7964 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7965 gen_helper_clts(cpu_env);
7966 /* abort block because static cpu state changed */
7967 gen_jmp_im(s->pc - s->cs_base);
7971 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
7972 case 0x1c3: /* MOVNTI reg, mem */
7973 if (!(s->cpuid_features & CPUID_SSE2))
7975 ot = mo_64_32(dflag);
7976 modrm = cpu_ldub_code(env, s->pc++);
7977 mod = (modrm >> 6) & 3;
7980 reg = ((modrm >> 3) & 7) | rex_r;
7981 /* generate a generic store */
7982 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
7985 modrm = cpu_ldub_code(env, s->pc++);
7987 CASE_MODRM_MEM_OP(0): /* fxsave */
7988 if (!(s->cpuid_features & CPUID_FXSR)
7989 || (prefixes & PREFIX_LOCK)) {
7992 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
7993 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7996 gen_lea_modrm(env, s, modrm);
7997 gen_helper_fxsave(cpu_env, cpu_A0);
8000 CASE_MODRM_MEM_OP(1): /* fxrstor */
8001 if (!(s->cpuid_features & CPUID_FXSR)
8002 || (prefixes & PREFIX_LOCK)) {
8005 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
8006 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
8009 gen_lea_modrm(env, s, modrm);
8010 gen_helper_fxrstor(cpu_env, cpu_A0);
8013 CASE_MODRM_MEM_OP(2): /* ldmxcsr */
8014 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
8017 if (s->flags & HF_TS_MASK) {
8018 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
8021 gen_lea_modrm(env, s, modrm);
8022 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL);
8023 gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
8026 CASE_MODRM_MEM_OP(3): /* stmxcsr */
8027 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
8030 if (s->flags & HF_TS_MASK) {
8031 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
8034 gen_lea_modrm(env, s, modrm);
8035 tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, mxcsr));
8036 gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
8039 CASE_MODRM_MEM_OP(4): /* xsave */
8040 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
8041 || (prefixes & (PREFIX_LOCK | PREFIX_DATA
8042 | PREFIX_REPZ | PREFIX_REPNZ))) {
8045 gen_lea_modrm(env, s, modrm);
8046 tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
8048 gen_helper_xsave(cpu_env, cpu_A0, cpu_tmp1_i64);
8051 CASE_MODRM_MEM_OP(5): /* xrstor */
8052 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
8053 || (prefixes & (PREFIX_LOCK | PREFIX_DATA
8054 | PREFIX_REPZ | PREFIX_REPNZ))) {
8057 gen_lea_modrm(env, s, modrm);
8058 tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
8060 gen_helper_xrstor(cpu_env, cpu_A0, cpu_tmp1_i64);
8061 /* XRSTOR is how MPX is enabled, which changes how
8062 we translate. Thus we need to end the TB. */
8063 gen_update_cc_op(s);
8064 gen_jmp_im(s->pc - s->cs_base);
8068 CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
8069 if (prefixes & PREFIX_LOCK) {
8072 if (prefixes & PREFIX_DATA) {
8074 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) {
8077 gen_nop_modrm(env, s, modrm);
8080 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
8081 || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0
8082 || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) {
8085 gen_lea_modrm(env, s, modrm);
8086 tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
8088 gen_helper_xsaveopt(cpu_env, cpu_A0, cpu_tmp1_i64);
8092 CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
8093 if (prefixes & PREFIX_LOCK) {
8096 if (prefixes & PREFIX_DATA) {
8098 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) {
8103 if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))
8104 || !(s->cpuid_features & CPUID_CLFLUSH)) {
8108 gen_nop_modrm(env, s, modrm);
8111 case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
8112 case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */
8113 case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
8114 case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */
8116 && (prefixes & PREFIX_REPZ)
8117 && !(prefixes & PREFIX_LOCK)
8118 && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) {
8119 TCGv base, treg, src, dst;
8121 /* Preserve hflags bits by testing CR4 at runtime. */
8122 tcg_gen_movi_i32(cpu_tmp2_i32, CR4_FSGSBASE_MASK);
8123 gen_helper_cr4_testbit(cpu_env, cpu_tmp2_i32);
8125 base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
8126 treg = cpu_regs[(modrm & 7) | REX_B(s)];
8130 dst = base, src = treg;
8133 dst = treg, src = base;
8136 if (s->dflag == MO_32) {
8137 tcg_gen_ext32u_tl(dst, src);
8139 tcg_gen_mov_tl(dst, src);
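/*
 * RDFSBASE/WRFSBASE and the GS variants: bit 3 of the modrm byte
 * selects FS vs GS, the opcode group selects the direction, and the
 * CR4.FSGSBASE enable is tested at run time by the helper so the
 * translator does not need to track it in hflags. A 32-bit operand
 * size zero-extends, matching the architectural behaviour.
 */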
8145 case 0xf8: /* sfence / pcommit */
8146 if (prefixes & PREFIX_DATA) {
8148 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT)
8149 || (prefixes & PREFIX_LOCK)) {
8155 case 0xf9 ... 0xff: /* sfence */
8156 if (!(s->cpuid_features & CPUID_SSE)
8157 || (prefixes & PREFIX_LOCK)) {
8160 tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
8162 case 0xe8 ... 0xef: /* lfence */
8163 if (!(s->cpuid_features & CPUID_SSE)
8164 || (prefixes & PREFIX_LOCK)) {
8167 tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC);
8169 case 0xf0 ... 0xf7: /* mfence */
8170 if (!(s->cpuid_features & CPUID_SSE2)
8171 || (prefixes & PREFIX_LOCK)) {
8174 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
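/*
 * The SSE fences are mapped onto generic TCG barriers: SFENCE orders
 * stores (TCG_MO_ST_ST), LFENCE orders loads (TCG_MO_LD_LD) and
 * MFENCE orders both (TCG_MO_ALL), each with sequential-consistency
 * strength so the backend can emit whatever host barrier is required.
 */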
8182 case 0x10d: /* 3DNow! prefetch(w) */
8183 modrm = cpu_ldub_code(env, s->pc++);
8184 mod = (modrm >> 6) & 3;
8187 gen_nop_modrm(env, s, modrm);
8189 case 0x1aa: /* rsm */
8190 gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
8191 if (!(s->flags & HF_SMM_MASK))
8193 gen_update_cc_op(s);
8194 gen_jmp_im(s->pc - s->cs_base);
8195 gen_helper_rsm(cpu_env);
8198 case 0x1b8: /* SSE4.2 popcnt */
8199 if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
8202 if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
8205 modrm = cpu_ldub_code(env, s->pc++);
8206 reg = ((modrm >> 3) & 7) | rex_r;
8208 if (s->prefix & PREFIX_DATA) {
8211 ot = mo_64_32(dflag);
8214 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
8215 gen_extu(ot, cpu_T0);
8216 tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
8217 tcg_gen_ctpop_tl(cpu_T0, cpu_T0);
8218 gen_op_mov_reg_v(ot, reg, cpu_T0);
8220 set_cc_op(s, CC_OP_POPCNT);
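/*
 * POPCNT: the zero-extended source operand is saved in cpu_cc_src and
 * CC_OP_POPCNT lets the flags be computed lazily later: ZF is set iff
 * that saved value is zero, and the remaining arithmetic flags are
 * cleared, as the instruction defines.
 */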
8222 case 0x10e ... 0x10f:
8223 /* 3DNow! instructions, ignore prefixes */
8224 s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
8225 case 0x110 ... 0x117:
8226 case 0x128 ... 0x12f:
8227 case 0x138 ... 0x13a:
8228 case 0x150 ... 0x179:
8229 case 0x17c ... 0x17f:
8231 case 0x1c4 ... 0x1c6:
8232 case 0x1d0 ... 0x1fe:
8233 gen_sse(env, s, b, pc_start, rex_r);
8240 gen_illegal_opcode(s);
8243 gen_unknown_opcode(env, s);
8247 void tcg_x86_init(void)
8249 static const char reg_names[CPU_NB_REGS][4] = {
8250 #ifdef TARGET_X86_64
8278 static const char seg_base_names[6][8] = {
8286 static const char bnd_regl_names[4][8] = {
8287 "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
8289 static const char bnd_regu_names[4][8] = {
8290 "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
8293 static bool initialized;
8300 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
8301 tcg_ctx.tcg_env = cpu_env;
8302 cpu_cc_op = tcg_global_mem_new_i32(cpu_env,
8303 offsetof(CPUX86State, cc_op), "cc_op");
8304 cpu_cc_dst = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_dst),
8306 cpu_cc_src = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src),
8308 cpu_cc_src2 = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src2),
8311 for (i = 0; i < CPU_NB_REGS; ++i) {
8312 cpu_regs[i] = tcg_global_mem_new(cpu_env,
8313 offsetof(CPUX86State, regs[i]),
8317 for (i = 0; i < 6; ++i) {
8319 = tcg_global_mem_new(cpu_env,
8320 offsetof(CPUX86State, segs[i].base),
8324 for (i = 0; i < 4; ++i) {
8326 = tcg_global_mem_new_i64(cpu_env,
8327 offsetof(CPUX86State, bnd_regs[i].lb),
8330 = tcg_global_mem_new_i64(cpu_env,
8331 offsetof(CPUX86State, bnd_regs[i].ub),
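/*
 * All of the globals created above are thin aliases for fields inside
 * CPUX86State, addressed by their offset from env; generated TCG code
 * therefore reads and writes the guest registers, segment bases and
 * MPX bounds directly in the CPU state structure.
 */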
8336 /* generate intermediate code for basic block 'tb'. */
8337 void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
8339 X86CPU *cpu = x86_env_get_cpu(env);
8340 CPUState *cs = CPU(cpu);
8341 DisasContext dc1, *dc = &dc1;
8342 target_ulong pc_ptr;
8344 target_ulong pc_start;
8345 target_ulong cs_base;
8349 /* generate intermediate code */
8351 cs_base = tb->cs_base;
8354 dc->pe = (flags >> HF_PE_SHIFT) & 1;
8355 dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
8356 dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
8357 dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
8359 dc->vm86 = (flags >> VM_SHIFT) & 1;
8360 dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
8361 dc->iopl = (flags >> IOPL_SHIFT) & 3;
8362 dc->tf = (flags >> TF_SHIFT) & 1;
8363 dc->singlestep_enabled = cs->singlestep_enabled;
8364 dc->cc_op = CC_OP_DYNAMIC;
8365 dc->cc_op_dirty = false;
8366 dc->cs_base = cs_base;
8368 dc->popl_esp_hack = 0;
8369 /* select memory access functions */
8371 #ifdef CONFIG_SOFTMMU
8372 dc->mem_index = cpu_mmu_index(env, false);
8374 dc->cpuid_features = env->features[FEAT_1_EDX];
8375 dc->cpuid_ext_features = env->features[FEAT_1_ECX];
8376 dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
8377 dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
8378 dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
8379 dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
8380 #ifdef TARGET_X86_64
8381 dc->lma = (flags >> HF_LMA_SHIFT) & 1;
8382 dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
8385 dc->jmp_opt = !(dc->tf || cs->singlestep_enabled ||
8386 (flags & HF_INHIBIT_IRQ_MASK));
8387 /* Do not optimize repz jumps at all in icount mode, because
8388 rep movsS instructions are executed along different paths
8389 in the !repz_opt and repz_opt cases. The former was always
8390 used except in single-step mode. This setting disables the
8391 jump optimization so that the control paths become
8392 equivalent in normal and single-step execution.
8393 As a result there is no jump optimization for repz in
8394 record/replay mode, and there is always an additional
8395 step for ecx=0 when icount is enabled.
8397 dc->repz_opt = !dc->jmp_opt && !(tb->cflags & CF_USE_ICOUNT);
8399 /* check addseg logic */
8400 if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
8401 printf("ERROR addseg\n");
8404 cpu_T0 = tcg_temp_new();
8405 cpu_T1 = tcg_temp_new();
8406 cpu_A0 = tcg_temp_new();
8408 cpu_tmp0 = tcg_temp_new();
8409 cpu_tmp1_i64 = tcg_temp_new_i64();
8410 cpu_tmp2_i32 = tcg_temp_new_i32();
8411 cpu_tmp3_i32 = tcg_temp_new_i32();
8412 cpu_tmp4 = tcg_temp_new();
8413 cpu_ptr0 = tcg_temp_new_ptr();
8414 cpu_ptr1 = tcg_temp_new_ptr();
8415 cpu_cc_srcT = tcg_temp_local_new();
8417 dc->is_jmp = DISAS_NEXT;
8420 max_insns = tb->cflags & CF_COUNT_MASK;
8421 if (max_insns == 0) {
8422 max_insns = CF_COUNT_MASK;
8424 if (max_insns > TCG_MAX_INSNS) {
8425 max_insns = TCG_MAX_INSNS;
8430 tcg_gen_insn_start(pc_ptr, dc->cc_op);
8433 /* If RF is set, suppress an internally generated breakpoint. */
8434 if (unlikely(cpu_breakpoint_test(cs, pc_ptr,
8435 tb->flags & HF_RF_MASK
8436 ? BP_GDB : BP_ANY))) {
8437 gen_debug(dc, pc_ptr - dc->cs_base);
8438 /* The address covered by the breakpoint must be included in
8439 [tb->pc, tb->pc + tb->size) in order for it to be
8440 properly cleared -- thus we increment the PC here so that
8441 the logic setting tb->size below does the right thing. */
8443 goto done_generating;
8445 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
8449 pc_ptr = disas_insn(env, dc, pc_ptr);
8450 /* stop translation if indicated */
8453 /* in single-step mode, we generate only one instruction and
8454 generate an exception */
8455 /* if IRQs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
8456 the flag and abort the translation to give the IRQs a
8457 chance to happen */
8458 if (dc->tf || dc->singlestep_enabled ||
8459 (flags & HF_INHIBIT_IRQ_MASK)) {
8460 gen_jmp_im(pc_ptr - dc->cs_base);
8464 /* Do not cross a page boundary in icount mode, since it can
8465 cause an exception. Only do this when the boundary is
8466 crossed by the first instruction in the block.
8467 If the current instruction has already crossed the boundary,
8468 that is fine, because an exception has not stopped this code.
8470 if ((tb->cflags & CF_USE_ICOUNT)
8471 && ((pc_ptr & TARGET_PAGE_MASK)
8472 != ((pc_ptr + TARGET_MAX_INSN_SIZE - 1) & TARGET_PAGE_MASK)
8473 || (pc_ptr & ~TARGET_PAGE_MASK) == 0)) {
8474 gen_jmp_im(pc_ptr - dc->cs_base);
8478 /* if the translation has grown too long, stop generation too */
8479 if (tcg_op_buf_full() ||
8480 (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
8481 num_insns >= max_insns) {
8482 gen_jmp_im(pc_ptr - dc->cs_base);
8487 gen_jmp_im(pc_ptr - dc->cs_base);
8492 if (tb->cflags & CF_LAST_IO)
8495 gen_tb_end(tb, num_insns);
8498 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
8499 && qemu_log_in_addr_range(pc_start)) {
8502 qemu_log("----------------\n");
8503 qemu_log("IN: %s\n", lookup_symbol(pc_start));
8504 #ifdef TARGET_X86_64
8509 disas_flags = !dc->code32;
8510 log_target_disas(cs, pc_start, pc_ptr - pc_start, disas_flags);
8516 tb->size = pc_ptr - pc_start;
8517 tb->icount = num_insns;
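/*
 * restore_state_to_opc() below mirrors the tcg_gen_insn_start(pc_ptr,
 * dc->cc_op) call in the translator loop: data[0] is the guest code
 * pointer (eip + cs_base) and data[1] the cc_op recorded for that
 * instruction, so a fault in the middle of a TB can restore eip and
 * the lazy flag state exactly.
 */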
8520 void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb,
8523 int cc_op = data[1];
8524 env->eip = data[0] - tb->cs_base;
8525 if (cc_op != CC_OP_DYNAMIC) {