/*
 * i386 translation
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu/host-utils.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"


#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10
#define PREFIX_VEX    0x20

#ifdef TARGET_X86_64
#define CODE64(s) ((s)->code64)
#define REX_X(s) ((s)->rex_x)
#define REX_B(s) ((s)->rex_b)
#else
#define CODE64(s) 0
#define REX_X(s) 0
#define REX_B(s) 0
#endif

#ifdef TARGET_X86_64
# define ctztl  ctz64
# define clztl  clz64
#else
# define ctztl  ctz32
# define clztl  clz32
#endif

/* For a switch indexed by MODRM, match all memory operands for a given OP.  */
#define CASE_MODRM_MEM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7

#define CASE_MODRM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
    case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7

//#define MACRO_TEST   1

/* global register indexes */
static TCGv_env cpu_env;
static TCGv cpu_A0;
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2, cpu_cc_srcT;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
static TCGv cpu_seg_base[6];
static TCGv_i64 cpu_bndl[4];
static TCGv_i64 cpu_bndu[4];
/* local temps */
static TCGv cpu_T0, cpu_T1;
/* local register indexes (only used inside old micro ops) */
static TCGv cpu_tmp0, cpu_tmp4;
static TCGv_ptr cpu_ptr0, cpu_ptr1;
static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
static TCGv_i64 cpu_tmp1_i64;

#include "exec/gen-icount.h"

#ifdef TARGET_X86_64
static int x86_64_hregs;
#endif

typedef struct DisasContext {
    /* current insn context */
    int override; /* -1 if no override */
    int prefix;
    TCGMemOp aflag;
    TCGMemOp dflag;
    target_ulong pc_start;
    target_ulong pc; /* pc = eip + cs_base */
    int is_jmp; /* 1 means jump (stop translation), 2 means CPU
                   static state change (stop translation) */
    /* current block context */
    target_ulong cs_base; /* base of CS segment */
    int pe;     /* protected mode */
    int code32; /* 32 bit code segment */
#ifdef TARGET_X86_64
    int lma;    /* long mode active */
    int code64; /* 64 bit code segment */
    int rex_x, rex_b;
#endif
    int vex_l;  /* vex vector length */
    int vex_v;  /* vex vvvv register, without 1's complement.  */
    int ss32;   /* 32 bit stack segment */
    CCOp cc_op;  /* current CC operation */
    bool cc_op_dirty;
    int addseg; /* non zero if either DS/ES/SS have a non zero base */
    int f_st;   /* currently unused */
    int vm86;   /* vm86 mode */
    int cpl;
    int iopl;
    int tf;     /* TF cpu flag */
    int singlestep_enabled; /* "hardware" single step enabled */
    int jmp_opt; /* use direct block chaining for direct jumps */
    int repz_opt; /* optimize jumps within repz instructions */
    int mem_index; /* select memory access functions */
    uint64_t flags; /* all execution flags */
    struct TranslationBlock *tb;
    int popl_esp_hack; /* for correct popl with esp base handling */
    int rip_offset; /* only used in x86_64, but left for simplicity */
    int cpuid_features;
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
    int cpuid_7_0_ebx_features;
    int cpuid_xsave_features;
} DisasContext;

static void gen_eob(DisasContext *s);
static void gen_jmp(DisasContext *s, target_ulong eip);
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d);

/* i386 arith/logic operations */
enum {
    OP_ADDL,
    OP_ORL,
    OP_ADCL,
    OP_SBBL,
    OP_ANDL,
    OP_SUBL,
    OP_XORL,
    OP_CMPL,
};

/* i386 shift ops */
enum {
    OP_ROL,
    OP_ROR,
    OP_RCL,
    OP_RCR,
    OP_SHL,
    OP_SHR,
    OP_SHL1, /* undocumented */
    OP_SAR = 7,
};

enum {
    JCC_O,
    JCC_B,
    JCC_Z,
    JCC_BE,
    JCC_S,
    JCC_P,
    JCC_L,
    JCC_LE,
};

enum {
    /* I386 int registers */
    OR_EAX,   /* MUST be even numbered */
    OR_ECX,
    OR_EDX,
    OR_EBX,
    OR_ESP,
    OR_EBP,
    OR_ESI,
    OR_EDI,

    OR_TMP0 = 16,    /* temporary operand register */
    OR_TMP1,
    OR_A0, /* temporary register used when doing address evaluation */
};

enum {
    USES_CC_DST  = 1,
    USES_CC_SRC  = 2,
    USES_CC_SRC2 = 4,
    USES_CC_SRCT = 8,
};

/* Bit set if the global variable is live after setting CC_OP to X.  */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_EFLAGS] = USES_CC_SRC,
    [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
    [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
    [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_CLR] = 0,
};

static void set_cc_op(DisasContext *s, CCOp op)
{
    int dead;

    if (s->cc_op == op) {
        return;
    }

    /* Discard CC computation that will no longer be used. */
    dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
    if (dead & USES_CC_DST) {
        tcg_gen_discard_tl(cpu_cc_dst);
    }
    if (dead & USES_CC_SRC) {
        tcg_gen_discard_tl(cpu_cc_src);
    }
    if (dead & USES_CC_SRC2) {
        tcg_gen_discard_tl(cpu_cc_src2);
    }
    if (dead & USES_CC_SRCT) {
        tcg_gen_discard_tl(cpu_cc_srcT);
    }

    if (op == CC_OP_DYNAMIC) {
        /* The DYNAMIC setting is translator only, and should never be
           stored.  Thus we always consider it clean.  */
        s->cc_op_dirty = false;
    } else {
        /* Discard any computed CC_OP value (see shifts).  */
        if (s->cc_op == CC_OP_DYNAMIC) {
            tcg_gen_discard_i32(cpu_cc_op);
        }
        s->cc_op_dirty = true;
    }
    s->cc_op = op;
}

static void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op_dirty) {
        tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
        s->cc_op_dirty = false;
    }
}

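/*
 * Illustrative note: condition codes are evaluated lazily.  An arithmetic
 * op only records its operands/result in cpu_cc_dst/cpu_cc_src* and calls
 * set_cc_op() with the matching CC_OP_* constant; set_cc_op() discards
 * whichever of those globals the new mode no longer needs (per
 * cc_op_live[]), and gen_update_cc_op() spills CC_OP to the env only when
 * it is dirty.  For example, a logic op below does:
 *
 *     tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);    // record the result
 *     set_cc_op(s1, CC_OP_LOGICB + ot);      // flags derivable on demand
 */
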
#ifdef TARGET_X86_64

#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */

#if defined(HOST_WORDS_BIGENDIAN)
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif

/* In instruction encodings for byte register accesses the
 * register number usually indicates "low 8 bits of register N";
 * however there are some special cases where N 4..7 indicates
 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
 * true for this special case, false otherwise.
 */
static inline bool byte_reg_is_xH(int reg)
{
    if (reg < 4) {
        return false;
    }
#ifdef TARGET_X86_64
    if (reg >= 8 || x86_64_hregs) {
        return false;
    }
#endif
    return true;
}

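/*
 * Illustrative example: with no REX prefix, byte-register number 4
 * encodes AH (bits 15..8 of EAX), so byte_reg_is_xH(4) returns true.
 * With a REX prefix (the x86_64_hregs case in this translator), numbers
 * 4..7 encode SPL/BPL/SIL/DIL instead and the function returns false.
 */
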
/* Select the size of a push/pop operation.  */
static inline TCGMemOp mo_pushpop(DisasContext *s, TCGMemOp ot)
{
    if (CODE64(s)) {
        return ot == MO_16 ? MO_16 : MO_64;
    } else {
        return ot;
    }
}

/* Select the size of the stack pointer.  */
static inline TCGMemOp mo_stacksize(DisasContext *s)
{
    return CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16;
}

/* Select only size 64 else 32.  Used for SSE operand sizes.  */
static inline TCGMemOp mo_64_32(TCGMemOp ot)
{
#ifdef TARGET_X86_64
    return ot == MO_64 ? MO_64 : MO_32;
#else
    return MO_32;
#endif
}

/* Select size 8 if lsb of B is clear, else OT.  Used for decoding
   byte vs word opcodes.  */
static inline TCGMemOp mo_b_d(int b, TCGMemOp ot)
{
    return b & 1 ? ot : MO_8;
}

/* Select size 8 if lsb of B is clear, else OT capped at 32.
   Used for decoding operand size of port opcodes.  */
static inline TCGMemOp mo_b_d32(int b, TCGMemOp ot)
{
    return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
}

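/*
 * Illustrative examples of the size helpers above:
 *   mo_b_d(0x88, MO_32)   -> MO_8   (0x88 is the byte form of MOV)
 *   mo_b_d(0x89, MO_32)   -> MO_32  (lsb set: full operand size)
 *   mo_b_d32(0xED, MO_64) -> MO_32  (port I/O is capped at 32 bits)
 */
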
static void gen_op_mov_reg_v(TCGMemOp ot, int reg, TCGv t0)
{
    switch(ot) {
    case MO_8:
        if (!byte_reg_is_xH(reg)) {
            tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
        } else {
            tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
        }
        break;
    case MO_16:
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
        break;
    case MO_32:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], t0);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        tcg_gen_mov_tl(cpu_regs[reg], t0);
        break;
#endif
    default:
        tcg_abort();
    }
}

static inline void gen_op_mov_v_reg(TCGMemOp ot, TCGv t0, int reg)
{
    if (ot == MO_8 && byte_reg_is_xH(reg)) {
        tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
        tcg_gen_ext8u_tl(t0, t0);
    } else {
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
    }
}

static void gen_add_A0_im(DisasContext *s, int val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
    if (!CODE64(s)) {
        tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
    }
}

static inline void gen_op_jmp_v(TCGv dest)
{
    tcg_gen_st_tl(dest, cpu_env, offsetof(CPUX86State, eip));
}

static inline void gen_op_add_reg_im(TCGMemOp size, int reg, int32_t val)
{
    tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(size, reg, cpu_tmp0);
}

static inline void gen_op_add_reg_T0(TCGMemOp size, int reg)
{
    tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T0);
    gen_op_mov_reg_v(size, reg, cpu_tmp0);
}

static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
{
    if (d == OR_TMP0) {
        gen_op_st_v(s, idx, cpu_T0, cpu_A0);
    } else {
        gen_op_mov_reg_v(idx, d, cpu_T0);
    }
}

static inline void gen_jmp_im(target_ulong pc)
{
    tcg_gen_movi_tl(cpu_tmp0, pc);
    gen_op_jmp_v(cpu_tmp0);
}

/* Compute SEG:REG into A0.  SEG is selected from the override segment
   (OVR_SEG) and the default segment (DEF_SEG).  OVR_SEG may be -1 to
   indicate no override.  */
static void gen_lea_v_seg(DisasContext *s, TCGMemOp aflag, TCGv a0,
                          int def_seg, int ovr_seg)
{
    switch (aflag) {
#ifdef TARGET_X86_64
    case MO_64:
        if (ovr_seg < 0) {
            tcg_gen_mov_tl(cpu_A0, a0);
            return;
        }
        break;
#endif
    case MO_32:
        /* 32 bit address */
        if (ovr_seg < 0) {
            if (s->addseg) {
                ovr_seg = def_seg;
            } else {
                tcg_gen_ext32u_tl(cpu_A0, a0);
                return;
            }
        }
        break;
    case MO_16:
        /* 16 bit address */
        tcg_gen_ext16u_tl(cpu_A0, a0);
        a0 = cpu_A0;
        if (ovr_seg < 0) {
            if (s->addseg) {
                ovr_seg = def_seg;
            } else {
                return;
            }
        }
        break;
    default:
        tcg_abort();
    }

    if (ovr_seg >= 0) {
        TCGv seg = cpu_seg_base[ovr_seg];

        if (aflag == MO_64) {
            tcg_gen_add_tl(cpu_A0, a0, seg);
        } else if (CODE64(s)) {
            tcg_gen_ext32u_tl(cpu_A0, a0);
            tcg_gen_add_tl(cpu_A0, cpu_A0, seg);
        } else {
            tcg_gen_add_tl(cpu_A0, a0, seg);
            tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
        }
    }
}

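/*
 * Illustrative note: gen_lea_v_seg() is the common path for forming a
 * linear address in cpu_A0.  The caller passes the raw effective address
 * and a default segment; -1 for the override means "no segment override".
 * For example, the string helpers below use
 *
 *     gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override);
 *
 * and the segment base is only added when it can be non-zero (s->addseg
 * or an explicit override).
 */
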
static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override);
}

static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1);
}

static inline void gen_op_movl_T0_Dshift(TCGMemOp ot)
{
    tcg_gen_ld32s_tl(cpu_T0, cpu_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(cpu_T0, cpu_T0, ot);
};

static TCGv gen_ext_tl(TCGv dst, TCGv src, TCGMemOp size, bool sign)
{
    switch (size) {
    case MO_8:
        if (sign) {
            tcg_gen_ext8s_tl(dst, src);
        } else {
            tcg_gen_ext8u_tl(dst, src);
        }
        return dst;
    case MO_16:
        if (sign) {
            tcg_gen_ext16s_tl(dst, src);
        } else {
            tcg_gen_ext16u_tl(dst, src);
        }
        return dst;
#ifdef TARGET_X86_64
    case MO_32:
        if (sign) {
            tcg_gen_ext32s_tl(dst, src);
        } else {
            tcg_gen_ext32u_tl(dst, src);
        }
        return dst;
#endif
    default:
        return src;
    }
}

static void gen_extu(TCGMemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, false);
}

static void gen_exts(TCGMemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, true);
}

static inline void gen_op_jnz_ecx(TCGMemOp size, TCGLabel *label1)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
}

static inline void gen_op_jz_ecx(TCGMemOp size, TCGLabel *label1)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
}

static void gen_helper_in_func(TCGMemOp ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_inb(v, cpu_env, n);
        break;
    case MO_16:
        gen_helper_inw(v, cpu_env, n);
        break;
    case MO_32:
        gen_helper_inl(v, cpu_env, n);
        break;
    default:
        tcg_abort();
    }
}

static void gen_helper_out_func(TCGMemOp ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_outb(cpu_env, v, n);
        break;
    case MO_16:
        gen_helper_outw(cpu_env, v, n);
        break;
    case MO_32:
        gen_helper_outl(cpu_env, v, n);
        break;
    default:
        tcg_abort();
    }
}

static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip,
                         uint32_t svm_flags)
{
    target_ulong next_eip;

    if (s->pe && (s->cpl > s->iopl || s->vm86)) {
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
        switch (ot) {
        case MO_8:
            gen_helper_check_iob(cpu_env, cpu_tmp2_i32);
            break;
        case MO_16:
            gen_helper_check_iow(cpu_env, cpu_tmp2_i32);
            break;
        case MO_32:
            gen_helper_check_iol(cpu_env, cpu_tmp2_i32);
            break;
        default:
            tcg_abort();
        }
    }
    if(s->flags & HF_SVMI_MASK) {
        gen_update_cc_op(s);
        gen_jmp_im(cur_eip);
        svm_flags |= (1 << (4 + ot));
        next_eip = s->pc - s->cs_base;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
        gen_helper_svm_check_io(cpu_env, cpu_tmp2_i32,
                                tcg_const_i32(svm_flags),
                                tcg_const_i32(next_eip - cur_eip));
    }
}

static inline void gen_movs(DisasContext *s, TCGMemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, cpu_T0, cpu_A0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static void gen_op_update1_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
}

static void gen_op_update2_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_src, cpu_T1);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
}

static void gen_op_update3_cc(TCGv reg)
{
    tcg_gen_mov_tl(cpu_cc_src2, reg);
    tcg_gen_mov_tl(cpu_cc_src, cpu_T1);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
}

static inline void gen_op_testl_T0_T1_cc(void)
{
    tcg_gen_and_tl(cpu_cc_dst, cpu_T0, cpu_T1);
}

static void gen_op_update_neg_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
    tcg_gen_neg_tl(cpu_cc_src, cpu_T0);
    tcg_gen_movi_tl(cpu_cc_srcT, 0);
}

/* compute all eflags to cc_src */
static void gen_compute_eflags(DisasContext *s)
{
    TCGv zero, dst, src1, src2;
    int live, dead;

    if (s->cc_op == CC_OP_EFLAGS) {
        return;
    }
    if (s->cc_op == CC_OP_CLR) {
        tcg_gen_movi_tl(cpu_cc_src, CC_Z | CC_P);
        set_cc_op(s, CC_OP_EFLAGS);
        return;
    }

    TCGV_UNUSED(zero);
    dst = cpu_cc_dst;
    src1 = cpu_cc_src;
    src2 = cpu_cc_src2;

    /* Take care to not read values that are not live.  */
    live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
    dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
    if (dead) {
        zero = tcg_const_tl(0);
        if (dead & USES_CC_DST) {
            dst = zero;
        }
        if (dead & USES_CC_SRC) {
            src1 = zero;
        }
        if (dead & USES_CC_SRC2) {
            src2 = zero;
        }
    }

    gen_update_cc_op(s);
    gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op);
    set_cc_op(s, CC_OP_EFLAGS);

    if (dead) {
        tcg_temp_free(zero);
    }
}

typedef struct CCPrepare {
    TCGCond cond;
    TCGv reg;
    TCGv reg2;
    target_ulong imm;
    target_ulong mask;
    bool use_reg2;
    bool no_setcond;
} CCPrepare;

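/*
 * Illustrative note: a CCPrepare describes "how to test a condition"
 * without emitting the test yet: compare .reg (optionally masked by
 * .mask, and optionally against .reg2 or .imm) using .cond.  The
 * gen_prepare_eflags_* helpers below fill one in from the current CC_OP,
 * and gen_setcc1()/gen_jcc1() then turn it into a setcond or brcond.
 */
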
/* compute eflags.C to reg */
static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
{
    TCGv t0, t1;
    int size, shift;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_SUBB;
        t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
        /* If no temporary was used, be careful not to alias t1 and t0.  */
        t0 = TCGV_EQUAL(t1, cpu_cc_src) ? cpu_tmp0 : reg;
        tcg_gen_mov_tl(t0, cpu_cc_srcT);
        gen_extu(size, t0);
        goto add_sub;

    case CC_OP_ADDB ... CC_OP_ADDQ:
        /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_ADDB;
        t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
        t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
    add_sub:
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
                             .reg2 = t1, .mask = -1, .use_reg2 = true };

    case CC_OP_LOGICB ... CC_OP_LOGICQ:
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };

    case CC_OP_INCB ... CC_OP_INCQ:
    case CC_OP_DECB ... CC_OP_DECQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = -1, .no_setcond = true };

    case CC_OP_SHLB ... CC_OP_SHLQ:
        /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
        size = s->cc_op - CC_OP_SHLB;
        shift = (8 << size) - 1;
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = (target_ulong)1 << shift };

    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = -1 };

    case CC_OP_BMILGB ... CC_OP_BMILGQ:
        size = s->cc_op - CC_OP_BMILGB;
        t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };

    case CC_OP_ADCX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
                             .mask = -1, .no_setcond = true };

    case CC_OP_EFLAGS:
    case CC_OP_SARB ... CC_OP_SARQ:
        /* CC_SRC & 1 */
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = CC_C };

    default:
        /* The need to compute only C from CC_OP_DYNAMIC is important
           in efficiently implementing e.g. INC at the start of a TB.  */
        gen_update_cc_op(s);
        gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
                                cpu_cc_src2, cpu_cc_op);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                             .mask = -1, .no_setcond = true };
    }
}

/* compute eflags.P to reg */
static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
{
    gen_compute_eflags(s);
    return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                         .mask = CC_P };
}

/* compute eflags.S to reg */
static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_S };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        {
            TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
            return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
        }
    }
}

/* compute eflags.O to reg */
static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
                             .mask = -1, .no_setcond = true };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        gen_compute_eflags(s);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_O };
    }
}

/* compute eflags.Z to reg */
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_Z };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
    default:
        {
            TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
            return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
        }
    }
}

/* perform a conditional store into register 'reg' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
{
    int inv, jcc_op, cond;
    TCGMemOp size;
    CCPrepare cc;
    TCGv t0;

    inv = b & 1;
    jcc_op = (b >> 1) & 7;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* We optimize relational operators for the cmp/jcc case.  */
        size = s->cc_op - CC_OP_SUBB;
        switch (jcc_op) {
        case JCC_BE:
            tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
            gen_extu(size, cpu_tmp4);
            t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
            cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = cpu_tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        case JCC_L:
            cond = TCG_COND_LT;
            goto fast_jcc_l;
        case JCC_LE:
            cond = TCG_COND_LE;
        fast_jcc_l:
            tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
            gen_exts(size, cpu_tmp4);
            t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, true);
            cc = (CCPrepare) { .cond = cond, .reg = cpu_tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        default:
            goto slow_jcc;
        }
        break;

    default:
    slow_jcc:
        /* This actually generates good code for JC, JZ and JS.  */
        switch (jcc_op) {
        case JCC_O:
            cc = gen_prepare_eflags_o(s, reg);
            break;
        case JCC_B:
            cc = gen_prepare_eflags_c(s, reg);
            break;
        case JCC_Z:
            cc = gen_prepare_eflags_z(s, reg);
            break;
        case JCC_BE:
            gen_compute_eflags(s);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                               .mask = CC_Z | CC_C };
            break;
        case JCC_S:
            cc = gen_prepare_eflags_s(s, reg);
            break;
        case JCC_P:
            cc = gen_prepare_eflags_p(s, reg);
            break;
        case JCC_L:
            gen_compute_eflags(s);
            if (TCGV_EQUAL(reg, cpu_cc_src)) {
                reg = cpu_tmp0;
            }
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S };
            break;
        default:
        case JCC_LE:
            gen_compute_eflags(s);
            if (TCGV_EQUAL(reg, cpu_cc_src)) {
                reg = cpu_tmp0;
            }
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S | CC_Z };
            break;
        }
        break;
    }

    if (inv) {
        cc.cond = tcg_invert_cond(cc.cond);
    }
    return cc;
}

static void gen_setcc1(DisasContext *s, int b, TCGv reg)
{
    CCPrepare cc = gen_prepare_cc(s, b, reg);

    if (cc.no_setcond) {
        if (cc.cond == TCG_COND_EQ) {
            tcg_gen_xori_tl(reg, cc.reg, 1);
        } else {
            tcg_gen_mov_tl(reg, cc.reg);
        }
        return;
    }

    if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
        cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
        tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
        tcg_gen_andi_tl(reg, reg, 1);
        return;
    }
    if (cc.mask != -1) {
        tcg_gen_andi_tl(reg, cc.reg, cc.mask);
        cc.reg = reg;
    }
    if (cc.use_reg2) {
        tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
    } else {
        tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
    }
}

static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
{
    gen_setcc1(s, JCC_B << 1, reg);
}

/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, cpu_T0);

    if (cc.mask != -1) {
        tcg_gen_andi_tl(cpu_T0, cc.reg, cc.mask);
        cc.reg = cpu_T0;
    }
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* Generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used.
   A translation block must end soon.  */
static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, cpu_T0);

    gen_update_cc_op(s);
    if (cc.mask != -1) {
        tcg_gen_andi_tl(cpu_T0, cc.reg, cc.mask);
        cc.reg = cpu_T0;
    }
    set_cc_op(s, CC_OP_DYNAMIC);
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

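/*
 * Example (illustrative): a conditional branch for "JZ" is emitted as
 *
 *     TCGLabel *l1 = gen_new_label();
 *     gen_jcc1(s, JCC_Z << 1, l1);   // b = jcc_op << 1, low bit inverts
 *
 * For the common cmp/jcc pairs (CC_OP_SUBB..CC_OP_SUBQ), gen_prepare_cc()
 * maps relational tests directly onto TCG comparisons of the saved
 * operands instead of materialising EFLAGS first.
 */
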
/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem */
static TCGLabel *gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
{
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();
    gen_op_jnz_ecx(s->aflag, l1);
    gen_set_label(l2);
    gen_jmp_tb(s, next_eip, 1);
    gen_set_label(l1);
    return l2;
}

static inline void gen_stos(DisasContext *s, TCGMemOp ot)
{
    gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, cpu_T0, cpu_A0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_lods(DisasContext *s, TCGMemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
    gen_op_mov_reg_v(ot, R_EAX, cpu_T0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
}

static inline void gen_scas(DisasContext *s, TCGMemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
    gen_op(s, OP_CMPL, ot, R_EAX);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_cmps(DisasContext *s, TCGMemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
    gen_string_movl_A0_ESI(s);
    gen_op(s, OP_CMPL, ot, OR_TMP0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
{
    if (s->flags & HF_IOBPT_MASK) {
        TCGv_i32 t_size = tcg_const_i32(1 << ot);
        TCGv t_next = tcg_const_tl(s->pc - s->cs_base);

        gen_helper_bpt_io(cpu_env, t_port, t_size, t_next);
        tcg_temp_free_i32(t_size);
        tcg_temp_free(t_next);
    }
}


static inline void gen_ins(DisasContext *s, TCGMemOp ot)
{
    if (s->tb->cflags & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    tcg_gen_movi_tl(cpu_T0, 0);
    gen_op_st_v(s, ot, cpu_T0, cpu_A0);
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    gen_helper_in_func(ot, cpu_T0, cpu_tmp2_i32);
    gen_op_st_v(s, ot, cpu_T0, cpu_A0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
    gen_bpt_io(s, cpu_tmp2_i32, ot);
    if (s->tb->cflags & CF_USE_ICOUNT) {
        gen_io_end();
    }
}

static inline void gen_outs(DisasContext *s, TCGMemOp ot)
{
    if (s->tb->cflags & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, cpu_T0, cpu_A0);

    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T0);
    gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_bpt_io(s, cpu_tmp2_i32, ot);
    if (s->tb->cflags & CF_USE_ICOUNT) {
        gen_io_end();
    }
}

/* same method as Valgrind : we generate jumps to current or next
   instruction */
#define GEN_REPZ(op) \
static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \
                                 target_ulong cur_eip, target_ulong next_eip) \
{ \
    TCGLabel *l2; \
    gen_update_cc_op(s); \
    l2 = gen_jz_ecx_string(s, next_eip); \
    gen_ ## op(s, ot); \
    gen_op_add_reg_im(s->aflag, R_ECX, -1); \
    /* a loop would cause two single step exceptions if ECX = 1 \
       before rep string_insn */ \
    if (s->repz_opt) \
        gen_op_jz_ecx(s->aflag, l2); \
    gen_jmp(s, cur_eip); \
}

#define GEN_REPZ2(op) \
static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \
                                   target_ulong cur_eip, \
                                   target_ulong next_eip, \
                                   int nz) \
{ \
    TCGLabel *l2; \
    gen_update_cc_op(s); \
    l2 = gen_jz_ecx_string(s, next_eip); \
    gen_ ## op(s, ot); \
    gen_op_add_reg_im(s->aflag, R_ECX, -1); \
    gen_update_cc_op(s); \
    gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2); \
    if (s->repz_opt) \
        gen_op_jz_ecx(s->aflag, l2); \
    gen_jmp(s, cur_eip); \
}

GEN_REPZ(movs)
GEN_REPZ(stos)
GEN_REPZ(lods)
GEN_REPZ(ins)
GEN_REPZ(outs)
GEN_REPZ2(scas)
GEN_REPZ2(cmps)

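/*
 * Illustrative expansion: GEN_REPZ(movs) defines gen_repz_movs(), which
 * wraps a single gen_movs() step in the REP skeleton:
 *
 *     l2 = gen_jz_ecx_string(s, next_eip);     // exit if ECX == 0
 *     gen_movs(s, ot);
 *     gen_op_add_reg_im(s->aflag, R_ECX, -1);
 *     if (s->repz_opt)
 *         gen_op_jz_ecx(s->aflag, l2);
 *     gen_jmp(s, cur_eip);                     // loop by re-entering the insn
 */
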
static void gen_helper_fp_arith_ST0_FT0(int op)
{
    switch (op) {
    case 0:
        gen_helper_fadd_ST0_FT0(cpu_env);
        break;
    case 1:
        gen_helper_fmul_ST0_FT0(cpu_env);
        break;
    case 2:
        gen_helper_fcom_ST0_FT0(cpu_env);
        break;
    case 3:
        gen_helper_fcom_ST0_FT0(cpu_env);
        break;
    case 4:
        gen_helper_fsub_ST0_FT0(cpu_env);
        break;
    case 5:
        gen_helper_fsubr_ST0_FT0(cpu_env);
        break;
    case 6:
        gen_helper_fdiv_ST0_FT0(cpu_env);
        break;
    case 7:
        gen_helper_fdivr_ST0_FT0(cpu_env);
        break;
    }
}

/* NOTE the exception in "r" op ordering */
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
{
    TCGv_i32 tmp = tcg_const_i32(opreg);
    switch (op) {
    case 0:
        gen_helper_fadd_STN_ST0(cpu_env, tmp);
        break;
    case 1:
        gen_helper_fmul_STN_ST0(cpu_env, tmp);
        break;
    case 4:
        gen_helper_fsubr_STN_ST0(cpu_env, tmp);
        break;
    case 5:
        gen_helper_fsub_STN_ST0(cpu_env, tmp);
        break;
    case 6:
        gen_helper_fdivr_STN_ST0(cpu_env, tmp);
        break;
    case 7:
        gen_helper_fdiv_STN_ST0(cpu_env, tmp);
        break;
    }
}

/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d)
{
    if (d != OR_TMP0) {
        gen_op_mov_v_reg(ot, cpu_T0, d);
    } else {
        gen_op_ld_v(s1, ot, cpu_T0, cpu_A0);
    }
    switch(op) {
    case OP_ADCL:
        gen_compute_eflags_c(s1, cpu_tmp4);
        tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
        tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_tmp4);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update3_cc(cpu_tmp4);
        set_cc_op(s1, CC_OP_ADCB + ot);
        break;
    case OP_SBBL:
        gen_compute_eflags_c(s1, cpu_tmp4);
        tcg_gen_sub_tl(cpu_T0, cpu_T0, cpu_T1);
        tcg_gen_sub_tl(cpu_T0, cpu_T0, cpu_tmp4);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update3_cc(cpu_tmp4);
        set_cc_op(s1, CC_OP_SBBB + ot);
        break;
    case OP_ADDL:
        tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update2_cc();
        set_cc_op(s1, CC_OP_ADDB + ot);
        break;
    case OP_SUBL:
        tcg_gen_mov_tl(cpu_cc_srcT, cpu_T0);
        tcg_gen_sub_tl(cpu_T0, cpu_T0, cpu_T1);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update2_cc();
        set_cc_op(s1, CC_OP_SUBB + ot);
        break;
    default:
    case OP_ANDL:
        tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update1_cc();
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_ORL:
        tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_T1);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update1_cc();
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_XORL:
        tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_T1);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update1_cc();
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_CMPL:
        tcg_gen_mov_tl(cpu_cc_src, cpu_T1);
        tcg_gen_mov_tl(cpu_cc_srcT, cpu_T0);
        tcg_gen_sub_tl(cpu_cc_dst, cpu_T0, cpu_T1);
        set_cc_op(s1, CC_OP_SUBB + ot);
        break;
    }
}

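/*
 * Illustrative note: gen_op() implements the eight classic ALU forms
 * (OP_ADDL..OP_CMPL) on cpu_T0/cpu_T1.  A register destination is loaded
 * with gen_op_mov_v_reg(), a memory destination (d == OR_TMP0) through
 * cpu_A0; the result is written back by gen_op_st_rm_T0_A0() and the flag
 * operands are recorded lazily, e.g. set_cc_op(s1, CC_OP_ADDB + ot) for an
 * ADD of operand size 'ot'.
 */
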
/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_inc(DisasContext *s1, TCGMemOp ot, int d, int c)
{
    if (d != OR_TMP0) {
        gen_op_mov_v_reg(ot, cpu_T0, d);
    } else {
        gen_op_ld_v(s1, ot, cpu_T0, cpu_A0);
    }
    gen_compute_eflags_c(s1, cpu_cc_src);
    if (c > 0) {
        tcg_gen_addi_tl(cpu_T0, cpu_T0, 1);
        set_cc_op(s1, CC_OP_INCB + ot);
    } else {
        tcg_gen_addi_tl(cpu_T0, cpu_T0, -1);
        set_cc_op(s1, CC_OP_DECB + ot);
    }
    gen_op_st_rm_T0_A0(s1, ot, d);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
}

static void gen_shift_flags(DisasContext *s, TCGMemOp ot, TCGv result,
                            TCGv shm1, TCGv count, bool is_right)
{
    TCGv_i32 z32, s32, oldop;
    TCGv z_tl;

    /* Store the results into the CC variables.  If we know that the
       variable must be dead, store unconditionally.  Otherwise we'll
       need to not disrupt the current contents.  */
    z_tl = tcg_const_tl(0);
    if (cc_op_live[s->cc_op] & USES_CC_DST) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
                           result, cpu_cc_dst);
    } else {
        tcg_gen_mov_tl(cpu_cc_dst, result);
    }
    if (cc_op_live[s->cc_op] & USES_CC_SRC) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
                           shm1, cpu_cc_src);
    } else {
        tcg_gen_mov_tl(cpu_cc_src, shm1);
    }
    tcg_temp_free(z_tl);

    /* Get the two potential CC_OP values into temporaries.  */
    tcg_gen_movi_i32(cpu_tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    if (s->cc_op == CC_OP_DYNAMIC) {
        oldop = cpu_cc_op;
    } else {
        tcg_gen_movi_i32(cpu_tmp3_i32, s->cc_op);
        oldop = cpu_tmp3_i32;
    }

    /* Conditionally store the CC_OP value.  */
    z32 = tcg_const_i32(0);
    s32 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(s32, count);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, cpu_tmp2_i32, oldop);
    tcg_temp_free_i32(z32);
    tcg_temp_free_i32(s32);

    /* The CC_OP value is no longer predictable.  */
    set_cc_op(s, CC_OP_DYNAMIC);
}

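/*
 * Illustrative note: for variable shifts the flags are only defined when
 * the masked count is non-zero, so gen_shift_flags() stores the new
 * CC_DST/CC_SRC and the new CC_OP with movcond on (count != 0), keeping
 * the previous values otherwise; the resulting CC_OP is therefore
 * CC_OP_DYNAMIC from the translator's point of view.
 */
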
static void gen_shift_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
                            int is_right, int is_arith)
{
    target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
    } else {
        gen_op_mov_v_reg(ot, cpu_T0, op1);
    }

    tcg_gen_andi_tl(cpu_T1, cpu_T1, mask);
    tcg_gen_subi_tl(cpu_tmp0, cpu_T1, 1);

    if (is_right) {
        if (is_arith) {
            gen_exts(ot, cpu_T0);
            tcg_gen_sar_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
            tcg_gen_sar_tl(cpu_T0, cpu_T0, cpu_T1);
        } else {
            gen_extu(ot, cpu_T0);
            tcg_gen_shr_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
            tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_T1);
        }
    } else {
        tcg_gen_shl_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
        tcg_gen_shl_tl(cpu_T0, cpu_T0, cpu_T1);
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    gen_shift_flags(s, ot, cpu_T0, cpu_tmp0, cpu_T1, is_right);
}

static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
                            int is_right, int is_arith)
{
    int mask = (ot == MO_64 ? 0x3f : 0x1f);

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
    else
        gen_op_mov_v_reg(ot, cpu_T0, op1);

    op2 &= mask;
    if (op2 != 0) {
        if (is_right) {
            if (is_arith) {
                gen_exts(ot, cpu_T0);
                tcg_gen_sari_tl(cpu_tmp4, cpu_T0, op2 - 1);
                tcg_gen_sari_tl(cpu_T0, cpu_T0, op2);
            } else {
                gen_extu(ot, cpu_T0);
                tcg_gen_shri_tl(cpu_tmp4, cpu_T0, op2 - 1);
                tcg_gen_shri_tl(cpu_T0, cpu_T0, op2);
            }
        } else {
            tcg_gen_shli_tl(cpu_tmp4, cpu_T0, op2 - 1);
            tcg_gen_shli_tl(cpu_T0, cpu_T0, op2);
        }
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    /* update eflags if non zero shift */
    if (op2 != 0) {
        tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
        set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    }
}

static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right)
{
    target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
    TCGv_i32 t0, t1;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
    } else {
        gen_op_mov_v_reg(ot, cpu_T0, op1);
    }

    tcg_gen_andi_tl(cpu_T1, cpu_T1, mask);

    switch (ot) {
    case MO_8:
        /* Replicate the 8-bit input so that a 32-bit rotate works.  */
        tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
        tcg_gen_muli_tl(cpu_T0, cpu_T0, 0x01010101);
        goto do_long;
    case MO_16:
        /* Replicate the 16-bit input so that a 32-bit rotate works.  */
        tcg_gen_deposit_tl(cpu_T0, cpu_T0, cpu_T0, 16, 16);
        goto do_long;
    do_long:
#ifdef TARGET_X86_64
    case MO_32:
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
        tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
        if (is_right) {
            tcg_gen_rotr_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
        } else {
            tcg_gen_rotl_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
        }
        tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
        break;
#endif
    default:
        if (is_right) {
            tcg_gen_rotr_tl(cpu_T0, cpu_T0, cpu_T1);
        } else {
            tcg_gen_rotl_tl(cpu_T0, cpu_T0, cpu_T1);
        }
        break;
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    /* We'll need the flags computed into CC_SRC.  */
    gen_compute_eflags(s);

    /* The value that was "rotated out" is now present at the other end
       of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
       since we've computed the flags into CC_SRC, these variables are
       currently dead.  */
    if (is_right) {
        tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask - 1);
        tcg_gen_shri_tl(cpu_cc_dst, cpu_T0, mask);
        tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
    } else {
        tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask);
        tcg_gen_andi_tl(cpu_cc_dst, cpu_T0, 1);
    }
    tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
    tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);

    /* Now conditionally store the new CC_OP value.  If the shift count
       is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
       Otherwise reuse CC_OP_ADCOX which has the C and O flags split out
       exactly as we computed above.  */
    t0 = tcg_const_i32(0);
    t1 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(t1, cpu_T1);
    tcg_gen_movi_i32(cpu_tmp2_i32, CC_OP_ADCOX);
    tcg_gen_movi_i32(cpu_tmp3_i32, CC_OP_EFLAGS);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
                        cpu_tmp2_i32, cpu_tmp3_i32);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);

    /* The CC_OP value is no longer predictable.  */
    set_cc_op(s, CC_OP_DYNAMIC);
}

static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
                          int is_right)
{
    int mask = (ot == MO_64 ? 0x3f : 0x1f);
    int shift;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
    } else {
        gen_op_mov_v_reg(ot, cpu_T0, op1);
    }

    op2 &= mask;
    if (op2 != 0) {
        switch (ot) {
#ifdef TARGET_X86_64
        case MO_32:
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
            if (is_right) {
                tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
            } else {
                tcg_gen_rotli_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
            }
            tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
            break;
#endif
        default:
            if (is_right) {
                tcg_gen_rotri_tl(cpu_T0, cpu_T0, op2);
            } else {
                tcg_gen_rotli_tl(cpu_T0, cpu_T0, op2);
            }
            break;
        case MO_8:
            mask = 7;
            goto do_shifts;
        case MO_16:
            mask = 15;
        do_shifts:
            shift = op2 & mask;
            if (is_right) {
                shift = mask + 1 - shift;
            }
            gen_extu(ot, cpu_T0);
            tcg_gen_shli_tl(cpu_tmp0, cpu_T0, shift);
            tcg_gen_shri_tl(cpu_T0, cpu_T0, mask + 1 - shift);
            tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_tmp0);
            break;
        }
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    if (op2 != 0) {
        /* Compute the flags into CC_SRC.  */
        gen_compute_eflags(s);

        /* The value that was "rotated out" is now present at the other end
           of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
           since we've computed the flags into CC_SRC, these variables are
           currently dead.  */
        if (is_right) {
            tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask - 1);
            tcg_gen_shri_tl(cpu_cc_dst, cpu_T0, mask);
            tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
        } else {
            tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask);
            tcg_gen_andi_tl(cpu_cc_dst, cpu_T0, 1);
        }
        tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
        tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
        set_cc_op(s, CC_OP_ADCOX);
    }
}

/* XXX: add faster immediate = 1 case */
static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
                           int is_right)
{
    gen_compute_eflags(s);
    assert(s->cc_op == CC_OP_EFLAGS);

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
    else
        gen_op_mov_v_reg(ot, cpu_T0, op1);

    if (is_right) {
        switch (ot) {
        case MO_8:
            gen_helper_rcrb(cpu_T0, cpu_env, cpu_T0, cpu_T1);
            break;
        case MO_16:
            gen_helper_rcrw(cpu_T0, cpu_env, cpu_T0, cpu_T1);
            break;
        case MO_32:
            gen_helper_rcrl(cpu_T0, cpu_env, cpu_T0, cpu_T1);
            break;
#ifdef TARGET_X86_64
        case MO_64:
            gen_helper_rcrq(cpu_T0, cpu_env, cpu_T0, cpu_T1);
            break;
#endif
        default:
            tcg_abort();
        }
    } else {
        switch (ot) {
        case MO_8:
            gen_helper_rclb(cpu_T0, cpu_env, cpu_T0, cpu_T1);
            break;
        case MO_16:
            gen_helper_rclw(cpu_T0, cpu_env, cpu_T0, cpu_T1);
            break;
        case MO_32:
            gen_helper_rcll(cpu_T0, cpu_env, cpu_T0, cpu_T1);
            break;
#ifdef TARGET_X86_64
        case MO_64:
            gen_helper_rclq(cpu_T0, cpu_env, cpu_T0, cpu_T1);
            break;
#endif
        default:
            tcg_abort();
        }
    }
    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);
}

1680/* XXX: add faster immediate case */
d67dc9e6 1681static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
f437d0a3 1682 bool is_right, TCGv count_in)
b6abf97d 1683{
4ba9938c 1684 target_ulong mask = (ot == MO_64 ? 63 : 31);
f437d0a3 1685 TCGv count;
b6abf97d
FB
1686
1687 /* load */
1e4840bf 1688 if (op1 == OR_TMP0) {
1d1cc4d0 1689 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
1e4840bf 1690 } else {
1d1cc4d0 1691 gen_op_mov_v_reg(ot, cpu_T0, op1);
1e4840bf 1692 }
b6abf97d 1693
f437d0a3
RH
1694 count = tcg_temp_new();
1695 tcg_gen_andi_tl(count, count_in, mask);
1e4840bf 1696
f437d0a3 1697 switch (ot) {
4ba9938c 1698 case MO_16:
f437d0a3
RH
1699 /* Note: we implement the Intel behaviour for shift count > 16.
1700 This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A
1701 portion by constructing it as a 32-bit value. */
b6abf97d 1702 if (is_right) {
1d1cc4d0
RH
1703 tcg_gen_deposit_tl(cpu_tmp0, cpu_T0, cpu_T1, 16, 16);
1704 tcg_gen_mov_tl(cpu_T1, cpu_T0);
1705 tcg_gen_mov_tl(cpu_T0, cpu_tmp0);
b6abf97d 1706 } else {
1d1cc4d0 1707 tcg_gen_deposit_tl(cpu_T1, cpu_T0, cpu_T1, 16, 16);
b6abf97d 1708 }
f437d0a3
RH
1709 /* FALLTHRU */
1710#ifdef TARGET_X86_64
4ba9938c 1711 case MO_32:
f437d0a3
RH
1712 /* Concatenate the two 32-bit values and use a 64-bit shift. */
1713 tcg_gen_subi_tl(cpu_tmp0, count, 1);
b6abf97d 1714 if (is_right) {
1d1cc4d0
RH
1715 tcg_gen_concat_tl_i64(cpu_T0, cpu_T0, cpu_T1);
1716 tcg_gen_shr_i64(cpu_tmp0, cpu_T0, cpu_tmp0);
1717 tcg_gen_shr_i64(cpu_T0, cpu_T0, count);
f437d0a3 1718 } else {
1d1cc4d0
RH
1719 tcg_gen_concat_tl_i64(cpu_T0, cpu_T1, cpu_T0);
1720 tcg_gen_shl_i64(cpu_tmp0, cpu_T0, cpu_tmp0);
1721 tcg_gen_shl_i64(cpu_T0, cpu_T0, count);
f437d0a3 1722 tcg_gen_shri_i64(cpu_tmp0, cpu_tmp0, 32);
1d1cc4d0 1723 tcg_gen_shri_i64(cpu_T0, cpu_T0, 32);
f437d0a3
RH
1724 }
1725 break;
1726#endif
1727 default:
1728 tcg_gen_subi_tl(cpu_tmp0, count, 1);
1729 if (is_right) {
1d1cc4d0 1730 tcg_gen_shr_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
b6abf97d 1731
f437d0a3 1732 tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
1d1cc4d0
RH
1733 tcg_gen_shr_tl(cpu_T0, cpu_T0, count);
1734 tcg_gen_shl_tl(cpu_T1, cpu_T1, cpu_tmp4);
b6abf97d 1735 } else {
1d1cc4d0 1736 tcg_gen_shl_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
4ba9938c 1737 if (ot == MO_16) {
f437d0a3
RH
1738 /* Only needed if count > 16, for Intel behaviour. */
1739 tcg_gen_subfi_tl(cpu_tmp4, 33, count);
1d1cc4d0 1740 tcg_gen_shr_tl(cpu_tmp4, cpu_T1, cpu_tmp4);
f437d0a3
RH
1741 tcg_gen_or_tl(cpu_tmp0, cpu_tmp0, cpu_tmp4);
1742 }
1743
1744 tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
1d1cc4d0
RH
1745 tcg_gen_shl_tl(cpu_T0, cpu_T0, count);
1746 tcg_gen_shr_tl(cpu_T1, cpu_T1, cpu_tmp4);
b6abf97d 1747 }
f437d0a3 1748 tcg_gen_movi_tl(cpu_tmp4, 0);
1d1cc4d0
RH
1749 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T1, count, cpu_tmp4,
1750 cpu_tmp4, cpu_T1);
1751 tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_T1);
f437d0a3 1752 break;
b6abf97d 1753 }
b6abf97d 1754
b6abf97d 1755 /* store */
d4faa3e0 1756 gen_op_st_rm_T0_A0(s, ot, op1);
1e4840bf 1757
1d1cc4d0 1758 gen_shift_flags(s, ot, cpu_T0, cpu_tmp0, count, is_right);
f437d0a3 1759 tcg_temp_free(count);
b6abf97d
FB
1760}
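/*
 * Editor's note: a minimal stand-alone sketch (not QEMU code) of the
 * Intel 16-bit SHRD behaviour that gen_shiftd_rm_T1() implements above
 * for counts greater than 16: the effective source is the 48-bit value
 * A:B:A, shifted right by the count masked to 5 bits.  The helper name
 * shrdw_ref and the plain uint64_t arithmetic are illustrative
 * assumptions only.
 */
#if 0 /* illustrative sketch, kept out of the build */
#include <stdint.h>

static uint16_t shrdw_ref(uint16_t a, uint16_t b, unsigned count)
{
    /* a = destination operand, b = source register; compose A:B:A as in
       the deposit/concat sequence above. */
    uint64_t src = ((uint64_t)a << 32) | ((uint32_t)b << 16) | a;

    count &= 31;                      /* same mask as the code above */
    return (uint16_t)(src >> count);  /* low 16 bits are the result */
}
#endif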
1761
d67dc9e6 1762static void gen_shift(DisasContext *s1, int op, TCGMemOp ot, int d, int s)
b6abf97d
FB
1763{
1764 if (s != OR_TMP1)
1d1cc4d0 1765 gen_op_mov_v_reg(ot, cpu_T1, s);
b6abf97d
FB
1766 switch(op) {
1767 case OP_ROL:
1768 gen_rot_rm_T1(s1, ot, d, 0);
1769 break;
1770 case OP_ROR:
1771 gen_rot_rm_T1(s1, ot, d, 1);
1772 break;
1773 case OP_SHL:
1774 case OP_SHL1:
1775 gen_shift_rm_T1(s1, ot, d, 0, 0);
1776 break;
1777 case OP_SHR:
1778 gen_shift_rm_T1(s1, ot, d, 1, 0);
1779 break;
1780 case OP_SAR:
1781 gen_shift_rm_T1(s1, ot, d, 1, 1);
1782 break;
1783 case OP_RCL:
1784 gen_rotc_rm_T1(s1, ot, d, 0);
1785 break;
1786 case OP_RCR:
1787 gen_rotc_rm_T1(s1, ot, d, 1);
1788 break;
1789 }
2c0262af
FB
1790}
1791
d67dc9e6 1792static void gen_shifti(DisasContext *s1, int op, TCGMemOp ot, int d, int c)
2c0262af 1793{
c1c37968 1794 switch(op) {
8cd6345d 1795 case OP_ROL:
1796 gen_rot_rm_im(s1, ot, d, c, 0);
1797 break;
1798 case OP_ROR:
1799 gen_rot_rm_im(s1, ot, d, c, 1);
1800 break;
c1c37968
FB
1801 case OP_SHL:
1802 case OP_SHL1:
1803 gen_shift_rm_im(s1, ot, d, c, 0, 0);
1804 break;
1805 case OP_SHR:
1806 gen_shift_rm_im(s1, ot, d, c, 1, 0);
1807 break;
1808 case OP_SAR:
1809 gen_shift_rm_im(s1, ot, d, c, 1, 1);
1810 break;
1811 default:
1812 /* currently not optimized */
1d1cc4d0 1813 tcg_gen_movi_tl(cpu_T1, c);
c1c37968
FB
1814 gen_shift(s1, op, ot, d, OR_TMP1);
1815 break;
1816 }
2c0262af
FB
1817}
1818
a074ce42
RH
1819/* Decompose an address. */
1820
1821typedef struct AddressParts {
1822 int def_seg;
1823 int base;
1824 int index;
1825 int scale;
1826 target_long disp;
1827} AddressParts;
1828
1829static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
1830 int modrm)
2c0262af 1831{
a074ce42 1832 int def_seg, base, index, scale, mod, rm;
14ce26e7 1833 target_long disp;
a074ce42 1834 bool havesib;
2c0262af 1835
d6a29149 1836 def_seg = R_DS;
a074ce42
RH
1837 index = -1;
1838 scale = 0;
1839 disp = 0;
1840
2c0262af
FB
1841 mod = (modrm >> 6) & 3;
1842 rm = modrm & 7;
a074ce42
RH
1843 base = rm | REX_B(s);
1844
1845 if (mod == 3) {
1846 /* Normally filtered out earlier, but including this path
1847 simplifies multi-byte nop, as well as bndcl, bndcu, bndcn. */
1848 goto done;
1849 }
2c0262af 1850
1d71ddb1
RH
1851 switch (s->aflag) {
1852 case MO_64:
1853 case MO_32:
2c0262af 1854 havesib = 0;
a074ce42
RH
1855 if (rm == 4) {
1856 int code = cpu_ldub_code(env, s->pc++);
2c0262af 1857 scale = (code >> 6) & 3;
14ce26e7 1858 index = ((code >> 3) & 7) | REX_X(s);
7865eec4
RH
1859 if (index == 4) {
1860 index = -1; /* no index */
1861 }
a074ce42
RH
1862 base = (code & 7) | REX_B(s);
1863 havesib = 1;
2c0262af
FB
1864 }
1865
1866 switch (mod) {
1867 case 0:
14ce26e7 1868 if ((base & 7) == 5) {
2c0262af 1869 base = -1;
0af10c86 1870 disp = (int32_t)cpu_ldl_code(env, s->pc);
2c0262af 1871 s->pc += 4;
14ce26e7 1872 if (CODE64(s) && !havesib) {
a074ce42 1873 base = -2;
14ce26e7
FB
1874 disp += s->pc + s->rip_offset;
1875 }
2c0262af
FB
1876 }
1877 break;
1878 case 1:
0af10c86 1879 disp = (int8_t)cpu_ldub_code(env, s->pc++);
2c0262af
FB
1880 break;
1881 default:
1882 case 2:
0af10c86 1883 disp = (int32_t)cpu_ldl_code(env, s->pc);
2c0262af
FB
1884 s->pc += 4;
1885 break;
1886 }
3b46e624 1887
7865eec4
RH
1888 /* For correct popl handling with esp. */
1889 if (base == R_ESP && s->popl_esp_hack) {
1890 disp += s->popl_esp_hack;
1891 }
d6a29149
RH
1892 if (base == R_EBP || base == R_ESP) {
1893 def_seg = R_SS;
2c0262af 1894 }
1d71ddb1
RH
1895 break;
1896
1897 case MO_16:
d6a29149 1898 if (mod == 0) {
2c0262af 1899 if (rm == 6) {
a074ce42 1900 base = -1;
0af10c86 1901 disp = cpu_lduw_code(env, s->pc);
2c0262af 1902 s->pc += 2;
d6a29149 1903 break;
2c0262af 1904 }
d6a29149 1905 } else if (mod == 1) {
0af10c86 1906 disp = (int8_t)cpu_ldub_code(env, s->pc++);
d6a29149 1907 } else {
7effd625 1908 disp = (int16_t)cpu_lduw_code(env, s->pc);
2c0262af 1909 s->pc += 2;
2c0262af 1910 }
7effd625 1911
7effd625 1912 switch (rm) {
2c0262af 1913 case 0:
a074ce42
RH
1914 base = R_EBX;
1915 index = R_ESI;
2c0262af
FB
1916 break;
1917 case 1:
a074ce42
RH
1918 base = R_EBX;
1919 index = R_EDI;
2c0262af
FB
1920 break;
1921 case 2:
a074ce42
RH
1922 base = R_EBP;
1923 index = R_ESI;
d6a29149 1924 def_seg = R_SS;
2c0262af
FB
1925 break;
1926 case 3:
a074ce42
RH
1927 base = R_EBP;
1928 index = R_EDI;
d6a29149 1929 def_seg = R_SS;
2c0262af
FB
1930 break;
1931 case 4:
a074ce42 1932 base = R_ESI;
2c0262af
FB
1933 break;
1934 case 5:
a074ce42 1935 base = R_EDI;
2c0262af
FB
1936 break;
1937 case 6:
a074ce42 1938 base = R_EBP;
d6a29149 1939 def_seg = R_SS;
2c0262af
FB
1940 break;
1941 default:
1942 case 7:
a074ce42 1943 base = R_EBX;
2c0262af
FB
1944 break;
1945 }
1d71ddb1
RH
1946 break;
1947
1948 default:
1949 tcg_abort();
2c0262af 1950 }
d6a29149 1951
a074ce42
RH
1952 done:
1953 return (AddressParts){ def_seg, base, index, scale, disp };
2c0262af
FB
1954}
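/*
 * Editor's note: a worked example of the decomposition above, with
 * illustrative byte values.  In 32-bit code, modrm = 0x4c (mod=1, rm=4)
 * pulls in a SIB byte; with sib = 0x91 (scale=2, index=2/EDX,
 * base=1/ECX) and an 8-bit displacement of 0x10, the result is
 * { def_seg=R_DS, base=R_ECX, index=R_EDX, scale=2, disp=0x10 },
 * i.e. the effective address ECX + EDX * 4 + 0x10.  A base of EBP or
 * ESP would have switched def_seg to R_SS instead.
 */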
1955
a074ce42
RH
1956/* Compute the address, with a minimum number of TCG ops. */
1957static TCGv gen_lea_modrm_1(AddressParts a)
e17a36ce 1958{
a074ce42 1959 TCGv ea;
3b46e624 1960
a074ce42
RH
1961 TCGV_UNUSED(ea);
1962 if (a.index >= 0) {
1963 if (a.scale == 0) {
1964 ea = cpu_regs[a.index];
1965 } else {
1966 tcg_gen_shli_tl(cpu_A0, cpu_regs[a.index], a.scale);
1967 ea = cpu_A0;
e17a36ce 1968 }
a074ce42
RH
1969 if (a.base >= 0) {
1970 tcg_gen_add_tl(cpu_A0, ea, cpu_regs[a.base]);
1971 ea = cpu_A0;
e17a36ce 1972 }
a074ce42
RH
1973 } else if (a.base >= 0) {
1974 ea = cpu_regs[a.base];
1975 }
1976 if (TCGV_IS_UNUSED(ea)) {
1977 tcg_gen_movi_tl(cpu_A0, a.disp);
1978 ea = cpu_A0;
1979 } else if (a.disp != 0) {
1980 tcg_gen_addi_tl(cpu_A0, ea, a.disp);
1981 ea = cpu_A0;
1982 }
1d71ddb1 1983
a074ce42
RH
1984 return ea;
1985}
1d71ddb1 1986
a074ce42
RH
1987static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
1988{
1989 AddressParts a = gen_lea_modrm_0(env, s, modrm);
1990 TCGv ea = gen_lea_modrm_1(a);
1991 gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);
1992}
1993
1994static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
1995{
1996 (void)gen_lea_modrm_0(env, s, modrm);
e17a36ce
FB
1997}
1998
523e28d7
RH
1999/* Used for BNDCL, BNDCU, BNDCN. */
2000static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm,
2001 TCGCond cond, TCGv_i64 bndv)
2002{
2003 TCGv ea = gen_lea_modrm_1(gen_lea_modrm_0(env, s, modrm));
2004
2005 tcg_gen_extu_tl_i64(cpu_tmp1_i64, ea);
2006 if (!CODE64(s)) {
2007 tcg_gen_ext32u_i64(cpu_tmp1_i64, cpu_tmp1_i64);
2008 }
2009 tcg_gen_setcond_i64(cond, cpu_tmp1_i64, cpu_tmp1_i64, bndv);
2010 tcg_gen_extrl_i64_i32(cpu_tmp2_i32, cpu_tmp1_i64);
2011 gen_helper_bndck(cpu_env, cpu_tmp2_i32);
2012}
2013
664e0f19
FB
2014/* used for LEA and MOV AX, mem */
2015static void gen_add_A0_ds_seg(DisasContext *s)
2016{
77ebcad0 2017 gen_lea_v_seg(s, s->aflag, cpu_A0, R_DS, s->override);
664e0f19
FB
2018}
2019
222a3336 2020/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2c0262af 2021 OR_TMP0 */
0af10c86 2022static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
d67dc9e6 2023 TCGMemOp ot, int reg, int is_store)
2c0262af 2024{
4eeb3939 2025 int mod, rm;
2c0262af
FB
2026
2027 mod = (modrm >> 6) & 3;
14ce26e7 2028 rm = (modrm & 7) | REX_B(s);
2c0262af
FB
2029 if (mod == 3) {
2030 if (is_store) {
2031 if (reg != OR_TMP0)
1d1cc4d0
RH
2032 gen_op_mov_v_reg(ot, cpu_T0, reg);
2033 gen_op_mov_reg_v(ot, rm, cpu_T0);
2c0262af 2034 } else {
1d1cc4d0 2035 gen_op_mov_v_reg(ot, cpu_T0, rm);
2c0262af 2036 if (reg != OR_TMP0)
1d1cc4d0 2037 gen_op_mov_reg_v(ot, reg, cpu_T0);
2c0262af
FB
2038 }
2039 } else {
4eeb3939 2040 gen_lea_modrm(env, s, modrm);
2c0262af
FB
2041 if (is_store) {
2042 if (reg != OR_TMP0)
1d1cc4d0
RH
2043 gen_op_mov_v_reg(ot, cpu_T0, reg);
2044 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
2c0262af 2045 } else {
1d1cc4d0 2046 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
2c0262af 2047 if (reg != OR_TMP0)
1d1cc4d0 2048 gen_op_mov_reg_v(ot, reg, cpu_T0);
2c0262af
FB
2049 }
2050 }
2051}
2052
d67dc9e6 2053static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, TCGMemOp ot)
2c0262af
FB
2054{
2055 uint32_t ret;
2056
d67dc9e6 2057 switch (ot) {
4ba9938c 2058 case MO_8:
0af10c86 2059 ret = cpu_ldub_code(env, s->pc);
2c0262af
FB
2060 s->pc++;
2061 break;
4ba9938c 2062 case MO_16:
0af10c86 2063 ret = cpu_lduw_code(env, s->pc);
2c0262af
FB
2064 s->pc += 2;
2065 break;
4ba9938c 2066 case MO_32:
d67dc9e6
RH
2067#ifdef TARGET_X86_64
2068 case MO_64:
2069#endif
0af10c86 2070 ret = cpu_ldl_code(env, s->pc);
2c0262af
FB
2071 s->pc += 4;
2072 break;
d67dc9e6
RH
2073 default:
2074 tcg_abort();
2c0262af
FB
2075 }
2076 return ret;
2077}
2078
d67dc9e6 2079static inline int insn_const_size(TCGMemOp ot)
14ce26e7 2080{
4ba9938c 2081 if (ot <= MO_32) {
14ce26e7 2082 return 1 << ot;
4ba9938c 2083 } else {
14ce26e7 2084 return 4;
4ba9938c 2085 }
14ce26e7
FB
2086}
2087
90aa39a1
SF
2088static inline bool use_goto_tb(DisasContext *s, target_ulong pc)
2089{
2090#ifndef CONFIG_USER_ONLY
2091 return (pc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) ||
2092 (pc & TARGET_PAGE_MASK) == (s->pc_start & TARGET_PAGE_MASK);
2093#else
2094 return true;
2095#endif
2096}
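/*
 * Editor's note: direct TB chaining is only used when the jump target
 * stays on the same guest page as the current TB (or the page the TB
 * started on), so invalidating a page's translations is enough to break
 * any direct links into it; other targets fall back to the indirect
 * exit in gen_goto_tb() below.  The user-only build skips the check, as
 * there is no softmmu mapping that can change underneath us.
 */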
2097
6e256c93
FB
2098static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
2099{
90aa39a1 2100 target_ulong pc = s->cs_base + eip;
6e256c93 2101
90aa39a1 2102 if (use_goto_tb(s, pc)) {
6e256c93 2103 /* jump to same page: we can use a direct jump */
57fec1fe 2104 tcg_gen_goto_tb(tb_num);
6e256c93 2105 gen_jmp_im(eip);
90aa39a1 2106 tcg_gen_exit_tb((uintptr_t)s->tb + tb_num);
6e256c93
FB
2107 } else {
2108 /* jump to another page: currently not optimized */
2109 gen_jmp_im(eip);
2110 gen_eob(s);
2111 }
2112}
2113
5fafdf24 2114static inline void gen_jcc(DisasContext *s, int b,
14ce26e7 2115 target_ulong val, target_ulong next_eip)
2c0262af 2116{
42a268c2 2117 TCGLabel *l1, *l2;
3b46e624 2118
2c0262af 2119 if (s->jmp_opt) {
14ce26e7 2120 l1 = gen_new_label();
b27fc131 2121 gen_jcc1(s, b, l1);
dc259201 2122
6e256c93 2123 gen_goto_tb(s, 0, next_eip);
14ce26e7
FB
2124
2125 gen_set_label(l1);
6e256c93 2126 gen_goto_tb(s, 1, val);
5779406a 2127 s->is_jmp = DISAS_TB_JUMP;
2c0262af 2128 } else {
14ce26e7
FB
2129 l1 = gen_new_label();
2130 l2 = gen_new_label();
b27fc131 2131 gen_jcc1(s, b, l1);
8e1c85e3 2132
14ce26e7 2133 gen_jmp_im(next_eip);
8e1c85e3
FB
2134 tcg_gen_br(l2);
2135
14ce26e7
FB
2136 gen_set_label(l1);
2137 gen_jmp_im(val);
2138 gen_set_label(l2);
2c0262af
FB
2139 gen_eob(s);
2140 }
2141}
2142
d67dc9e6 2143static void gen_cmovcc1(CPUX86State *env, DisasContext *s, TCGMemOp ot, int b,
f32d3781
PB
2144 int modrm, int reg)
2145{
57eb0cc8 2146 CCPrepare cc;
f32d3781 2147
57eb0cc8 2148 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
f32d3781 2149
1d1cc4d0 2150 cc = gen_prepare_cc(s, b, cpu_T1);
57eb0cc8
RH
2151 if (cc.mask != -1) {
2152 TCGv t0 = tcg_temp_new();
2153 tcg_gen_andi_tl(t0, cc.reg, cc.mask);
2154 cc.reg = t0;
2155 }
2156 if (!cc.use_reg2) {
2157 cc.reg2 = tcg_const_tl(cc.imm);
f32d3781
PB
2158 }
2159
1d1cc4d0
RH
2160 tcg_gen_movcond_tl(cc.cond, cpu_T0, cc.reg, cc.reg2,
2161 cpu_T0, cpu_regs[reg]);
2162 gen_op_mov_reg_v(ot, reg, cpu_T0);
57eb0cc8
RH
2163
2164 if (cc.mask != -1) {
2165 tcg_temp_free(cc.reg);
2166 }
2167 if (!cc.use_reg2) {
2168 tcg_temp_free(cc.reg2);
2169 }
f32d3781
PB
2170}
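/*
 * Editor's note: the source operand is loaded above before the movcond,
 * so a CMOVcc with a memory operand always performs the read (and can
 * fault) even when the condition is false; only the write-back to the
 * destination register is conditional, matching the architectural
 * behaviour.
 */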
2171
3bd7da9e
FB
2172static inline void gen_op_movl_T0_seg(int seg_reg)
2173{
1d1cc4d0 2174 tcg_gen_ld32u_tl(cpu_T0, cpu_env,
3bd7da9e
FB
2175 offsetof(CPUX86State,segs[seg_reg].selector));
2176}
2177
2178static inline void gen_op_movl_seg_T0_vm(int seg_reg)
2179{
1d1cc4d0
RH
2180 tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
2181 tcg_gen_st32_tl(cpu_T0, cpu_env,
3bd7da9e 2182 offsetof(CPUX86State,segs[seg_reg].selector));
1d1cc4d0 2183 tcg_gen_shli_tl(cpu_seg_base[seg_reg], cpu_T0, 4);
3bd7da9e
FB
2184}
2185
2c0262af
FB
2186/* move T0 to seg_reg and compute if the CPU state may change. Never
2187 call this function with seg_reg == R_CS */
100ec099 2188static void gen_movl_seg_T0(DisasContext *s, int seg_reg)
2c0262af 2189{
3415a4dd 2190 if (s->pe && !s->vm86) {
1d1cc4d0 2191 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
2999a0b2 2192 gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32);
dc196a57
FB
2193 /* abort translation because the addseg value may change or
2194 because ss32 may change. For R_SS, translation must always
 2195 stop as special handling must be done to disable hardware
2196 interrupts for the next instruction */
2197 if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
5779406a 2198 s->is_jmp = DISAS_TB_JUMP;
3415a4dd 2199 } else {
3bd7da9e 2200 gen_op_movl_seg_T0_vm(seg_reg);
dc196a57 2201 if (seg_reg == R_SS)
5779406a 2202 s->is_jmp = DISAS_TB_JUMP;
3415a4dd 2203 }
2c0262af
FB
2204}
2205
0573fbfc
TS
2206static inline int svm_is_rep(int prefixes)
2207{
2208 return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
2209}
2210
872929aa 2211static inline void
0573fbfc 2212gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
b8b6a50b 2213 uint32_t type, uint64_t param)
0573fbfc 2214{
872929aa
FB
2215 /* no SVM activated; fast case */
2216 if (likely(!(s->flags & HF_SVMI_MASK)))
2217 return;
773cdfcc 2218 gen_update_cc_op(s);
872929aa 2219 gen_jmp_im(pc_start - s->cs_base);
052e80d5 2220 gen_helper_svm_check_intercept_param(cpu_env, tcg_const_i32(type),
a7812ae4 2221 tcg_const_i64(param));
0573fbfc
TS
2222}
2223
872929aa 2224static inline void
0573fbfc
TS
2225gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
2226{
872929aa 2227 gen_svm_check_intercept_param(s, pc_start, type, 0);
0573fbfc
TS
2228}
2229
4f31916f
FB
2230static inline void gen_stack_update(DisasContext *s, int addend)
2231{
64ae256c 2232 gen_op_add_reg_im(mo_stacksize(s), R_ESP, addend);
4f31916f
FB
2233}
2234
432baffe
RH
2235/* Generate a push. It depends on ss32, addseg and dflag. */
2236static void gen_push_v(DisasContext *s, TCGv val)
2c0262af 2237{
64ae256c
RH
2238 TCGMemOp d_ot = mo_pushpop(s, s->dflag);
2239 TCGMemOp a_ot = mo_stacksize(s);
432baffe
RH
2240 int size = 1 << d_ot;
2241 TCGv new_esp = cpu_A0;
2242
2243 tcg_gen_subi_tl(cpu_A0, cpu_regs[R_ESP], size);
2c0262af 2244
77ebcad0 2245 if (!CODE64(s)) {
432baffe
RH
2246 if (s->addseg) {
2247 new_esp = cpu_tmp4;
2248 tcg_gen_mov_tl(new_esp, cpu_A0);
2c0262af 2249 }
77ebcad0 2250 gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
2c0262af 2251 }
432baffe
RH
2252
2253 gen_op_st_v(s, d_ot, val, cpu_A0);
2254 gen_op_mov_reg_v(a_ot, R_ESP, new_esp);
2c0262af
FB
2255}
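/*
 * Editor's note: for the common 32-bit flat-stack case this amounts to
 * A0 = ESP - size; store val through SS at A0; ESP = A0.  When addseg
 * is set, the new ESP value is kept in a separate temporary because A0
 * is rewritten to include the SS segment base before the store.
 */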
2256
4f31916f 2257/* two step pop is necessary for precise exceptions */
8e31d234 2258static TCGMemOp gen_pop_T0(DisasContext *s)
2c0262af 2259{
8e31d234 2260 TCGMemOp d_ot = mo_pushpop(s, s->dflag);
8e31d234 2261
77ebcad0 2262 gen_lea_v_seg(s, mo_stacksize(s), cpu_regs[R_ESP], R_SS, -1);
1d1cc4d0 2263 gen_op_ld_v(s, d_ot, cpu_T0, cpu_A0);
8e31d234 2264
8e31d234 2265 return d_ot;
2c0262af
FB
2266}
2267
77ebcad0 2268static inline void gen_pop_update(DisasContext *s, TCGMemOp ot)
2c0262af 2269{
8e31d234 2270 gen_stack_update(s, 1 << ot);
2c0262af
FB
2271}
2272
77ebcad0 2273static inline void gen_stack_A0(DisasContext *s)
2c0262af 2274{
77ebcad0 2275 gen_lea_v_seg(s, s->ss32 ? MO_32 : MO_16, cpu_regs[R_ESP], R_SS, -1);
2c0262af
FB
2276}
2277
2c0262af
FB
2278static void gen_pusha(DisasContext *s)
2279{
d37ea0c0
RH
2280 TCGMemOp s_ot = s->ss32 ? MO_32 : MO_16;
2281 TCGMemOp d_ot = s->dflag;
2282 int size = 1 << d_ot;
2c0262af 2283 int i;
d37ea0c0
RH
2284
2285 for (i = 0; i < 8; i++) {
2286 tcg_gen_addi_tl(cpu_A0, cpu_regs[R_ESP], (i - 8) * size);
2287 gen_lea_v_seg(s, s_ot, cpu_A0, R_SS, -1);
2288 gen_op_st_v(s, d_ot, cpu_regs[7 - i], cpu_A0);
2289 }
2290
2291 gen_stack_update(s, -8 * size);
2292}
2293
2c0262af
FB
2294static void gen_popa(DisasContext *s)
2295{
d37ea0c0
RH
2296 TCGMemOp s_ot = s->ss32 ? MO_32 : MO_16;
2297 TCGMemOp d_ot = s->dflag;
2298 int size = 1 << d_ot;
2c0262af 2299 int i;
d37ea0c0
RH
2300
2301 for (i = 0; i < 8; i++) {
2c0262af 2302 /* ESP is not reloaded */
d37ea0c0
RH
2303 if (7 - i == R_ESP) {
2304 continue;
2c0262af 2305 }
d37ea0c0
RH
2306 tcg_gen_addi_tl(cpu_A0, cpu_regs[R_ESP], i * size);
2307 gen_lea_v_seg(s, s_ot, cpu_A0, R_SS, -1);
1d1cc4d0
RH
2308 gen_op_ld_v(s, d_ot, cpu_T0, cpu_A0);
2309 gen_op_mov_reg_v(d_ot, 7 - i, cpu_T0);
2c0262af 2310 }
d37ea0c0
RH
2311
2312 gen_stack_update(s, 8 * size);
2c0262af
FB
2313}
2314
2c0262af
FB
2315static void gen_enter(DisasContext *s, int esp_addend, int level)
2316{
743e398e
RH
2317 TCGMemOp d_ot = mo_pushpop(s, s->dflag);
2318 TCGMemOp a_ot = CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16;
2319 int size = 1 << d_ot;
2c0262af 2320
743e398e 2321 /* Push BP; compute FrameTemp into T1. */
1d1cc4d0
RH
2322 tcg_gen_subi_tl(cpu_T1, cpu_regs[R_ESP], size);
2323 gen_lea_v_seg(s, a_ot, cpu_T1, R_SS, -1);
743e398e
RH
2324 gen_op_st_v(s, d_ot, cpu_regs[R_EBP], cpu_A0);
2325
2326 level &= 31;
2327 if (level != 0) {
2328 int i;
2329
2330 /* Copy level-1 pointers from the previous frame. */
2331 for (i = 1; i < level; ++i) {
2332 tcg_gen_subi_tl(cpu_A0, cpu_regs[R_EBP], size * i);
2333 gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
2334 gen_op_ld_v(s, d_ot, cpu_tmp0, cpu_A0);
2335
1d1cc4d0 2336 tcg_gen_subi_tl(cpu_A0, cpu_T1, size * i);
743e398e
RH
2337 gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
2338 gen_op_st_v(s, d_ot, cpu_tmp0, cpu_A0);
8f091a59 2339 }
743e398e
RH
2340
2341 /* Push the current FrameTemp as the last level. */
1d1cc4d0 2342 tcg_gen_subi_tl(cpu_A0, cpu_T1, size * level);
743e398e 2343 gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
1d1cc4d0 2344 gen_op_st_v(s, d_ot, cpu_T1, cpu_A0);
2c0262af 2345 }
743e398e
RH
2346
2347 /* Copy the FrameTemp value to EBP. */
1d1cc4d0 2348 gen_op_mov_reg_v(a_ot, R_EBP, cpu_T1);
743e398e
RH
2349
2350 /* Compute the final value of ESP. */
1d1cc4d0
RH
2351 tcg_gen_subi_tl(cpu_T1, cpu_T1, esp_addend + size * level);
2352 gen_op_mov_reg_v(a_ot, R_ESP, cpu_T1);
2c0262af
FB
2353}
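/*
 * Editor's note: a worked example of the sequence above.  For
 * "enter $16, $2" with 32-bit operands: EBP is pushed and FrameTemp is
 * the resulting stack pointer; one level-1 pointer is copied down from
 * the old frame; FrameTemp itself is pushed as the last level; EBP is
 * set to FrameTemp; and finally ESP = FrameTemp - (16 + 2 * 4).
 */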
2354
2045f04c
RH
2355static void gen_leave(DisasContext *s)
2356{
2357 TCGMemOp d_ot = mo_pushpop(s, s->dflag);
2358 TCGMemOp a_ot = mo_stacksize(s);
2359
2360 gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1);
1d1cc4d0 2361 gen_op_ld_v(s, d_ot, cpu_T0, cpu_A0);
2045f04c 2362
1d1cc4d0 2363 tcg_gen_addi_tl(cpu_T1, cpu_regs[R_EBP], 1 << d_ot);
2045f04c 2364
1d1cc4d0
RH
2365 gen_op_mov_reg_v(d_ot, R_EBP, cpu_T0);
2366 gen_op_mov_reg_v(a_ot, R_ESP, cpu_T1);
2045f04c
RH
2367}
2368
14ce26e7 2369static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
2c0262af 2370{
773cdfcc 2371 gen_update_cc_op(s);
14ce26e7 2372 gen_jmp_im(cur_eip);
77b2bc2c 2373 gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
5779406a 2374 s->is_jmp = DISAS_TB_JUMP;
2c0262af
FB
2375}
2376
b9f9c5b4
RH
2377/* Generate #UD for the current instruction. The assumption here is that
2378 the instruction is known, but it isn't allowed in the current cpu mode. */
2379static void gen_illegal_opcode(DisasContext *s)
2380{
2381 gen_exception(s, EXCP06_ILLOP, s->pc_start - s->cs_base);
2382}
2383
2384/* Similarly, except that the assumption here is that we don't decode
2385 the instruction at all -- either a missing opcode, an unimplemented
2386 feature, or just a bogus instruction stream. */
2387static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
2388{
2389 gen_illegal_opcode(s);
2390
2391 if (qemu_loglevel_mask(LOG_UNIMP)) {
2392 target_ulong pc = s->pc_start, end = s->pc;
2393 qemu_log("ILLOPC: " TARGET_FMT_lx ":", pc);
2394 for (; pc < end; ++pc) {
2395 qemu_log(" %02x", cpu_ldub_code(env, pc));
2396 }
2397 qemu_log("\n");
2398 }
2399}
2400
2c0262af 2401/* an interrupt is different from an exception because of the
7f75ffd3 2402 privilege checks */
5fafdf24 2403static void gen_interrupt(DisasContext *s, int intno,
14ce26e7 2404 target_ulong cur_eip, target_ulong next_eip)
2c0262af 2405{
773cdfcc 2406 gen_update_cc_op(s);
14ce26e7 2407 gen_jmp_im(cur_eip);
77b2bc2c 2408 gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno),
a7812ae4 2409 tcg_const_i32(next_eip - cur_eip));
5779406a 2410 s->is_jmp = DISAS_TB_JUMP;
2c0262af
FB
2411}
2412
14ce26e7 2413static void gen_debug(DisasContext *s, target_ulong cur_eip)
2c0262af 2414{
773cdfcc 2415 gen_update_cc_op(s);
14ce26e7 2416 gen_jmp_im(cur_eip);
4a7443be 2417 gen_helper_debug(cpu_env);
5779406a 2418 s->is_jmp = DISAS_TB_JUMP;
2c0262af
FB
2419}
2420
7f0b7141
RH
2421static void gen_set_hflag(DisasContext *s, uint32_t mask)
2422{
2423 if ((s->flags & mask) == 0) {
2424 TCGv_i32 t = tcg_temp_new_i32();
2425 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags));
2426 tcg_gen_ori_i32(t, t, mask);
2427 tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags));
2428 tcg_temp_free_i32(t);
2429 s->flags |= mask;
2430 }
2431}
2432
2433static void gen_reset_hflag(DisasContext *s, uint32_t mask)
2434{
2435 if (s->flags & mask) {
2436 TCGv_i32 t = tcg_temp_new_i32();
2437 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags));
2438 tcg_gen_andi_i32(t, t, ~mask);
2439 tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags));
2440 tcg_temp_free_i32(t);
2441 s->flags &= ~mask;
2442 }
2443}
2444
7d117ce8
RH
2445/* Clear BND registers during legacy branches. */
2446static void gen_bnd_jmp(DisasContext *s)
2447{
8b33e82b
PB
2448 /* Clear the registers only if BND prefix is missing, MPX is enabled,
2449 and if the BNDREGs are known to be in use (non-zero) already.
2450 The helper itself will check BNDPRESERVE at runtime. */
7d117ce8 2451 if ((s->prefix & PREFIX_REPNZ) == 0
8b33e82b
PB
2452 && (s->flags & HF_MPX_EN_MASK) != 0
2453 && (s->flags & HF_MPX_IU_MASK) != 0) {
7d117ce8
RH
2454 gen_helper_bnd_jmp(cpu_env);
2455 }
2456}
2457
f083d92c
RH
2458/* Generate an end of block. Trace exception is also generated if needed.
 2459 If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set. */
2460static void gen_eob_inhibit_irq(DisasContext *s, bool inhibit)
2c0262af 2461{
773cdfcc 2462 gen_update_cc_op(s);
f083d92c
RH
2463
2464 /* If several instructions disable interrupts, only the first does it. */
2465 if (inhibit && !(s->flags & HF_INHIBIT_IRQ_MASK)) {
2466 gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
2467 } else {
2468 gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
2469 }
2470
a2397807 2471 if (s->tb->flags & HF_RF_MASK) {
f0967a1a 2472 gen_helper_reset_rf(cpu_env);
a2397807 2473 }
34865134 2474 if (s->singlestep_enabled) {
4a7443be 2475 gen_helper_debug(cpu_env);
34865134 2476 } else if (s->tf) {
4a7443be 2477 gen_helper_single_step(cpu_env);
2c0262af 2478 } else {
57fec1fe 2479 tcg_gen_exit_tb(0);
2c0262af 2480 }
5779406a 2481 s->is_jmp = DISAS_TB_JUMP;
2c0262af
FB
2482}
2483
f083d92c
RH
2484/* End of block, resetting the inhibit irq flag. */
2485static void gen_eob(DisasContext *s)
2486{
2487 gen_eob_inhibit_irq(s, false);
2488}
2489
2c0262af
FB
2490/* generate a jump to eip. No segment change must happen before as a
2491 direct call to the next block may occur */
14ce26e7 2492static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
2c0262af 2493{
a3251186
RH
2494 gen_update_cc_op(s);
2495 set_cc_op(s, CC_OP_DYNAMIC);
2c0262af 2496 if (s->jmp_opt) {
6e256c93 2497 gen_goto_tb(s, tb_num, eip);
5779406a 2498 s->is_jmp = DISAS_TB_JUMP;
2c0262af 2499 } else {
14ce26e7 2500 gen_jmp_im(eip);
2c0262af
FB
2501 gen_eob(s);
2502 }
2503}
2504
14ce26e7
FB
2505static void gen_jmp(DisasContext *s, target_ulong eip)
2506{
2507 gen_jmp_tb(s, eip, 0);
2508}
2509
323d1876 2510static inline void gen_ldq_env_A0(DisasContext *s, int offset)
8686c490 2511{
3c5f4116 2512 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
b6abf97d 2513 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
8686c490 2514}
664e0f19 2515
323d1876 2516static inline void gen_stq_env_A0(DisasContext *s, int offset)
8686c490 2517{
b6abf97d 2518 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
3523e4bd 2519 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
8686c490 2520}
664e0f19 2521
323d1876 2522static inline void gen_ldo_env_A0(DisasContext *s, int offset)
8686c490 2523{
5c42a7cd 2524 int mem_index = s->mem_index;
3c5f4116 2525 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
19cbd87c 2526 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
8686c490 2527 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
3c5f4116 2528 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
19cbd87c 2529 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
8686c490 2530}
14ce26e7 2531
323d1876 2532static inline void gen_sto_env_A0(DisasContext *s, int offset)
8686c490 2533{
5c42a7cd 2534 int mem_index = s->mem_index;
19cbd87c 2535 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
3523e4bd 2536 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
8686c490 2537 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
19cbd87c 2538 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
3523e4bd 2539 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
8686c490 2540}
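/*
 * Editor's note: the _ldo/_sto helpers above move a full 128-bit XMM
 * value as two little-endian 64-bit guest memory accesses through
 * cpu_tmp1_i64, filling or reading the ZMM_Q(0)/ZMM_Q(1) halves of the
 * register image in env; no single 128-bit TCG memory operation is
 * used here.
 */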
14ce26e7 2541
5af45186
FB
2542static inline void gen_op_movo(int d_offset, int s_offset)
2543{
19cbd87c
EH
2544 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(0)));
2545 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(0)));
2546 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(1)));
2547 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(1)));
5af45186
FB
2548}
2549
2550static inline void gen_op_movq(int d_offset, int s_offset)
2551{
b6abf97d
FB
2552 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2553 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
5af45186
FB
2554}
2555
2556static inline void gen_op_movl(int d_offset, int s_offset)
2557{
b6abf97d
FB
2558 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
2559 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
5af45186
FB
2560}
2561
2562static inline void gen_op_movq_env_0(int d_offset)
2563{
b6abf97d
FB
2564 tcg_gen_movi_i64(cpu_tmp1_i64, 0);
2565 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
5af45186 2566}
664e0f19 2567
d3eb5eae
BS
2568typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
2569typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
2570typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val);
2571typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val);
2572typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
2573typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2574 TCGv_i32 val);
c4baa050 2575typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
d3eb5eae
BS
2576typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2577 TCGv val);
c4baa050 2578
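/*
 * Editor's note: the typedef suffixes above appear to encode the helper
 * signatures: the first letter is the result slot (0 = none, i = i32,
 * l = i64) and the remaining letters are the arguments (e = cpu_env,
 * p = pointer to a register image, i = i32, l = i64, t = target-sized
 * TCGv), so SSEFunc_0_eppi is void(env, ptr, ptr, i32).
 */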
5af45186
FB
2579#define SSE_SPECIAL ((void *)1)
2580#define SSE_DUMMY ((void *)2)
664e0f19 2581
a7812ae4
PB
2582#define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2583#define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2584 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
5af45186 2585
d3eb5eae 2586static const SSEFunc_0_epp sse_op_table1[256][4] = {
a35f3ec7
AJ
2587 /* 3DNow! extensions */
2588 [0x0e] = { SSE_DUMMY }, /* femms */
2589 [0x0f] = { SSE_DUMMY }, /* pf... */
664e0f19
FB
2590 /* pure SSE operations */
2591 [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2592 [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
465e9838 2593 [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
664e0f19 2594 [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
a7812ae4
PB
2595 [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
2596 [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
664e0f19
FB
2597 [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
2598 [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */
2599
2600 [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2601 [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2602 [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
d9f4bb27 2603 [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
664e0f19
FB
2604 [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
2605 [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
a7812ae4
PB
2606 [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
2607 [0x2f] = { gen_helper_comiss, gen_helper_comisd },
664e0f19
FB
2608 [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
2609 [0x51] = SSE_FOP(sqrt),
a7812ae4
PB
2610 [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
2611 [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
2612 [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
2613 [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
2614 [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
2615 [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
664e0f19
FB
2616 [0x58] = SSE_FOP(add),
2617 [0x59] = SSE_FOP(mul),
a7812ae4
PB
2618 [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
2619 gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
2620 [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
664e0f19
FB
2621 [0x5c] = SSE_FOP(sub),
2622 [0x5d] = SSE_FOP(min),
2623 [0x5e] = SSE_FOP(div),
2624 [0x5f] = SSE_FOP(max),
2625
2626 [0xc2] = SSE_FOP(cmpeq),
d3eb5eae
BS
2627 [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
2628 (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */
664e0f19 2629
7073fbad
RH
2630 /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX. */
2631 [0x38] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2632 [0x3a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
4242b1bd 2633
664e0f19
FB
2634 /* MMX ops and their SSE extensions */
2635 [0x60] = MMX_OP2(punpcklbw),
2636 [0x61] = MMX_OP2(punpcklwd),
2637 [0x62] = MMX_OP2(punpckldq),
2638 [0x63] = MMX_OP2(packsswb),
2639 [0x64] = MMX_OP2(pcmpgtb),
2640 [0x65] = MMX_OP2(pcmpgtw),
2641 [0x66] = MMX_OP2(pcmpgtl),
2642 [0x67] = MMX_OP2(packuswb),
2643 [0x68] = MMX_OP2(punpckhbw),
2644 [0x69] = MMX_OP2(punpckhwd),
2645 [0x6a] = MMX_OP2(punpckhdq),
2646 [0x6b] = MMX_OP2(packssdw),
a7812ae4
PB
2647 [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
2648 [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
664e0f19
FB
2649 [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
 2650 [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
d3eb5eae
BS
2651 [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
2652 (SSEFunc_0_epp)gen_helper_pshufd_xmm,
2653 (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
2654 (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
664e0f19
FB
2655 [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
2656 [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
2657 [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
2658 [0x74] = MMX_OP2(pcmpeqb),
2659 [0x75] = MMX_OP2(pcmpeqw),
2660 [0x76] = MMX_OP2(pcmpeql),
a35f3ec7 2661 [0x77] = { SSE_DUMMY }, /* emms */
d9f4bb27
AP
2662 [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
2663 [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
a7812ae4
PB
2664 [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
2665 [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
664e0f19
FB
 2666 [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, movq */
2667 [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2668 [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
2669 [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
a7812ae4 2670 [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
664e0f19
FB
2671 [0xd1] = MMX_OP2(psrlw),
2672 [0xd2] = MMX_OP2(psrld),
2673 [0xd3] = MMX_OP2(psrlq),
2674 [0xd4] = MMX_OP2(paddq),
2675 [0xd5] = MMX_OP2(pmullw),
2676 [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2677 [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
2678 [0xd8] = MMX_OP2(psubusb),
2679 [0xd9] = MMX_OP2(psubusw),
2680 [0xda] = MMX_OP2(pminub),
2681 [0xdb] = MMX_OP2(pand),
2682 [0xdc] = MMX_OP2(paddusb),
2683 [0xdd] = MMX_OP2(paddusw),
2684 [0xde] = MMX_OP2(pmaxub),
2685 [0xdf] = MMX_OP2(pandn),
2686 [0xe0] = MMX_OP2(pavgb),
2687 [0xe1] = MMX_OP2(psraw),
2688 [0xe2] = MMX_OP2(psrad),
2689 [0xe3] = MMX_OP2(pavgw),
2690 [0xe4] = MMX_OP2(pmulhuw),
2691 [0xe5] = MMX_OP2(pmulhw),
a7812ae4 2692 [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
664e0f19
FB
 2693 [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
2694 [0xe8] = MMX_OP2(psubsb),
2695 [0xe9] = MMX_OP2(psubsw),
2696 [0xea] = MMX_OP2(pminsw),
2697 [0xeb] = MMX_OP2(por),
2698 [0xec] = MMX_OP2(paddsb),
2699 [0xed] = MMX_OP2(paddsw),
2700 [0xee] = MMX_OP2(pmaxsw),
2701 [0xef] = MMX_OP2(pxor),
465e9838 2702 [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
664e0f19
FB
2703 [0xf1] = MMX_OP2(psllw),
2704 [0xf2] = MMX_OP2(pslld),
2705 [0xf3] = MMX_OP2(psllq),
2706 [0xf4] = MMX_OP2(pmuludq),
2707 [0xf5] = MMX_OP2(pmaddwd),
2708 [0xf6] = MMX_OP2(psadbw),
d3eb5eae
BS
2709 [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
2710 (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
664e0f19
FB
2711 [0xf8] = MMX_OP2(psubb),
2712 [0xf9] = MMX_OP2(psubw),
2713 [0xfa] = MMX_OP2(psubl),
2714 [0xfb] = MMX_OP2(psubq),
2715 [0xfc] = MMX_OP2(paddb),
2716 [0xfd] = MMX_OP2(paddw),
2717 [0xfe] = MMX_OP2(paddl),
2718};
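/*
 * Editor's note: the table above is indexed by the second opcode byte
 * and then by the mandatory-prefix column computed in gen_sse():
 * 0 = no prefix (MMX / packed-single forms), 1 = 0x66, 2 = 0xF3,
 * 3 = 0xF2.  SSE_SPECIAL entries are decoded by hand further down, and
 * SSE_DUMMY marks cases (femms, 3DNow!, emms) that are handled outside
 * the normal table dispatch.
 */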
2719
d3eb5eae 2720static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
664e0f19
FB
2721 [0 + 2] = MMX_OP2(psrlw),
2722 [0 + 4] = MMX_OP2(psraw),
2723 [0 + 6] = MMX_OP2(psllw),
2724 [8 + 2] = MMX_OP2(psrld),
2725 [8 + 4] = MMX_OP2(psrad),
2726 [8 + 6] = MMX_OP2(pslld),
2727 [16 + 2] = MMX_OP2(psrlq),
a7812ae4 2728 [16 + 3] = { NULL, gen_helper_psrldq_xmm },
664e0f19 2729 [16 + 6] = MMX_OP2(psllq),
a7812ae4 2730 [16 + 7] = { NULL, gen_helper_pslldq_xmm },
664e0f19
FB
2731};
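/*
 * Editor's note: this table serves the immediate-form shift group
 * (opcodes 0x71-0x73).  The row index is ((b - 1) & 3) * 8 plus the
 * ModRM reg field, i.e. blocks of eight for word/dword/qword operands,
 * with reg = 2/4/6 selecting logical-right, arithmetic-right and left
 * shifts; in the qword block the 0x66 column additionally provides the
 * 128-bit psrldq/pslldq byte shifts at reg = 3 and 7.
 */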
2732
d3eb5eae 2733static const SSEFunc_0_epi sse_op_table3ai[] = {
a7812ae4 2734 gen_helper_cvtsi2ss,
11f8cdbc 2735 gen_helper_cvtsi2sd
c4baa050 2736};
a7812ae4 2737
11f8cdbc 2738#ifdef TARGET_X86_64
d3eb5eae 2739static const SSEFunc_0_epl sse_op_table3aq[] = {
11f8cdbc
SW
2740 gen_helper_cvtsq2ss,
2741 gen_helper_cvtsq2sd
2742};
2743#endif
2744
d3eb5eae 2745static const SSEFunc_i_ep sse_op_table3bi[] = {
a7812ae4 2746 gen_helper_cvttss2si,
a7812ae4 2747 gen_helper_cvtss2si,
bedc2ac1 2748 gen_helper_cvttsd2si,
11f8cdbc 2749 gen_helper_cvtsd2si
664e0f19 2750};
3b46e624 2751
11f8cdbc 2752#ifdef TARGET_X86_64
d3eb5eae 2753static const SSEFunc_l_ep sse_op_table3bq[] = {
11f8cdbc 2754 gen_helper_cvttss2sq,
11f8cdbc 2755 gen_helper_cvtss2sq,
bedc2ac1 2756 gen_helper_cvttsd2sq,
11f8cdbc
SW
2757 gen_helper_cvtsd2sq
2758};
2759#endif
2760
d3eb5eae 2761static const SSEFunc_0_epp sse_op_table4[8][4] = {
664e0f19
FB
2762 SSE_FOP(cmpeq),
2763 SSE_FOP(cmplt),
2764 SSE_FOP(cmple),
2765 SSE_FOP(cmpunord),
2766 SSE_FOP(cmpneq),
2767 SSE_FOP(cmpnlt),
2768 SSE_FOP(cmpnle),
2769 SSE_FOP(cmpord),
2770};
3b46e624 2771
d3eb5eae 2772static const SSEFunc_0_epp sse_op_table5[256] = {
a7812ae4
PB
2773 [0x0c] = gen_helper_pi2fw,
2774 [0x0d] = gen_helper_pi2fd,
2775 [0x1c] = gen_helper_pf2iw,
2776 [0x1d] = gen_helper_pf2id,
2777 [0x8a] = gen_helper_pfnacc,
2778 [0x8e] = gen_helper_pfpnacc,
2779 [0x90] = gen_helper_pfcmpge,
2780 [0x94] = gen_helper_pfmin,
2781 [0x96] = gen_helper_pfrcp,
2782 [0x97] = gen_helper_pfrsqrt,
2783 [0x9a] = gen_helper_pfsub,
2784 [0x9e] = gen_helper_pfadd,
2785 [0xa0] = gen_helper_pfcmpgt,
2786 [0xa4] = gen_helper_pfmax,
2787 [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
2788 [0xa7] = gen_helper_movq, /* pfrsqit1 */
2789 [0xaa] = gen_helper_pfsubr,
2790 [0xae] = gen_helper_pfacc,
2791 [0xb0] = gen_helper_pfcmpeq,
2792 [0xb4] = gen_helper_pfmul,
2793 [0xb6] = gen_helper_movq, /* pfrcpit2 */
2794 [0xb7] = gen_helper_pmulhrw_mmx,
2795 [0xbb] = gen_helper_pswapd,
2796 [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
a35f3ec7
AJ
2797};
2798
d3eb5eae
BS
2799struct SSEOpHelper_epp {
2800 SSEFunc_0_epp op[2];
c4baa050
BS
2801 uint32_t ext_mask;
2802};
2803
d3eb5eae
BS
2804struct SSEOpHelper_eppi {
2805 SSEFunc_0_eppi op[2];
c4baa050 2806 uint32_t ext_mask;
222a3336 2807};
c4baa050 2808
222a3336 2809#define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
a7812ae4
PB
2810#define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
2811#define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
222a3336 2812#define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
e71827bc
AJ
2813#define PCLMULQDQ_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, \
2814 CPUID_EXT_PCLMULQDQ }
d640045a 2815#define AESNI_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_AES }
c4baa050 2816
d3eb5eae 2817static const struct SSEOpHelper_epp sse_op_table6[256] = {
222a3336
AZ
2818 [0x00] = SSSE3_OP(pshufb),
2819 [0x01] = SSSE3_OP(phaddw),
2820 [0x02] = SSSE3_OP(phaddd),
2821 [0x03] = SSSE3_OP(phaddsw),
2822 [0x04] = SSSE3_OP(pmaddubsw),
2823 [0x05] = SSSE3_OP(phsubw),
2824 [0x06] = SSSE3_OP(phsubd),
2825 [0x07] = SSSE3_OP(phsubsw),
2826 [0x08] = SSSE3_OP(psignb),
2827 [0x09] = SSSE3_OP(psignw),
2828 [0x0a] = SSSE3_OP(psignd),
2829 [0x0b] = SSSE3_OP(pmulhrsw),
2830 [0x10] = SSE41_OP(pblendvb),
2831 [0x14] = SSE41_OP(blendvps),
2832 [0x15] = SSE41_OP(blendvpd),
2833 [0x17] = SSE41_OP(ptest),
2834 [0x1c] = SSSE3_OP(pabsb),
2835 [0x1d] = SSSE3_OP(pabsw),
2836 [0x1e] = SSSE3_OP(pabsd),
2837 [0x20] = SSE41_OP(pmovsxbw),
2838 [0x21] = SSE41_OP(pmovsxbd),
2839 [0x22] = SSE41_OP(pmovsxbq),
2840 [0x23] = SSE41_OP(pmovsxwd),
2841 [0x24] = SSE41_OP(pmovsxwq),
2842 [0x25] = SSE41_OP(pmovsxdq),
2843 [0x28] = SSE41_OP(pmuldq),
2844 [0x29] = SSE41_OP(pcmpeqq),
 2845 [0x2a] = SSE41_SPECIAL, /* movntdqa */
2846 [0x2b] = SSE41_OP(packusdw),
2847 [0x30] = SSE41_OP(pmovzxbw),
2848 [0x31] = SSE41_OP(pmovzxbd),
2849 [0x32] = SSE41_OP(pmovzxbq),
2850 [0x33] = SSE41_OP(pmovzxwd),
2851 [0x34] = SSE41_OP(pmovzxwq),
2852 [0x35] = SSE41_OP(pmovzxdq),
2853 [0x37] = SSE42_OP(pcmpgtq),
2854 [0x38] = SSE41_OP(pminsb),
2855 [0x39] = SSE41_OP(pminsd),
2856 [0x3a] = SSE41_OP(pminuw),
2857 [0x3b] = SSE41_OP(pminud),
2858 [0x3c] = SSE41_OP(pmaxsb),
2859 [0x3d] = SSE41_OP(pmaxsd),
2860 [0x3e] = SSE41_OP(pmaxuw),
2861 [0x3f] = SSE41_OP(pmaxud),
2862 [0x40] = SSE41_OP(pmulld),
2863 [0x41] = SSE41_OP(phminposuw),
d640045a
AJ
2864 [0xdb] = AESNI_OP(aesimc),
2865 [0xdc] = AESNI_OP(aesenc),
2866 [0xdd] = AESNI_OP(aesenclast),
2867 [0xde] = AESNI_OP(aesdec),
2868 [0xdf] = AESNI_OP(aesdeclast),
4242b1bd
AZ
2869};
2870
d3eb5eae 2871static const struct SSEOpHelper_eppi sse_op_table7[256] = {
222a3336
AZ
2872 [0x08] = SSE41_OP(roundps),
2873 [0x09] = SSE41_OP(roundpd),
2874 [0x0a] = SSE41_OP(roundss),
2875 [0x0b] = SSE41_OP(roundsd),
2876 [0x0c] = SSE41_OP(blendps),
2877 [0x0d] = SSE41_OP(blendpd),
2878 [0x0e] = SSE41_OP(pblendw),
2879 [0x0f] = SSSE3_OP(palignr),
2880 [0x14] = SSE41_SPECIAL, /* pextrb */
2881 [0x15] = SSE41_SPECIAL, /* pextrw */
2882 [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
2883 [0x17] = SSE41_SPECIAL, /* extractps */
2884 [0x20] = SSE41_SPECIAL, /* pinsrb */
2885 [0x21] = SSE41_SPECIAL, /* insertps */
2886 [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
2887 [0x40] = SSE41_OP(dpps),
2888 [0x41] = SSE41_OP(dppd),
2889 [0x42] = SSE41_OP(mpsadbw),
e71827bc 2890 [0x44] = PCLMULQDQ_OP(pclmulqdq),
222a3336
AZ
2891 [0x60] = SSE42_OP(pcmpestrm),
2892 [0x61] = SSE42_OP(pcmpestri),
2893 [0x62] = SSE42_OP(pcmpistrm),
2894 [0x63] = SSE42_OP(pcmpistri),
d640045a 2895 [0xdf] = AESNI_OP(aeskeygenassist),
4242b1bd
AZ
2896};
2897
0af10c86
BS
2898static void gen_sse(CPUX86State *env, DisasContext *s, int b,
2899 target_ulong pc_start, int rex_r)
664e0f19 2900{
d67dc9e6 2901 int b1, op1_offset, op2_offset, is_xmm, val;
4eeb3939 2902 int modrm, mod, rm, reg;
d3eb5eae
BS
2903 SSEFunc_0_epp sse_fn_epp;
2904 SSEFunc_0_eppi sse_fn_eppi;
c4baa050 2905 SSEFunc_0_ppi sse_fn_ppi;
d3eb5eae 2906 SSEFunc_0_eppt sse_fn_eppt;
d67dc9e6 2907 TCGMemOp ot;
664e0f19
FB
2908
2909 b &= 0xff;
5fafdf24 2910 if (s->prefix & PREFIX_DATA)
664e0f19 2911 b1 = 1;
5fafdf24 2912 else if (s->prefix & PREFIX_REPZ)
664e0f19 2913 b1 = 2;
5fafdf24 2914 else if (s->prefix & PREFIX_REPNZ)
664e0f19
FB
2915 b1 = 3;
2916 else
2917 b1 = 0;
d3eb5eae
BS
2918 sse_fn_epp = sse_op_table1[b][b1];
2919 if (!sse_fn_epp) {
b9f9c5b4 2920 goto unknown_op;
c4baa050 2921 }
a35f3ec7 2922 if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
664e0f19
FB
2923 is_xmm = 1;
2924 } else {
2925 if (b1 == 0) {
2926 /* MMX case */
2927 is_xmm = 0;
2928 } else {
2929 is_xmm = 1;
2930 }
2931 }
2932 /* simple MMX/SSE operation */
2933 if (s->flags & HF_TS_MASK) {
2934 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
2935 return;
2936 }
2937 if (s->flags & HF_EM_MASK) {
2938 illegal_op:
b9f9c5b4 2939 gen_illegal_opcode(s);
664e0f19
FB
2940 return;
2941 }
b9f9c5b4
RH
2942 if (is_xmm
2943 && !(s->flags & HF_OSFXSR_MASK)
2944 && ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))) {
2945 goto unknown_op;
2946 }
e771edab 2947 if (b == 0x0e) {
b9f9c5b4
RH
2948 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) {
2949 /* If we were fully decoding this we might use illegal_op. */
2950 goto unknown_op;
2951 }
e771edab 2952 /* femms */
d3eb5eae 2953 gen_helper_emms(cpu_env);
e771edab
AJ
2954 return;
2955 }
2956 if (b == 0x77) {
2957 /* emms */
d3eb5eae 2958 gen_helper_emms(cpu_env);
664e0f19
FB
2959 return;
2960 }
2961 /* prepare MMX state (XXX: optimize by storing fptt and fptags in
2962 the static cpu state) */
2963 if (!is_xmm) {
d3eb5eae 2964 gen_helper_enter_mmx(cpu_env);
664e0f19
FB
2965 }
2966
0af10c86 2967 modrm = cpu_ldub_code(env, s->pc++);
664e0f19
FB
2968 reg = ((modrm >> 3) & 7);
2969 if (is_xmm)
2970 reg |= rex_r;
2971 mod = (modrm >> 6) & 3;
d3eb5eae 2972 if (sse_fn_epp == SSE_SPECIAL) {
664e0f19
FB
2973 b |= (b1 << 8);
2974 switch(b) {
2975 case 0x0e7: /* movntq */
b9f9c5b4 2976 if (mod == 3) {
664e0f19 2977 goto illegal_op;
b9f9c5b4 2978 }
4eeb3939 2979 gen_lea_modrm(env, s, modrm);
323d1876 2980 gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
664e0f19
FB
2981 break;
2982 case 0x1e7: /* movntdq */
2983 case 0x02b: /* movntps */
2984 case 0x12b: /* movntps */
2e21e749
T
2985 if (mod == 3)
2986 goto illegal_op;
4eeb3939 2987 gen_lea_modrm(env, s, modrm);
323d1876 2988 gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
2e21e749 2989 break;
465e9838
FB
2990 case 0x3f0: /* lddqu */
2991 if (mod == 3)
664e0f19 2992 goto illegal_op;
4eeb3939 2993 gen_lea_modrm(env, s, modrm);
323d1876 2994 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
664e0f19 2995 break;
d9f4bb27
AP
2996 case 0x22b: /* movntss */
2997 case 0x32b: /* movntsd */
2998 if (mod == 3)
2999 goto illegal_op;
4eeb3939 3000 gen_lea_modrm(env, s, modrm);
d9f4bb27 3001 if (b1 & 1) {
07958082 3002 gen_stq_env_A0(s, offsetof(CPUX86State,
19cbd87c 3003 xmm_regs[reg].ZMM_Q(0)));
d9f4bb27 3004 } else {
1d1cc4d0 3005 tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
19cbd87c 3006 xmm_regs[reg].ZMM_L(0)));
1d1cc4d0 3007 gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
d9f4bb27
AP
3008 }
3009 break;
664e0f19 3010 case 0x6e: /* movd mm, ea */
dabd98dd 3011#ifdef TARGET_X86_64
ab4e4aec 3012 if (s->dflag == MO_64) {
4ba9938c 3013 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
1d1cc4d0 3014 tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
5fafdf24 3015 } else
dabd98dd
FB
3016#endif
3017 {
4ba9938c 3018 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
5af45186
FB
3019 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3020 offsetof(CPUX86State,fpregs[reg].mmx));
1d1cc4d0 3021 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
a7812ae4 3022 gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
dabd98dd 3023 }
664e0f19
FB
3024 break;
3025 case 0x16e: /* movd xmm, ea */
dabd98dd 3026#ifdef TARGET_X86_64
ab4e4aec 3027 if (s->dflag == MO_64) {
4ba9938c 3028 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
5af45186
FB
3029 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3030 offsetof(CPUX86State,xmm_regs[reg]));
1d1cc4d0 3031 gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T0);
5fafdf24 3032 } else
dabd98dd
FB
3033#endif
3034 {
4ba9938c 3035 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
5af45186
FB
3036 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3037 offsetof(CPUX86State,xmm_regs[reg]));
1d1cc4d0 3038 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
a7812ae4 3039 gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
dabd98dd 3040 }
664e0f19
FB
3041 break;
3042 case 0x6f: /* movq mm, ea */
3043 if (mod != 3) {
4eeb3939 3044 gen_lea_modrm(env, s, modrm);
323d1876 3045 gen_ldq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
664e0f19
FB
3046 } else {
3047 rm = (modrm & 7);
b6abf97d 3048 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
5af45186 3049 offsetof(CPUX86State,fpregs[rm].mmx));
b6abf97d 3050 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
5af45186 3051 offsetof(CPUX86State,fpregs[reg].mmx));
664e0f19
FB
3052 }
3053 break;
3054 case 0x010: /* movups */
3055 case 0x110: /* movupd */
3056 case 0x028: /* movaps */
3057 case 0x128: /* movapd */
3058 case 0x16f: /* movdqa xmm, ea */
3059 case 0x26f: /* movdqu xmm, ea */
3060 if (mod != 3) {
4eeb3939 3061 gen_lea_modrm(env, s, modrm);
323d1876 3062 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
664e0f19
FB
3063 } else {
3064 rm = (modrm & 7) | REX_B(s);
3065 gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
3066 offsetof(CPUX86State,xmm_regs[rm]));
3067 }
3068 break;
3069 case 0x210: /* movss xmm, ea */
3070 if (mod != 3) {
4eeb3939 3071 gen_lea_modrm(env, s, modrm);
1d1cc4d0
RH
3072 gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
3073 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
3074 tcg_gen_movi_tl(cpu_T0, 0);
3075 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)));
3076 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
3077 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
664e0f19
FB
3078 } else {
3079 rm = (modrm & 7) | REX_B(s);
19cbd87c
EH
3080 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
3081 offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)));
664e0f19
FB
3082 }
3083 break;
3084 case 0x310: /* movsd xmm, ea */
3085 if (mod != 3) {
4eeb3939 3086 gen_lea_modrm(env, s, modrm);
323d1876 3087 gen_ldq_env_A0(s, offsetof(CPUX86State,
19cbd87c 3088 xmm_regs[reg].ZMM_Q(0)));
1d1cc4d0
RH
3089 tcg_gen_movi_tl(cpu_T0, 0);
3090 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
3091 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
664e0f19
FB
3092 } else {
3093 rm = (modrm & 7) | REX_B(s);
19cbd87c
EH
3094 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
3095 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
664e0f19
FB
3096 }
3097 break;
3098 case 0x012: /* movlps */
3099 case 0x112: /* movlpd */
3100 if (mod != 3) {
4eeb3939 3101 gen_lea_modrm(env, s, modrm);
323d1876 3102 gen_ldq_env_A0(s, offsetof(CPUX86State,
19cbd87c 3103 xmm_regs[reg].ZMM_Q(0)));
664e0f19
FB
3104 } else {
3105 /* movhlps */
3106 rm = (modrm & 7) | REX_B(s);
19cbd87c
EH
3107 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
3108 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(1)));
664e0f19
FB
3109 }
3110 break;
465e9838
FB
3111 case 0x212: /* movsldup */
3112 if (mod != 3) {
4eeb3939 3113 gen_lea_modrm(env, s, modrm);
323d1876 3114 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
465e9838
FB
3115 } else {
3116 rm = (modrm & 7) | REX_B(s);
19cbd87c
EH
3117 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
3118 offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)));
3119 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)),
3120 offsetof(CPUX86State,xmm_regs[rm].ZMM_L(2)));
465e9838 3121 }
19cbd87c
EH
3122 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)),
3123 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
3124 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)),
3125 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
465e9838
FB
3126 break;
3127 case 0x312: /* movddup */
3128 if (mod != 3) {
4eeb3939 3129 gen_lea_modrm(env, s, modrm);
323d1876 3130 gen_ldq_env_A0(s, offsetof(CPUX86State,
19cbd87c 3131 xmm_regs[reg].ZMM_Q(0)));
465e9838
FB
3132 } else {
3133 rm = (modrm & 7) | REX_B(s);
19cbd87c
EH
3134 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
3135 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
465e9838 3136 }
19cbd87c
EH
3137 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)),
3138 offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
465e9838 3139 break;
664e0f19
FB
3140 case 0x016: /* movhps */
3141 case 0x116: /* movhpd */
3142 if (mod != 3) {
4eeb3939 3143 gen_lea_modrm(env, s, modrm);
323d1876 3144 gen_ldq_env_A0(s, offsetof(CPUX86State,
19cbd87c 3145 xmm_regs[reg].ZMM_Q(1)));
664e0f19
FB
3146 } else {
3147 /* movlhps */
3148 rm = (modrm & 7) | REX_B(s);
19cbd87c
EH
3149 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)),
3150 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
664e0f19
FB
3151 }
3152 break;
3153 case 0x216: /* movshdup */
3154 if (mod != 3) {
4eeb3939 3155 gen_lea_modrm(env, s, modrm);
323d1876 3156 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
664e0f19
FB
3157 } else {
3158 rm = (modrm & 7) | REX_B(s);
19cbd87c
EH
3159 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)),
3160 offsetof(CPUX86State,xmm_regs[rm].ZMM_L(1)));
3161 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)),
3162 offsetof(CPUX86State,xmm_regs[rm].ZMM_L(3)));
664e0f19 3163 }
19cbd87c
EH
3164 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
3165 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)));
3166 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)),
3167 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
664e0f19 3168 break;
d9f4bb27
AP
3169 case 0x178:
3170 case 0x378:
3171 {
3172 int bit_index, field_length;
3173
3174 if (b1 == 1 && reg != 0)
3175 goto illegal_op;
0af10c86
BS
3176 field_length = cpu_ldub_code(env, s->pc++) & 0x3F;
3177 bit_index = cpu_ldub_code(env, s->pc++) & 0x3F;
d9f4bb27
AP
3178 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3179 offsetof(CPUX86State,xmm_regs[reg]));
3180 if (b1 == 1)
d3eb5eae
BS
3181 gen_helper_extrq_i(cpu_env, cpu_ptr0,
3182 tcg_const_i32(bit_index),
3183 tcg_const_i32(field_length));
d9f4bb27 3184 else
d3eb5eae
BS
3185 gen_helper_insertq_i(cpu_env, cpu_ptr0,
3186 tcg_const_i32(bit_index),
3187 tcg_const_i32(field_length));
d9f4bb27
AP
3188 }
3189 break;
664e0f19 3190 case 0x7e: /* movd ea, mm */
dabd98dd 3191#ifdef TARGET_X86_64
ab4e4aec 3192 if (s->dflag == MO_64) {
1d1cc4d0 3193 tcg_gen_ld_i64(cpu_T0, cpu_env,
5af45186 3194 offsetof(CPUX86State,fpregs[reg].mmx));
4ba9938c 3195 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
5fafdf24 3196 } else
dabd98dd
FB
3197#endif
3198 {
1d1cc4d0 3199 tcg_gen_ld32u_tl(cpu_T0, cpu_env,
5af45186 3200 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
4ba9938c 3201 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
dabd98dd 3202 }
664e0f19
FB
3203 break;
3204 case 0x17e: /* movd ea, xmm */
dabd98dd 3205#ifdef TARGET_X86_64
ab4e4aec 3206 if (s->dflag == MO_64) {
1d1cc4d0 3207 tcg_gen_ld_i64(cpu_T0, cpu_env,
19cbd87c 3208 offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
4ba9938c 3209 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
5fafdf24 3210 } else
dabd98dd
FB
3211#endif
3212 {
1d1cc4d0 3213 tcg_gen_ld32u_tl(cpu_T0, cpu_env,
19cbd87c 3214 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
4ba9938c 3215 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
dabd98dd 3216 }
664e0f19
FB
3217 break;
3218 case 0x27e: /* movq xmm, ea */
3219 if (mod != 3) {
4eeb3939 3220 gen_lea_modrm(env, s, modrm);
323d1876 3221 gen_ldq_env_A0(s, offsetof(CPUX86State,
19cbd87c 3222 xmm_regs[reg].ZMM_Q(0)));
664e0f19
FB
3223 } else {
3224 rm = (modrm & 7) | REX_B(s);
19cbd87c
EH
3225 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
3226 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
664e0f19 3227 }
19cbd87c 3228 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)));
664e0f19
FB
3229 break;
3230 case 0x7f: /* movq ea, mm */
3231 if (mod != 3) {
4eeb3939 3232 gen_lea_modrm(env, s, modrm);
323d1876 3233 gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
664e0f19
FB
3234 } else {
3235 rm = (modrm & 7);
3236 gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
3237 offsetof(CPUX86State,fpregs[reg].mmx));
3238 }
3239 break;
3240 case 0x011: /* movups */
3241 case 0x111: /* movupd */
3242 case 0x029: /* movaps */
3243 case 0x129: /* movapd */
3244 case 0x17f: /* movdqa ea, xmm */
3245 case 0x27f: /* movdqu ea, xmm */
3246 if (mod != 3) {
4eeb3939 3247 gen_lea_modrm(env, s, modrm);
323d1876 3248 gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
664e0f19
FB
3249 } else {
3250 rm = (modrm & 7) | REX_B(s);
3251 gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
3252 offsetof(CPUX86State,xmm_regs[reg]));
3253 }
3254 break;
3255 case 0x211: /* movss ea, xmm */
3256 if (mod != 3) {
4eeb3939 3257 gen_lea_modrm(env, s, modrm);
1d1cc4d0
RH
3258 tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
3259 gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
664e0f19
FB
3260 } else {
3261 rm = (modrm & 7) | REX_B(s);
19cbd87c
EH
3262 gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)),
3263 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
664e0f19
FB
3264 }
3265 break;
3266 case 0x311: /* movsd ea, xmm */
3267 if (mod != 3) {
4eeb3939 3268 gen_lea_modrm(env, s, modrm);
323d1876 3269 gen_stq_env_A0(s, offsetof(CPUX86State,
19cbd87c 3270 xmm_regs[reg].ZMM_Q(0)));
664e0f19
FB
3271 } else {
3272 rm = (modrm & 7) | REX_B(s);
19cbd87c
EH
3273 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)),
3274 offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
664e0f19
FB
3275 }
3276 break;
3277 case 0x013: /* movlps */
3278 case 0x113: /* movlpd */
3279 if (mod != 3) {
4eeb3939 3280 gen_lea_modrm(env, s, modrm);
323d1876 3281 gen_stq_env_A0(s, offsetof(CPUX86State,
19cbd87c 3282 xmm_regs[reg].ZMM_Q(0)));
664e0f19
FB
3283 } else {
3284 goto illegal_op;
3285 }
3286 break;
3287 case 0x017: /* movhps */
3288 case 0x117: /* movhpd */
3289 if (mod != 3) {
4eeb3939 3290 gen_lea_modrm(env, s, modrm);
323d1876 3291 gen_stq_env_A0(s, offsetof(CPUX86State,
19cbd87c 3292 xmm_regs[reg].ZMM_Q(1)));
664e0f19
FB
3293 } else {
3294 goto illegal_op;
3295 }
3296 break;
3297 case 0x71: /* shift mm, im */
3298 case 0x72:
3299 case 0x73:
3300 case 0x171: /* shift xmm, im */
3301 case 0x172:
3302 case 0x173:
c045af25 3303 if (b1 >= 2) {
b9f9c5b4 3304 goto unknown_op;
c045af25 3305 }
0af10c86 3306 val = cpu_ldub_code(env, s->pc++);
664e0f19 3307 if (is_xmm) {
1d1cc4d0
RH
3308 tcg_gen_movi_tl(cpu_T0, val);
3309 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
3310 tcg_gen_movi_tl(cpu_T0, 0);
3311 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(1)));
664e0f19
FB
3312 op1_offset = offsetof(CPUX86State,xmm_t0);
3313 } else {
1d1cc4d0
RH
3314 tcg_gen_movi_tl(cpu_T0, val);
3315 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
3316 tcg_gen_movi_tl(cpu_T0, 0);
3317 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
664e0f19
FB
3318 op1_offset = offsetof(CPUX86State,mmx_t0);
3319 }
d3eb5eae
BS
3320 sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 +
3321 (((modrm >> 3)) & 7)][b1];
3322 if (!sse_fn_epp) {
b9f9c5b4 3323 goto unknown_op;
c4baa050 3324 }
664e0f19
FB
3325 if (is_xmm) {
3326 rm = (modrm & 7) | REX_B(s);
3327 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3328 } else {
3329 rm = (modrm & 7);
3330 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3331 }
5af45186
FB
3332 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3333 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
d3eb5eae 3334 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3335 break;
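            /* movmskps/movmskpd gather the sign bit of each packed element
               (four singles or two doubles) into the low bits of the
               destination GPR and zero the remaining bits. */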
3336 case 0x050: /* movmskps */
664e0f19 3337 rm = (modrm & 7) | REX_B(s);
5af45186
FB
3338 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3339 offsetof(CPUX86State,xmm_regs[rm]));
d3eb5eae 3340 gen_helper_movmskps(cpu_tmp2_i32, cpu_env, cpu_ptr0);
a7fbcbe5 3341 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
664e0f19
FB
3342 break;
3343 case 0x150: /* movmskpd */
664e0f19 3344 rm = (modrm & 7) | REX_B(s);
5af45186
FB
3345 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3346 offsetof(CPUX86State,xmm_regs[rm]));
d3eb5eae 3347 gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, cpu_ptr0);
a7fbcbe5 3348 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
664e0f19
FB
3349 break;
3350 case 0x02a: /* cvtpi2ps */
3351 case 0x12a: /* cvtpi2pd */
d3eb5eae 3352 gen_helper_enter_mmx(cpu_env);
664e0f19 3353 if (mod != 3) {
4eeb3939 3354 gen_lea_modrm(env, s, modrm);
664e0f19 3355 op2_offset = offsetof(CPUX86State,mmx_t0);
323d1876 3356 gen_ldq_env_A0(s, op2_offset);
664e0f19
FB
3357 } else {
3358 rm = (modrm & 7);
3359 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3360 }
3361 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
5af45186
FB
3362 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3363 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
664e0f19
FB
3364 switch(b >> 8) {
3365 case 0x0:
d3eb5eae 3366 gen_helper_cvtpi2ps(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3367 break;
3368 default:
3369 case 0x1:
d3eb5eae 3370 gen_helper_cvtpi2pd(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3371 break;
3372 }
3373 break;
3374 case 0x22a: /* cvtsi2ss */
3375 case 0x32a: /* cvtsi2sd */
ab4e4aec 3376 ot = mo_64_32(s->dflag);
0af10c86 3377 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
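            /* The integer source is 32 or 64 bits wide depending on REX.W:
               the 32-bit form goes through the sse_op_table3ai (_epi) helpers,
               the 64-bit form (only reachable in 64-bit mode) through
               sse_op_table3aq. */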
664e0f19 3378 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
5af45186 3379 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4ba9938c 3380 if (ot == MO_32) {
d3eb5eae 3381 SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1];
1d1cc4d0 3382 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
d3eb5eae 3383 sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32);
28e10711 3384 } else {
11f8cdbc 3385#ifdef TARGET_X86_64
d3eb5eae 3386 SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1];
1d1cc4d0 3387 sse_fn_epl(cpu_env, cpu_ptr0, cpu_T0);
11f8cdbc
SW
3388#else
3389 goto illegal_op;
3390#endif
28e10711 3391 }
664e0f19
FB
3392 break;
3393 case 0x02c: /* cvttps2pi */
3394 case 0x12c: /* cvttpd2pi */
3395 case 0x02d: /* cvtps2pi */
3396 case 0x12d: /* cvtpd2pi */
d3eb5eae 3397 gen_helper_enter_mmx(cpu_env);
664e0f19 3398 if (mod != 3) {
4eeb3939 3399 gen_lea_modrm(env, s, modrm);
664e0f19 3400 op2_offset = offsetof(CPUX86State,xmm_t0);
323d1876 3401 gen_ldo_env_A0(s, op2_offset);
664e0f19
FB
3402 } else {
3403 rm = (modrm & 7) | REX_B(s);
3404 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3405 }
3406 op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
5af45186
FB
3407 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3408 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
664e0f19
FB
3409 switch(b) {
3410 case 0x02c:
d3eb5eae 3411 gen_helper_cvttps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3412 break;
3413 case 0x12c:
d3eb5eae 3414 gen_helper_cvttpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3415 break;
3416 case 0x02d:
d3eb5eae 3417 gen_helper_cvtps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3418 break;
3419 case 0x12d:
d3eb5eae 3420 gen_helper_cvtpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
3421 break;
3422 }
3423 break;
3424 case 0x22c: /* cvttss2si */
3425 case 0x32c: /* cvttsd2si */
3426 case 0x22d: /* cvtss2si */
3427 case 0x32d: /* cvtsd2si */
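            /* Bit 0 of b distinguishes the truncating (cvtt*) conversions from
               the ones that round per MXCSR, and the prefix bit taken from
               b selects the sd (f2) versus ss (f3) form; both index the
               helper tables below. */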
ab4e4aec 3428 ot = mo_64_32(s->dflag);
31313213 3429 if (mod != 3) {
4eeb3939 3430 gen_lea_modrm(env, s, modrm);
31313213 3431 if ((b >> 8) & 1) {
19cbd87c 3432 gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_Q(0)));
31313213 3433 } else {
1d1cc4d0
RH
3434 gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
3435 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
31313213
FB
3436 }
3437 op2_offset = offsetof(CPUX86State,xmm_t0);
3438 } else {
3439 rm = (modrm & 7) | REX_B(s);
3440 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3441 }
5af45186 3442 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
4ba9938c 3443 if (ot == MO_32) {
d3eb5eae 3444 SSEFunc_i_ep sse_fn_i_ep =
bedc2ac1 3445 sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
d3eb5eae 3446 sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);
1d1cc4d0 3447 tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
5af45186 3448 } else {
11f8cdbc 3449#ifdef TARGET_X86_64
d3eb5eae 3450 SSEFunc_l_ep sse_fn_l_ep =
bedc2ac1 3451 sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
1d1cc4d0 3452 sse_fn_l_ep(cpu_T0, cpu_env, cpu_ptr0);
11f8cdbc
SW
3453#else
3454 goto illegal_op;
3455#endif
5af45186 3456 }
1d1cc4d0 3457 gen_op_mov_reg_v(ot, reg, cpu_T0);
664e0f19
FB
3458 break;
3459 case 0xc4: /* pinsrw */
5fafdf24 3460 case 0x1c4:
d1e42c5c 3461 s->rip_offset = 1;
4ba9938c 3462 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
0af10c86 3463 val = cpu_ldub_code(env, s->pc++);
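            /* The immediate selects which 16-bit word is replaced: slots 0-7
               of an XMM register for the 66-prefixed form, slots 0-3 of an
               MMX register otherwise. */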
664e0f19
FB
3464 if (b1) {
3465 val &= 7;
1d1cc4d0 3466 tcg_gen_st16_tl(cpu_T0, cpu_env,
19cbd87c 3467 offsetof(CPUX86State,xmm_regs[reg].ZMM_W(val)));
664e0f19
FB
3468 } else {
3469 val &= 3;
1d1cc4d0 3470 tcg_gen_st16_tl(cpu_T0, cpu_env,
5af45186 3471 offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
664e0f19
FB
3472 }
3473 break;
3474 case 0xc5: /* pextrw */
5fafdf24 3475 case 0x1c5:
664e0f19
FB
3476 if (mod != 3)
3477 goto illegal_op;
ab4e4aec 3478 ot = mo_64_32(s->dflag);
0af10c86 3479 val = cpu_ldub_code(env, s->pc++);
664e0f19
FB
3480 if (b1) {
3481 val &= 7;
3482 rm = (modrm & 7) | REX_B(s);
1d1cc4d0 3483 tcg_gen_ld16u_tl(cpu_T0, cpu_env,
19cbd87c 3484 offsetof(CPUX86State,xmm_regs[rm].ZMM_W(val)));
664e0f19
FB
3485 } else {
3486 val &= 3;
3487 rm = (modrm & 7);
1d1cc4d0 3488 tcg_gen_ld16u_tl(cpu_T0, cpu_env,
5af45186 3489 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
664e0f19
FB
3490 }
3491 reg = ((modrm >> 3) & 7) | rex_r;
1d1cc4d0 3492 gen_op_mov_reg_v(ot, reg, cpu_T0);
664e0f19
FB
3493 break;
3494 case 0x1d6: /* movq ea, xmm */
3495 if (mod != 3) {
4eeb3939 3496 gen_lea_modrm(env, s, modrm);
323d1876 3497 gen_stq_env_A0(s, offsetof(CPUX86State,
19cbd87c 3498 xmm_regs[reg].ZMM_Q(0)));
664e0f19
FB
3499 } else {
3500 rm = (modrm & 7) | REX_B(s);
19cbd87c
EH
3501 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)),
3502 offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
3503 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(1)));
664e0f19
FB
3504 }
3505 break;
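            /* movq2dq copies the 64-bit MMX register into the low quadword of
               the XMM destination and clears the high quadword; movdq2q below
               goes the other way.  enter_mmx is needed because the MMX
               registers alias the x87 register file. */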
3506 case 0x2d6: /* movq2dq */
d3eb5eae 3507 gen_helper_enter_mmx(cpu_env);
480c1cdb 3508 rm = (modrm & 7);
19cbd87c 3509 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
480c1cdb 3510 offsetof(CPUX86State,fpregs[rm].mmx));
19cbd87c 3511 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)));
664e0f19
FB
3512 break;
3513 case 0x3d6: /* movdq2q */
d3eb5eae 3514 gen_helper_enter_mmx(cpu_env);
480c1cdb
FB
3515 rm = (modrm & 7) | REX_B(s);
3516 gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
19cbd87c 3517 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
664e0f19
FB
3518 break;
3519 case 0xd7: /* pmovmskb */
3520 case 0x1d7:
3521 if (mod != 3)
3522 goto illegal_op;
3523 if (b1) {
3524 rm = (modrm & 7) | REX_B(s);
5af45186 3525 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
d3eb5eae 3526 gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, cpu_ptr0);
664e0f19
FB
3527 } else {
3528 rm = (modrm & 7);
5af45186 3529 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
d3eb5eae 3530 gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, cpu_ptr0);
664e0f19
FB
3531 }
3532 reg = ((modrm >> 3) & 7) | rex_r;
a7fbcbe5 3533 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
664e0f19 3534 break;
111994ee 3535
4242b1bd 3536 case 0x138:
000cacf6 3537 case 0x038:
4242b1bd 3538 b = modrm;
111994ee
RH
3539 if ((b & 0xf0) == 0xf0) {
3540 goto do_0f_38_fx;
3541 }
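            /* Opcodes 0f 38 f0-ff are the general-purpose extensions (movbe,
               crc32, BMI, adcx/adox) handled at do_0f_38_fx below; the rest of
               the 0f 38 map is SSSE3/SSE4 packed operations dispatched via
               sse_op_table6. */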
0af10c86 3542 modrm = cpu_ldub_code(env, s->pc++);
4242b1bd
AZ
3543 rm = modrm & 7;
3544 reg = ((modrm >> 3) & 7) | rex_r;
3545 mod = (modrm >> 6) & 3;
c045af25 3546 if (b1 >= 2) {
b9f9c5b4 3547 goto unknown_op;
c045af25 3548 }
4242b1bd 3549
d3eb5eae
BS
3550 sse_fn_epp = sse_op_table6[b].op[b1];
3551 if (!sse_fn_epp) {
b9f9c5b4 3552 goto unknown_op;
c4baa050 3553 }
222a3336
AZ
3554 if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
3555 goto illegal_op;
4242b1bd
AZ
3556
3557 if (b1) {
3558 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3559 if (mod == 3) {
3560 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3561 } else {
3562 op2_offset = offsetof(CPUX86State,xmm_t0);
4eeb3939 3563 gen_lea_modrm(env, s, modrm);
222a3336
AZ
3564 switch (b) {
3565 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3566 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3567 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
323d1876 3568 gen_ldq_env_A0(s, op2_offset +
19cbd87c 3569 offsetof(ZMMReg, ZMM_Q(0)));
222a3336
AZ
3570 break;
3571 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3572 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3c5f4116
RH
3573 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
3574 s->mem_index, MO_LEUL);
222a3336 3575 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
19cbd87c 3576 offsetof(ZMMReg, ZMM_L(0)));
222a3336
AZ
3577 break;
3578 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3c5f4116
RH
3579 tcg_gen_qemu_ld_tl(cpu_tmp0, cpu_A0,
3580 s->mem_index, MO_LEUW);
222a3336 3581 tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
19cbd87c 3582 offsetof(ZMMReg, ZMM_W(0)));
222a3336
AZ
3583 break;
3584 case 0x2a: /* movntdqa */
323d1876 3585 gen_ldo_env_A0(s, op1_offset);
222a3336
AZ
3586 return;
3587 default:
323d1876 3588 gen_ldo_env_A0(s, op2_offset);
222a3336 3589 }
4242b1bd
AZ
3590 }
3591 } else {
3592 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3593 if (mod == 3) {
3594 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3595 } else {
3596 op2_offset = offsetof(CPUX86State,mmx_t0);
4eeb3939 3597 gen_lea_modrm(env, s, modrm);
323d1876 3598 gen_ldq_env_A0(s, op2_offset);
4242b1bd
AZ
3599 }
3600 }
d3eb5eae 3601 if (sse_fn_epp == SSE_SPECIAL) {
b9f9c5b4 3602 goto unknown_op;
c4baa050 3603 }
222a3336 3604
4242b1bd
AZ
3605 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3606 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
d3eb5eae 3607 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
222a3336 3608
3ca51d07
RH
3609 if (b == 0x17) {
3610 set_cc_op(s, CC_OP_EFLAGS);
3611 }
4242b1bd 3612 break;
111994ee
RH
3613
3614 case 0x238:
3615 case 0x338:
3616 do_0f_38_fx:
3617 /* Various integer extensions at 0f 38 f[0-f]. */
3618 b = modrm | (b1 << 8);
0af10c86 3619 modrm = cpu_ldub_code(env, s->pc++);
222a3336
AZ
3620 reg = ((modrm >> 3) & 7) | rex_r;
3621
111994ee
RH
3622 switch (b) {
3623 case 0x3f0: /* crc32 Gd,Eb */
3624 case 0x3f1: /* crc32 Gd,Ey */
3625 do_crc32:
3626 if (!(s->cpuid_ext_features & CPUID_EXT_SSE42)) {
3627 goto illegal_op;
3628 }
3629 if ((b & 0xff) == 0xf0) {
4ba9938c 3630 ot = MO_8;
ab4e4aec 3631 } else if (s->dflag != MO_64) {
4ba9938c 3632 ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
111994ee 3633 } else {
4ba9938c 3634 ot = MO_64;
111994ee 3635 }
4242b1bd 3636
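                /* The running CRC lives in the destination GPR: it is
                   truncated to 32 bits, combined by the helper with
                   8/16/32/64 data bits from Ev, and the result is written
                   back to the destination below. */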
24b9c00f 3637 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[reg]);
111994ee 3638 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
1d1cc4d0
RH
3639 gen_helper_crc32(cpu_T0, cpu_tmp2_i32,
3640 cpu_T0, tcg_const_i32(8 << ot));
222a3336 3641
ab4e4aec 3642 ot = mo_64_32(s->dflag);
1d1cc4d0 3643 gen_op_mov_reg_v(ot, reg, cpu_T0);
111994ee 3644 break;
222a3336 3645
111994ee
RH
3646 case 0x1f0: /* crc32 or movbe */
3647 case 0x1f1:
3648 /* For these insns, the f2/f3 prefixes are supposed to have priority
3649 over the 66 prefix, but that is not how b1 is computed above, so
3650 check for the repnz prefix explicitly here. */
3651 if (s->prefix & PREFIX_REPNZ) {
3652 goto do_crc32;
3653 }
3654 /* FALLTHRU */
3655 case 0x0f0: /* movbe Gy,My */
3656 case 0x0f1: /* movbe My,Gy */
3657 if (!(s->cpuid_ext_features & CPUID_EXT_MOVBE)) {
3658 goto illegal_op;
3659 }
ab4e4aec 3660 if (s->dflag != MO_64) {
4ba9938c 3661 ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
111994ee 3662 } else {
4ba9938c 3663 ot = MO_64;
111994ee
RH
3664 }
3665
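            /* movbe loads or stores with the bytes reversed; tagging the TCG
               memory access with MO_BE makes the byte swap part of the
               load/store itself. */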
3655a19f 3666 gen_lea_modrm(env, s, modrm);
111994ee 3667 if ((b & 1) == 0) {
1d1cc4d0 3668 tcg_gen_qemu_ld_tl(cpu_T0, cpu_A0,
3655a19f 3669 s->mem_index, ot | MO_BE);
1d1cc4d0 3670 gen_op_mov_reg_v(ot, reg, cpu_T0);
111994ee 3671 } else {
3655a19f
RH
3672 tcg_gen_qemu_st_tl(cpu_regs[reg], cpu_A0,
3673 s->mem_index, ot | MO_BE);
111994ee
RH
3674 }
3675 break;
3676
7073fbad
RH
3677 case 0x0f2: /* andn Gy, By, Ey */
3678 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
3679 || !(s->prefix & PREFIX_VEX)
3680 || s->vex_l != 0) {
3681 goto illegal_op;
3682 }
ab4e4aec 3683 ot = mo_64_32(s->dflag);
7073fbad 3684 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
1d1cc4d0
RH
3685 tcg_gen_andc_tl(cpu_T0, cpu_regs[s->vex_v], cpu_T0);
3686 gen_op_mov_reg_v(ot, reg, cpu_T0);
7073fbad
RH
3687 gen_op_update1_cc();
3688 set_cc_op(s, CC_OP_LOGICB + ot);
3689 break;
3690
c7ab7565
RH
3691 case 0x0f7: /* bextr Gy, Ey, By */
3692 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
3693 || !(s->prefix & PREFIX_VEX)
3694 || s->vex_l != 0) {
3695 goto illegal_op;
3696 }
ab4e4aec 3697 ot = mo_64_32(s->dflag);
c7ab7565
RH
3698 {
3699 TCGv bound, zero;
3700
3701 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3702 /* Extract START, and shift the operand.
3703 Shifts larger than operand size get zeros. */
3704 tcg_gen_ext8u_tl(cpu_A0, cpu_regs[s->vex_v]);
1d1cc4d0 3705 tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_A0);
c7ab7565 3706
4ba9938c 3707 bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
c7ab7565 3708 zero = tcg_const_tl(0);
1d1cc4d0
RH
3709 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_T0, cpu_A0, bound,
3710 cpu_T0, zero);
c7ab7565
RH
3711 tcg_temp_free(zero);
3712
3713 /* Extract the LEN into a mask. Lengths larger than
3714 operand size get all ones. */
3715 tcg_gen_shri_tl(cpu_A0, cpu_regs[s->vex_v], 8);
3716 tcg_gen_ext8u_tl(cpu_A0, cpu_A0);
3717 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_A0, cpu_A0, bound,
3718 cpu_A0, bound);
3719 tcg_temp_free(bound);
1d1cc4d0
RH
3720 tcg_gen_movi_tl(cpu_T1, 1);
3721 tcg_gen_shl_tl(cpu_T1, cpu_T1, cpu_A0);
3722 tcg_gen_subi_tl(cpu_T1, cpu_T1, 1);
3723 tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);
c7ab7565 3724
1d1cc4d0 3725 gen_op_mov_reg_v(ot, reg, cpu_T0);
c7ab7565
RH
3726 gen_op_update1_cc();
3727 set_cc_op(s, CC_OP_LOGICB + ot);
3728 }
3729 break;
3730
02ea1e6b
RH
3731 case 0x0f5: /* bzhi Gy, Ey, By */
3732 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3733 || !(s->prefix & PREFIX_VEX)
3734 || s->vex_l != 0) {
3735 goto illegal_op;
3736 }
ab4e4aec 3737 ot = mo_64_32(s->dflag);
02ea1e6b 3738 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
1d1cc4d0 3739 tcg_gen_ext8u_tl(cpu_T1, cpu_regs[s->vex_v]);
02ea1e6b 3740 {
4ba9938c 3741 TCGv bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
02ea1e6b
RH
3742 /* Note that since we're using BMILG (in order to get O
3743 cleared) we need to store the inverse into C. */
3744 tcg_gen_setcond_tl(TCG_COND_LT, cpu_cc_src,
1d1cc4d0
RH
3745 cpu_T1, bound);
3746 tcg_gen_movcond_tl(TCG_COND_GT, cpu_T1, cpu_T1,
3747 bound, bound, cpu_T1);
02ea1e6b
RH
3748 tcg_temp_free(bound);
3749 }
3750 tcg_gen_movi_tl(cpu_A0, -1);
1d1cc4d0
RH
3751 tcg_gen_shl_tl(cpu_A0, cpu_A0, cpu_T1);
3752 tcg_gen_andc_tl(cpu_T0, cpu_T0, cpu_A0);
3753 gen_op_mov_reg_v(ot, reg, cpu_T0);
02ea1e6b
RH
3754 gen_op_update1_cc();
3755 set_cc_op(s, CC_OP_BMILGB + ot);
3756 break;
3757
5f1f4b17
RH
3758 case 0x3f6: /* mulx By, Gy, rdx, Ey */
3759 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3760 || !(s->prefix & PREFIX_VEX)
3761 || s->vex_l != 0) {
3762 goto illegal_op;
3763 }
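            /* MULX multiplies EDX/RDX by Ey unsigned: the low half of the
               product goes to the vvvv register, the high half to the ModRM
               reg register, and no flags are modified. */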
ab4e4aec 3764 ot = mo_64_32(s->dflag);
5f1f4b17
RH
3765 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3766 switch (ot) {
5f1f4b17 3767 default:
1d1cc4d0 3768 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
a4bcea3d
RH
3769 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EDX]);
3770 tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
3771 cpu_tmp2_i32, cpu_tmp3_i32);
3772 tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], cpu_tmp2_i32);
3773 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp3_i32);
5f1f4b17
RH
3774 break;
3775#ifdef TARGET_X86_64
4ba9938c 3776 case MO_64:
1d1cc4d0
RH
3777 tcg_gen_mulu2_i64(cpu_T0, cpu_T1,
3778 cpu_T0, cpu_regs[R_EDX]);
3779 tcg_gen_mov_i64(cpu_regs[s->vex_v], cpu_T0);
3780 tcg_gen_mov_i64(cpu_regs[reg], cpu_T1);
5f1f4b17
RH
3781 break;
3782#endif
3783 }
3784 break;
3785
0592f74a
RH
3786 case 0x3f5: /* pdep Gy, By, Ey */
3787 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3788 || !(s->prefix & PREFIX_VEX)
3789 || s->vex_l != 0) {
3790 goto illegal_op;
3791 }
ab4e4aec 3792 ot = mo_64_32(s->dflag);
0592f74a
RH
3793 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3794 /* Note that by zero-extending the mask operand, we
3795 automatically handle zero-extending the result. */
ab4e4aec 3796 if (ot == MO_64) {
1d1cc4d0 3797 tcg_gen_mov_tl(cpu_T1, cpu_regs[s->vex_v]);
0592f74a 3798 } else {
1d1cc4d0 3799 tcg_gen_ext32u_tl(cpu_T1, cpu_regs[s->vex_v]);
0592f74a 3800 }
1d1cc4d0 3801 gen_helper_pdep(cpu_regs[reg], cpu_T0, cpu_T1);
0592f74a
RH
3802 break;
3803
3804 case 0x2f5: /* pext Gy, By, Ey */
3805 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3806 || !(s->prefix & PREFIX_VEX)
3807 || s->vex_l != 0) {
3808 goto illegal_op;
3809 }
ab4e4aec 3810 ot = mo_64_32(s->dflag);
0592f74a
RH
3811 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3812 /* Note that by zero-extending the mask operand, we
3813 automatically handle zero-extending the result. */
ab4e4aec 3814 if (ot == MO_64) {
1d1cc4d0 3815 tcg_gen_mov_tl(cpu_T1, cpu_regs[s->vex_v]);
0592f74a 3816 } else {
1d1cc4d0 3817 tcg_gen_ext32u_tl(cpu_T1, cpu_regs[s->vex_v]);
0592f74a 3818 }
1d1cc4d0 3819 gen_helper_pext(cpu_regs[reg], cpu_T0, cpu_T1);
0592f74a
RH
3820 break;
3821
cd7f97ca
RH
3822 case 0x1f6: /* adcx Gy, Ey */
3823 case 0x2f6: /* adox Gy, Ey */
3824 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_ADX)) {
3825 goto illegal_op;
3826 } else {
76f13133 3827 TCGv carry_in, carry_out, zero;
cd7f97ca
RH
3828 int end_op;
3829
ab4e4aec 3830 ot = mo_64_32(s->dflag);
cd7f97ca
RH
3831 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3832
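                /* adcx adds with carry in/out through CF only and adox
                   through OF only, leaving the other flags untouched.
                   CC_OP_ADCX/ADOX/ADCOX record which carry chain is currently
                   live in cc_dst/cc_src2 so that back-to-back adcx/adox pairs
                   can reuse the carry without materializing EFLAGS. */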
3833 /* Re-use the carry-out from a previous round. */
3834 TCGV_UNUSED(carry_in);
3835 carry_out = (b == 0x1f6 ? cpu_cc_dst : cpu_cc_src2);
3836 switch (s->cc_op) {
3837 case CC_OP_ADCX:
3838 if (b == 0x1f6) {
3839 carry_in = cpu_cc_dst;
3840 end_op = CC_OP_ADCX;
3841 } else {
3842 end_op = CC_OP_ADCOX;
3843 }
3844 break;
3845 case CC_OP_ADOX:
3846 if (b == 0x1f6) {
3847 end_op = CC_OP_ADCOX;
3848 } else {
3849 carry_in = cpu_cc_src2;
3850 end_op = CC_OP_ADOX;
3851 }
3852 break;
3853 case CC_OP_ADCOX:
3854 end_op = CC_OP_ADCOX;
3855 carry_in = carry_out;
3856 break;
3857 default:
c53de1a2 3858 end_op = (b == 0x1f6 ? CC_OP_ADCX : CC_OP_ADOX);
cd7f97ca
RH
3859 break;
3860 }
3861 /* If we can't reuse carry-out, get it out of EFLAGS. */
3862 if (TCGV_IS_UNUSED(carry_in)) {
3863 if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) {
3864 gen_compute_eflags(s);
3865 }
3866 carry_in = cpu_tmp0;
3867 tcg_gen_shri_tl(carry_in, cpu_cc_src,
3868 ctz32(b == 0x1f6 ? CC_C : CC_O));
3869 tcg_gen_andi_tl(carry_in, carry_in, 1);
3870 }
3871
3872 switch (ot) {
3873#ifdef TARGET_X86_64
4ba9938c 3874 case MO_32:
cd7f97ca
RH
3875 /* If we know TL is 64-bit, and we want a 32-bit
3876 result, just do everything in 64-bit arithmetic. */
3877 tcg_gen_ext32u_i64(cpu_regs[reg], cpu_regs[reg]);
1d1cc4d0
RH
3878 tcg_gen_ext32u_i64(cpu_T0, cpu_T0);
3879 tcg_gen_add_i64(cpu_T0, cpu_T0, cpu_regs[reg]);
3880 tcg_gen_add_i64(cpu_T0, cpu_T0, carry_in);
3881 tcg_gen_ext32u_i64(cpu_regs[reg], cpu_T0);
3882 tcg_gen_shri_i64(carry_out, cpu_T0, 32);
cd7f97ca
RH
3883 break;
3884#endif
3885 default:
3886 /* Otherwise compute the carry-out in two steps. */
76f13133 3887 zero = tcg_const_tl(0);
1d1cc4d0
RH
3888 tcg_gen_add2_tl(cpu_T0, carry_out,
3889 cpu_T0, zero,
76f13133
RH
3890 carry_in, zero);
3891 tcg_gen_add2_tl(cpu_regs[reg], carry_out,
3892 cpu_regs[reg], carry_out,
1d1cc4d0 3893 cpu_T0, zero);
76f13133 3894 tcg_temp_free(zero);
cd7f97ca
RH
3895 break;
3896 }
cd7f97ca
RH
3897 set_cc_op(s, end_op);
3898 }
3899 break;
3900
4a554890
RH
3901 case 0x1f7: /* shlx Gy, Ey, By */
3902 case 0x2f7: /* sarx Gy, Ey, By */
3903 case 0x3f7: /* shrx Gy, Ey, By */
3904 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3905 || !(s->prefix & PREFIX_VEX)
3906 || s->vex_l != 0) {
3907 goto illegal_op;
3908 }
ab4e4aec 3909 ot = mo_64_32(s->dflag);
4a554890 3910 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4ba9938c 3911 if (ot == MO_64) {
1d1cc4d0 3912 tcg_gen_andi_tl(cpu_T1, cpu_regs[s->vex_v], 63);
4a554890 3913 } else {
1d1cc4d0 3914 tcg_gen_andi_tl(cpu_T1, cpu_regs[s->vex_v], 31);
4a554890
RH
3915 }
3916 if (b == 0x1f7) {
1d1cc4d0 3917 tcg_gen_shl_tl(cpu_T0, cpu_T0, cpu_T1);
4a554890 3918 } else if (b == 0x2f7) {
4ba9938c 3919 if (ot != MO_64) {
1d1cc4d0 3920 tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
4a554890 3921 }
1d1cc4d0 3922 tcg_gen_sar_tl(cpu_T0, cpu_T0, cpu_T1);
4a554890 3923 } else {
4ba9938c 3924 if (ot != MO_64) {
1d1cc4d0 3925 tcg_gen_ext32u_tl(cpu_T0, cpu_T0);
4a554890 3926 }
1d1cc4d0 3927 tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_T1);
4a554890 3928 }
1d1cc4d0 3929 gen_op_mov_reg_v(ot, reg, cpu_T0);
4a554890
RH
3930 break;
3931
bc4b43dc
RH
3932 case 0x0f3:
3933 case 0x1f3:
3934 case 0x2f3:
3935 case 0x3f3: /* Group 17 */
3936 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
3937 || !(s->prefix & PREFIX_VEX)
3938 || s->vex_l != 0) {
3939 goto illegal_op;
3940 }
ab4e4aec 3941 ot = mo_64_32(s->dflag);
bc4b43dc
RH
3942 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3943
3944 switch (reg & 7) {
3945 case 1: /* blsr By,Ey */
1d1cc4d0
RH
3946 tcg_gen_neg_tl(cpu_T1, cpu_T0);
3947 tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);
3948 gen_op_mov_reg_v(ot, s->vex_v, cpu_T0);
bc4b43dc
RH
3949 gen_op_update2_cc();
3950 set_cc_op(s, CC_OP_BMILGB + ot);
3951 break;
3952
3953 case 2: /* blsmsk By,Ey */
1d1cc4d0
RH
3954 tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
3955 tcg_gen_subi_tl(cpu_T0, cpu_T0, 1);
3956 tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_cc_src);
3957 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
bc4b43dc
RH
3958 set_cc_op(s, CC_OP_BMILGB + ot);
3959 break;
3960
3961 case 3: /* blsi By, Ey */
1d1cc4d0
RH
3962 tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
3963 tcg_gen_subi_tl(cpu_T0, cpu_T0, 1);
3964 tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_cc_src);
3965 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
bc4b43dc
RH
3966 set_cc_op(s, CC_OP_BMILGB + ot);
3967 break;
3968
3969 default:
b9f9c5b4 3970 goto unknown_op;
bc4b43dc
RH
3971 }
3972 break;
3973
111994ee 3974 default:
b9f9c5b4 3975 goto unknown_op;
111994ee 3976 }
222a3336 3977 break;
111994ee 3978
222a3336
AZ
3979 case 0x03a:
3980 case 0x13a:
4242b1bd 3981 b = modrm;
0af10c86 3982 modrm = cpu_ldub_code(env, s->pc++);
4242b1bd
AZ
3983 rm = modrm & 7;
3984 reg = ((modrm >> 3) & 7) | rex_r;
3985 mod = (modrm >> 6) & 3;
c045af25 3986 if (b1 >= 2) {
b9f9c5b4 3987 goto unknown_op;
c045af25 3988 }
4242b1bd 3989
d3eb5eae
BS
3990 sse_fn_eppi = sse_op_table7[b].op[b1];
3991 if (!sse_fn_eppi) {
b9f9c5b4 3992 goto unknown_op;
c4baa050 3993 }
222a3336
AZ
3994 if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
3995 goto illegal_op;
3996
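        /* SSE_SPECIAL marks table entries (the pextr/pinsr/insertps family)
           that need hand-written translation instead of a plain two-operand
           helper call. */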
d3eb5eae 3997 if (sse_fn_eppi == SSE_SPECIAL) {
ab4e4aec 3998 ot = mo_64_32(s->dflag);
222a3336
AZ
3999 rm = (modrm & 7) | REX_B(s);
4000 if (mod != 3)
4eeb3939 4001 gen_lea_modrm(env, s, modrm);
222a3336 4002 reg = ((modrm >> 3) & 7) | rex_r;
0af10c86 4003 val = cpu_ldub_code(env, s->pc++);
222a3336
AZ
4004 switch (b) {
4005 case 0x14: /* pextrb */
1d1cc4d0 4006 tcg_gen_ld8u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
19cbd87c 4007 xmm_regs[reg].ZMM_B(val & 15)));
3523e4bd 4008 if (mod == 3) {
1d1cc4d0 4009 gen_op_mov_reg_v(ot, rm, cpu_T0);
3523e4bd 4010 } else {
1d1cc4d0 4011 tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
3523e4bd
RH
4012 s->mem_index, MO_UB);
4013 }
222a3336
AZ
4014 break;
4015 case 0x15: /* pextrw */
1d1cc4d0 4016 tcg_gen_ld16u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
19cbd87c 4017 xmm_regs[reg].ZMM_W(val & 7)));
3523e4bd 4018 if (mod == 3) {
1d1cc4d0 4019 gen_op_mov_reg_v(ot, rm, cpu_T0);
3523e4bd 4020 } else {
1d1cc4d0 4021 tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
3523e4bd
RH
4022 s->mem_index, MO_LEUW);
4023 }
222a3336
AZ
4024 break;
4025 case 0x16:
4ba9938c 4026 if (ot == MO_32) { /* pextrd */
222a3336
AZ
4027 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4028 offsetof(CPUX86State,
19cbd87c 4029 xmm_regs[reg].ZMM_L(val & 3)));
3523e4bd 4030 if (mod == 3) {
a7fbcbe5 4031 tcg_gen_extu_i32_tl(cpu_regs[rm], cpu_tmp2_i32);
3523e4bd 4032 } else {
d5601ad0
RH
4033 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
4034 s->mem_index, MO_LEUL);
3523e4bd 4035 }
222a3336 4036 } else { /* pextrq */
a7812ae4 4037#ifdef TARGET_X86_64
222a3336
AZ
4038 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
4039 offsetof(CPUX86State,
19cbd87c 4040 xmm_regs[reg].ZMM_Q(val & 1)));
3523e4bd 4041 if (mod == 3) {
a7fbcbe5 4042 tcg_gen_mov_i64(cpu_regs[rm], cpu_tmp1_i64);
3523e4bd
RH
4043 } else {
4044 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
4045 s->mem_index, MO_LEQ);
4046 }
a7812ae4
PB
4047#else
4048 goto illegal_op;
4049#endif
222a3336
AZ
4050 }
4051 break;
4052 case 0x17: /* extractps */
1d1cc4d0 4053 tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
19cbd87c 4054 xmm_regs[reg].ZMM_L(val & 3)));
3523e4bd 4055 if (mod == 3) {
1d1cc4d0 4056 gen_op_mov_reg_v(ot, rm, cpu_T0);
3523e4bd 4057 } else {
1d1cc4d0 4058 tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
3523e4bd
RH
4059 s->mem_index, MO_LEUL);
4060 }
222a3336
AZ
4061 break;
4062 case 0x20: /* pinsrb */
3c5f4116 4063 if (mod == 3) {
1d1cc4d0 4064 gen_op_mov_v_reg(MO_32, cpu_T0, rm);
3c5f4116 4065 } else {
1d1cc4d0 4066 tcg_gen_qemu_ld_tl(cpu_T0, cpu_A0,
3c5f4116
RH
4067 s->mem_index, MO_UB);
4068 }
1d1cc4d0 4069 tcg_gen_st8_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
19cbd87c 4070 xmm_regs[reg].ZMM_B(val & 15)));
222a3336
AZ
4071 break;
4072 case 0x21: /* insertps */
a7812ae4 4073 if (mod == 3) {
222a3336
AZ
4074 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4075 offsetof(CPUX86State,xmm_regs[rm]
19cbd87c 4076 .ZMM_L((val >> 6) & 3)));
a7812ae4 4077 } else {
3c5f4116
RH
4078 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
4079 s->mem_index, MO_LEUL);
a7812ae4 4080 }
222a3336
AZ
4081 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4082 offsetof(CPUX86State,xmm_regs[reg]
19cbd87c 4083 .ZMM_L((val >> 4) & 3)));
222a3336
AZ
4084 if ((val >> 0) & 1)
4085 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4086 cpu_env, offsetof(CPUX86State,
19cbd87c 4087 xmm_regs[reg].ZMM_L(0)));
222a3336
AZ
4088 if ((val >> 1) & 1)
4089 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4090 cpu_env, offsetof(CPUX86State,
19cbd87c 4091 xmm_regs[reg].ZMM_L(1)));
222a3336
AZ
4092 if ((val >> 2) & 1)
4093 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4094 cpu_env, offsetof(CPUX86State,
19cbd87c 4095 xmm_regs[reg].ZMM_L(2)));
222a3336
AZ
4096 if ((val >> 3) & 1)
4097 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4098 cpu_env, offsetof(CPUX86State,
19cbd87c 4099 xmm_regs[reg].ZMM_L(3)));
222a3336
AZ
4100 break;
4101 case 0x22:
4ba9938c 4102 if (ot == MO_32) { /* pinsrd */
3c5f4116 4103 if (mod == 3) {
80b02013 4104 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[rm]);
3c5f4116 4105 } else {
80b02013
RH
4106 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
4107 s->mem_index, MO_LEUL);
3c5f4116 4108 }
222a3336
AZ
4109 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4110 offsetof(CPUX86State,
19cbd87c 4111 xmm_regs[reg].ZMM_L(val & 3)));
222a3336 4112 } else { /* pinsrq */
a7812ae4 4113#ifdef TARGET_X86_64
3c5f4116 4114 if (mod == 3) {
222a3336 4115 gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
3c5f4116
RH
4116 } else {
4117 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
4118 s->mem_index, MO_LEQ);
4119 }
222a3336
AZ
4120 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
4121 offsetof(CPUX86State,
19cbd87c 4122 xmm_regs[reg].ZMM_Q(val & 1)));
a7812ae4
PB
4123#else
4124 goto illegal_op;
4125#endif
222a3336
AZ
4126 }
4127 break;
4128 }
4129 return;
4130 }
4242b1bd
AZ
4131
4132 if (b1) {
4133 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4134 if (mod == 3) {
4135 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
4136 } else {
4137 op2_offset = offsetof(CPUX86State,xmm_t0);
4eeb3939 4138 gen_lea_modrm(env, s, modrm);
323d1876 4139 gen_ldo_env_A0(s, op2_offset);
4242b1bd
AZ
4140 }
4141 } else {
4142 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4143 if (mod == 3) {
4144 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4145 } else {
4146 op2_offset = offsetof(CPUX86State,mmx_t0);
4eeb3939 4147 gen_lea_modrm(env, s, modrm);
323d1876 4148 gen_ldq_env_A0(s, op2_offset);
4242b1bd
AZ
4149 }
4150 }
0af10c86 4151 val = cpu_ldub_code(env, s->pc++);
4242b1bd 4152
222a3336 4153 if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
3ca51d07 4154 set_cc_op(s, CC_OP_EFLAGS);
222a3336 4155
ab4e4aec 4156 if (s->dflag == MO_64) {
222a3336
AZ
4157 /* The helper must use entire 64-bit gp registers */
4158 val |= 1 << 8;
ab4e4aec 4159 }
222a3336
AZ
4160 }
4161
4242b1bd
AZ
4162 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4163 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
d3eb5eae 4164 sse_fn_eppi(cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4242b1bd 4165 break;
e2c3c2c5
RH
4166
4167 case 0x33a:
4168 /* Various integer extensions at 0f 3a f[0-f]. */
4169 b = modrm | (b1 << 8);
4170 modrm = cpu_ldub_code(env, s->pc++);
4171 reg = ((modrm >> 3) & 7) | rex_r;
4172
4173 switch (b) {
4174 case 0x3f0: /* rorx Gy,Ey, Ib */
4175 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4176 || !(s->prefix & PREFIX_VEX)
4177 || s->vex_l != 0) {
4178 goto illegal_op;
4179 }
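            /* rorx rotates Ey right by an immediate count without touching
               the flags; the 32-bit form rotates within 32 bits and
               zero-extends the result into the destination. */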
ab4e4aec 4180 ot = mo_64_32(s->dflag);
e2c3c2c5
RH
4181 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4182 b = cpu_ldub_code(env, s->pc++);
4ba9938c 4183 if (ot == MO_64) {
1d1cc4d0 4184 tcg_gen_rotri_tl(cpu_T0, cpu_T0, b & 63);
e2c3c2c5 4185 } else {
1d1cc4d0 4186 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
e2c3c2c5 4187 tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, b & 31);
1d1cc4d0 4188 tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
e2c3c2c5 4189 }
1d1cc4d0 4190 gen_op_mov_reg_v(ot, reg, cpu_T0);
e2c3c2c5
RH
4191 break;
4192
4193 default:
b9f9c5b4 4194 goto unknown_op;
e2c3c2c5
RH
4195 }
4196 break;
4197
664e0f19 4198 default:
b9f9c5b4
RH
4199 unknown_op:
4200 gen_unknown_opcode(env, s);
4201 return;
664e0f19
FB
4202 }
4203 } else {
4204 /* generic MMX or SSE operation */
d1e42c5c 4205 switch(b) {
d1e42c5c
FB
4206 case 0x70: /* pshufx insn */
4207 case 0xc6: /* pshufx insn */
4208 case 0xc2: /* compare insns */
4209 s->rip_offset = 1;
4210 break;
4211 default:
4212 break;
664e0f19
FB
4213 }
4214 if (is_xmm) {
4215 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4216 if (mod != 3) {
cb48da7f
RH
4217 int sz = 4;
4218
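                /* sz is the log2 size of the memory access actually
                   performed: the scalar ss/sd and (u)comis forms only read
                   32 or 64 bits so that no bytes beyond the architectural
                   operand are touched; everything else reads the full
                   128 bits. */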
4eeb3939 4219 gen_lea_modrm(env, s, modrm);
664e0f19 4220 op2_offset = offsetof(CPUX86State,xmm_t0);
cb48da7f
RH
4221
4222 switch (b) {
4223 case 0x50 ... 0x5a:
4224 case 0x5c ... 0x5f:
4225 case 0xc2:
4226 /* Most sse scalar operations. */
664e0f19 4227 if (b1 == 2) {
cb48da7f
RH
4228 sz = 2;
4229 } else if (b1 == 3) {
4230 sz = 3;
4231 }
4232 break;
4233
4234 case 0x2e: /* ucomis[sd] */
4235 case 0x2f: /* comis[sd] */
4236 if (b1 == 0) {
4237 sz = 2;
664e0f19 4238 } else {
cb48da7f 4239 sz = 3;
664e0f19 4240 }
cb48da7f
RH
4241 break;
4242 }
4243
4244 switch (sz) {
4245 case 2:
4246 /* 32 bit access */
1d1cc4d0
RH
4247 gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
4248 tcg_gen_st32_tl(cpu_T0, cpu_env,
19cbd87c 4249 offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
cb48da7f
RH
4250 break;
4251 case 3:
4252 /* 64 bit access */
19cbd87c 4253 gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_D(0)));
cb48da7f
RH
4254 break;
4255 default:
4256 /* 128 bit access */
323d1876 4257 gen_ldo_env_A0(s, op2_offset);
cb48da7f 4258 break;
664e0f19
FB
4259 }
4260 } else {
4261 rm = (modrm & 7) | REX_B(s);
4262 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
4263 }
4264 } else {
4265 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4266 if (mod != 3) {
4eeb3939 4267 gen_lea_modrm(env, s, modrm);
664e0f19 4268 op2_offset = offsetof(CPUX86State,mmx_t0);
323d1876 4269 gen_ldq_env_A0(s, op2_offset);
664e0f19
FB
4270 } else {
4271 rm = (modrm & 7);
4272 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4273 }
4274 }
4275 switch(b) {
a35f3ec7 4276 case 0x0f: /* 3DNow! data insns */
0af10c86 4277 val = cpu_ldub_code(env, s->pc++);
d3eb5eae
BS
4278 sse_fn_epp = sse_op_table5[val];
4279 if (!sse_fn_epp) {
b9f9c5b4
RH
4280 goto unknown_op;
4281 }
4282 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) {
a35f3ec7 4283 goto illegal_op;
c4baa050 4284 }
5af45186
FB
4285 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4286 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
d3eb5eae 4287 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
a35f3ec7 4288 break;
664e0f19
FB
4289 case 0x70: /* pshufx insn */
4290 case 0xc6: /* pshufx insn */
0af10c86 4291 val = cpu_ldub_code(env, s->pc++);
5af45186
FB
4292 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4293 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
c4baa050 4294 /* XXX: introduce a new table? */
d3eb5eae 4295 sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp;
c4baa050 4296 sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
664e0f19
FB
4297 break;
4298 case 0xc2:
4299 /* compare insns */
0af10c86 4300 val = cpu_ldub_code(env, s->pc++);
664e0f19 4301 if (val >= 8)
b9f9c5b4 4302 goto unknown_op;
d3eb5eae 4303 sse_fn_epp = sse_op_table4[val][b1];
c4baa050 4304
5af45186
FB
4305 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4306 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
d3eb5eae 4307 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19 4308 break;
b8b6a50b
FB
4309 case 0xf7:
4310 /* maskmov : we must prepare A0 */
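            /* maskmovq/maskmovdqu store the byte-selected source to [rDI] in
               the DS segment (or the overriding segment), so A0 is built by
               hand here rather than coming from the ModRM decode. */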
4311 if (mod != 3)
4312 goto illegal_op;
1d71ddb1
RH
4313 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EDI]);
4314 gen_extu(s->aflag, cpu_A0);
b8b6a50b
FB
4315 gen_add_A0_ds_seg(s);
4316
4317 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4318 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
c4baa050 4319 /* XXX: introduce a new table? */
d3eb5eae
BS
4320 sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp;
4321 sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0);
b8b6a50b 4322 break;
664e0f19 4323 default:
5af45186
FB
4324 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4325 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
d3eb5eae 4326 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19
FB
4327 break;
4328 }
4329 if (b == 0x2e || b == 0x2f) {
3ca51d07 4330 set_cc_op(s, CC_OP_EFLAGS);
664e0f19
FB
4331 }
4332 }
4333}
4334
2c0262af
FB
4335/* convert one instruction. s->is_jmp is set if the translation must
4336 be stopped. Return the next pc value */
0af10c86
BS
4337static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
4338 target_ulong pc_start)
2c0262af 4339{
ab4e4aec 4340 int b, prefixes;
d67dc9e6 4341 int shift;
ab4e4aec 4342 TCGMemOp ot, aflag, dflag;
4eeb3939 4343 int modrm, reg, rm, mod, op, opreg, val;
14ce26e7
FB
4344 target_ulong next_eip, tval;
4345 int rex_w, rex_r;
2c0262af 4346
b9f9c5b4 4347 s->pc_start = s->pc = pc_start;
2c0262af 4348 prefixes = 0;
2c0262af 4349 s->override = -1;
14ce26e7
FB
4350 rex_w = -1;
4351 rex_r = 0;
4352#ifdef TARGET_X86_64
4353 s->rex_x = 0;
4354 s->rex_b = 0;
5fafdf24 4355 x86_64_hregs = 0;
14ce26e7
FB
4356#endif
4357 s->rip_offset = 0; /* for relative ip address */
701ed211
RH
4358 s->vex_l = 0;
4359 s->vex_v = 0;
2c0262af 4360 next_byte:
0af10c86 4361 b = cpu_ldub_code(env, s->pc);
2c0262af 4362 s->pc++;
4a6fd938
RH
4363 /* Collect prefixes. */
4364 switch (b) {
4365 case 0xf3:
4366 prefixes |= PREFIX_REPZ;
4367 goto next_byte;
4368 case 0xf2:
4369 prefixes |= PREFIX_REPNZ;
4370 goto next_byte;
4371 case 0xf0:
4372 prefixes |= PREFIX_LOCK;
4373 goto next_byte;
4374 case 0x2e:
4375 s->override = R_CS;
4376 goto next_byte;
4377 case 0x36:
4378 s->override = R_SS;
4379 goto next_byte;
4380 case 0x3e:
4381 s->override = R_DS;
4382 goto next_byte;
4383 case 0x26:
4384 s->override = R_ES;
4385 goto next_byte;
4386 case 0x64:
4387 s->override = R_FS;
4388 goto next_byte;
4389 case 0x65:
4390 s->override = R_GS;
4391 goto next_byte;
4392 case 0x66:
4393 prefixes |= PREFIX_DATA;
4394 goto next_byte;
4395 case 0x67:
4396 prefixes |= PREFIX_ADR;
4397 goto next_byte;
14ce26e7 4398#ifdef TARGET_X86_64
4a6fd938
RH
4399 case 0x40 ... 0x4f:
4400 if (CODE64(s)) {
14ce26e7
FB
4401 /* REX prefix */
4402 rex_w = (b >> 3) & 1;
4403 rex_r = (b & 0x4) << 1;
4404 s->rex_x = (b & 0x2) << 2;
4405 REX_B(s) = (b & 0x1) << 3;
4406 x86_64_hregs = 1; /* select uniform byte register addressing */
4407 goto next_byte;
4408 }
4a6fd938
RH
4409 break;
4410#endif
701ed211
RH
4411 case 0xc5: /* 2-byte VEX */
4412 case 0xc4: /* 3-byte VEX */
4413 /* VEX prefixes are only recognized in 32- and 64-bit code outside
4414 vm86 mode; otherwise 0xc4/0xc5 decode as the LES or LDS instructions. */
4415 if (s->code32 && !s->vm86) {
4416 static const int pp_prefix[4] = {
4417 0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ
4418 };
4419 int vex3, vex2 = cpu_ldub_code(env, s->pc);
4420
4421 if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
4422 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
4423 otherwise the instruction is LES or LDS. */
4424 break;
4425 }
4426 s->pc++;
4427
085d8134 4428 /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
701ed211
RH
4429 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ
4430 | PREFIX_LOCK | PREFIX_DATA)) {
4431 goto illegal_op;
4432 }
4433#ifdef TARGET_X86_64
4434 if (x86_64_hregs) {
4435 goto illegal_op;
4436 }
4437#endif
4438 rex_r = (~vex2 >> 4) & 8;
4439 if (b == 0xc5) {
4440 vex3 = vex2;
4441 b = cpu_ldub_code(env, s->pc++);
4442 } else {
4443#ifdef TARGET_X86_64
4444 s->rex_x = (~vex2 >> 3) & 8;
4445 s->rex_b = (~vex2 >> 2) & 8;
4446#endif
4447 vex3 = cpu_ldub_code(env, s->pc++);
4448 rex_w = (vex3 >> 7) & 1;
4449 switch (vex2 & 0x1f) {
4450 case 0x01: /* Implied 0f leading opcode bytes. */
4451 b = cpu_ldub_code(env, s->pc++) | 0x100;
4452 break;
4453 case 0x02: /* Implied 0f 38 leading opcode bytes. */
4454 b = 0x138;
4455 break;
4456 case 0x03: /* Implied 0f 3a leading opcode bytes. */
4457 b = 0x13a;
4458 break;
4459 default: /* Reserved for future use. */
b9f9c5b4 4460 goto unknown_op;
701ed211
RH
4461 }
4462 }
4463 s->vex_v = (~vex3 >> 3) & 0xf;
4464 s->vex_l = (vex3 >> 2) & 1;
4465 prefixes |= pp_prefix[vex3 & 3] | PREFIX_VEX;
4466 }
4467 break;
4a6fd938
RH
4468 }
4469
4470 /* Post-process prefixes. */
4a6fd938 4471 if (CODE64(s)) {
dec3fc96
RH
4472 /* In 64-bit mode, the default data size is 32-bit. Select 64-bit
4473 data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
4474 over 0x66 if both are present. */
ab4e4aec 4475 dflag = (rex_w > 0 ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32);
dec3fc96 4476 /* In 64-bit mode, 0x67 selects 32-bit addressing. */
1d71ddb1 4477 aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64);
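        /* For example, in 64-bit code 89 c8 is mov %ecx,%eax, 48 89 c8
           (REX.W) is mov %rcx,%rax, and 66 89 c8 is mov %cx,%ax. */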
dec3fc96
RH
4478 } else {
4479 /* In 16/32-bit mode, 0x66 selects the opposite data size. */
ab4e4aec
RH
4480 if (s->code32 ^ ((prefixes & PREFIX_DATA) != 0)) {
4481 dflag = MO_32;
4482 } else {
4483 dflag = MO_16;
14ce26e7 4484 }
dec3fc96 4485 /* In 16/32-bit mode, 0x67 selects the opposite addressing. */
1d71ddb1
RH
4486 if (s->code32 ^ ((prefixes & PREFIX_ADR) != 0)) {
4487 aflag = MO_32;
4488 } else {
4489 aflag = MO_16;
14ce26e7 4490 }
2c0262af
FB
4491 }
4492
2c0262af
FB
4493 s->prefix = prefixes;
4494 s->aflag = aflag;
4495 s->dflag = dflag;
4496
4497 /* lock generation */
4498 if (prefixes & PREFIX_LOCK)
a7812ae4 4499 gen_helper_lock();
2c0262af
FB
4500
4501 /* now check op code */
4502 reswitch:
4503 switch(b) {
4504 case 0x0f:
4505 /**************************/
4506 /* extended op code */
0af10c86 4507 b = cpu_ldub_code(env, s->pc++) | 0x100;
2c0262af 4508 goto reswitch;
3b46e624 4509
2c0262af
FB
4510 /**************************/
4511 /* arith & logic */
4512 case 0x00 ... 0x05:
4513 case 0x08 ... 0x0d:
4514 case 0x10 ... 0x15:
4515 case 0x18 ... 0x1d:
4516 case 0x20 ... 0x25:
4517 case 0x28 ... 0x2d:
4518 case 0x30 ... 0x35:
4519 case 0x38 ... 0x3d:
4520 {
4521 int op, f, val;
4522 op = (b >> 3) & 7;
4523 f = (b >> 1) & 3;
4524
ab4e4aec 4525 ot = mo_b_d(b, dflag);
3b46e624 4526
2c0262af
FB
4527 switch(f) {
4528 case 0: /* OP Ev, Gv */
0af10c86 4529 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 4530 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af 4531 mod = (modrm >> 6) & 3;
14ce26e7 4532 rm = (modrm & 7) | REX_B(s);
2c0262af 4533 if (mod != 3) {
4eeb3939 4534 gen_lea_modrm(env, s, modrm);
2c0262af
FB
4535 opreg = OR_TMP0;
4536 } else if (op == OP_XORL && rm == reg) {
4537 xor_zero:
4538 /* xor reg, reg optimisation */
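                    /* xor r,r always yields zero with a known flags pattern,
                       so it is emitted as a plain register clear with
                       CC_OP_CLR instead of going through the generic ALU
                       path. */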
436ff2d2 4539 set_cc_op(s, CC_OP_CLR);
1d1cc4d0
RH
4540 tcg_gen_movi_tl(cpu_T0, 0);
4541 gen_op_mov_reg_v(ot, reg, cpu_T0);
2c0262af
FB
4542 break;
4543 } else {
4544 opreg = rm;
4545 }
1d1cc4d0 4546 gen_op_mov_v_reg(ot, cpu_T1, reg);
2c0262af
FB
4547 gen_op(s, op, ot, opreg);
4548 break;
4549 case 1: /* OP Gv, Ev */
0af10c86 4550 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 4551 mod = (modrm >> 6) & 3;
14ce26e7
FB
4552 reg = ((modrm >> 3) & 7) | rex_r;
4553 rm = (modrm & 7) | REX_B(s);
2c0262af 4554 if (mod != 3) {
4eeb3939 4555 gen_lea_modrm(env, s, modrm);
1d1cc4d0 4556 gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
2c0262af
FB
4557 } else if (op == OP_XORL && rm == reg) {
4558 goto xor_zero;
4559 } else {
1d1cc4d0 4560 gen_op_mov_v_reg(ot, cpu_T1, rm);
2c0262af
FB
4561 }
4562 gen_op(s, op, ot, reg);
4563 break;
4564 case 2: /* OP A, Iv */
0af10c86 4565 val = insn_get(env, s, ot);
1d1cc4d0 4566 tcg_gen_movi_tl(cpu_T1, val);
2c0262af
FB
4567 gen_op(s, op, ot, OR_EAX);
4568 break;
4569 }
4570 }
4571 break;
4572
ec9d6075
FB
4573 case 0x82:
4574 if (CODE64(s))
4575 goto illegal_op;
2c0262af
FB
4576 case 0x80: /* GRP1 */
4577 case 0x81:
4578 case 0x83:
4579 {
4580 int val;
4581
ab4e4aec 4582 ot = mo_b_d(b, dflag);
3b46e624 4583
0af10c86 4584 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 4585 mod = (modrm >> 6) & 3;
14ce26e7 4586 rm = (modrm & 7) | REX_B(s);
2c0262af 4587 op = (modrm >> 3) & 7;
3b46e624 4588
2c0262af 4589 if (mod != 3) {
14ce26e7
FB
4590 if (b == 0x83)
4591 s->rip_offset = 1;
4592 else
4593 s->rip_offset = insn_const_size(ot);
4eeb3939 4594 gen_lea_modrm(env, s, modrm);
2c0262af
FB
4595 opreg = OR_TMP0;
4596 } else {
14ce26e7 4597 opreg = rm;
2c0262af
FB
4598 }
4599
4600 switch(b) {
4601 default:
4602 case 0x80:
4603 case 0x81:
d64477af 4604 case 0x82:
0af10c86 4605 val = insn_get(env, s, ot);
2c0262af
FB
4606 break;
4607 case 0x83:
4ba9938c 4608 val = (int8_t)insn_get(env, s, MO_8);
2c0262af
FB
4609 break;
4610 }
1d1cc4d0 4611 tcg_gen_movi_tl(cpu_T1, val);
2c0262af
FB
4612 gen_op(s, op, ot, opreg);
4613 }
4614 break;
4615
4616 /**************************/
4617 /* inc, dec, and other misc arith */
4618 case 0x40 ... 0x47: /* inc Gv */
ab4e4aec 4619 ot = dflag;
2c0262af
FB
4620 gen_inc(s, ot, OR_EAX + (b & 7), 1);
4621 break;
4622 case 0x48 ... 0x4f: /* dec Gv */
ab4e4aec 4623 ot = dflag;
2c0262af
FB
4624 gen_inc(s, ot, OR_EAX + (b & 7), -1);
4625 break;
4626 case 0xf6: /* GRP3 */
4627 case 0xf7:
ab4e4aec 4628 ot = mo_b_d(b, dflag);
2c0262af 4629
0af10c86 4630 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 4631 mod = (modrm >> 6) & 3;
14ce26e7 4632 rm = (modrm & 7) | REX_B(s);
2c0262af
FB
4633 op = (modrm >> 3) & 7;
4634 if (mod != 3) {
14ce26e7
FB
4635 if (op == 0)
4636 s->rip_offset = insn_const_size(ot);
4eeb3939 4637 gen_lea_modrm(env, s, modrm);
1d1cc4d0 4638 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
2c0262af 4639 } else {
1d1cc4d0 4640 gen_op_mov_v_reg(ot, cpu_T0, rm);
2c0262af
FB
4641 }
4642
4643 switch(op) {
4644 case 0: /* test */
0af10c86 4645 val = insn_get(env, s, ot);
1d1cc4d0 4646 tcg_gen_movi_tl(cpu_T1, val);
2c0262af 4647 gen_op_testl_T0_T1_cc();
3ca51d07 4648 set_cc_op(s, CC_OP_LOGICB + ot);
2c0262af
FB
4649 break;
4650 case 2: /* not */
1d1cc4d0 4651 tcg_gen_not_tl(cpu_T0, cpu_T0);
2c0262af 4652 if (mod != 3) {
1d1cc4d0 4653 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
2c0262af 4654 } else {
1d1cc4d0 4655 gen_op_mov_reg_v(ot, rm, cpu_T0);
2c0262af
FB
4656 }
4657 break;
4658 case 3: /* neg */
1d1cc4d0 4659 tcg_gen_neg_tl(cpu_T0, cpu_T0);
2c0262af 4660 if (mod != 3) {
1d1cc4d0 4661 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
2c0262af 4662 } else {
1d1cc4d0 4663 gen_op_mov_reg_v(ot, rm, cpu_T0);
2c0262af
FB
4664 }
4665 gen_op_update_neg_cc();
3ca51d07 4666 set_cc_op(s, CC_OP_SUBB + ot);
2c0262af
FB
4667 break;
4668 case 4: /* mul */
4669 switch(ot) {
4ba9938c 4670 case MO_8:
1d1cc4d0
RH
4671 gen_op_mov_v_reg(MO_8, cpu_T1, R_EAX);
4672 tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
4673 tcg_gen_ext8u_tl(cpu_T1, cpu_T1);
0211e5af 4674 /* XXX: use 32 bit mul which could be faster */
1d1cc4d0
RH
4675 tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
4676 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
4677 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
4678 tcg_gen_andi_tl(cpu_cc_src, cpu_T0, 0xff00);
3ca51d07 4679 set_cc_op(s, CC_OP_MULB);
2c0262af 4680 break;
4ba9938c 4681 case MO_16:
1d1cc4d0
RH
4682 gen_op_mov_v_reg(MO_16, cpu_T1, R_EAX);
4683 tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
4684 tcg_gen_ext16u_tl(cpu_T1, cpu_T1);
0211e5af 4685 /* XXX: use 32 bit mul which could be faster */
1d1cc4d0
RH
4686 tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
4687 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
4688 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
4689 tcg_gen_shri_tl(cpu_T0, cpu_T0, 16);
4690 gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
4691 tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
3ca51d07 4692 set_cc_op(s, CC_OP_MULW);
2c0262af
FB
4693 break;
4694 default:
4ba9938c 4695 case MO_32:
1d1cc4d0 4696 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
a4bcea3d
RH
4697 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
4698 tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
4699 cpu_tmp2_i32, cpu_tmp3_i32);
4700 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
4701 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
4702 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4703 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
3ca51d07 4704 set_cc_op(s, CC_OP_MULL);
2c0262af 4705 break;
14ce26e7 4706#ifdef TARGET_X86_64
4ba9938c 4707 case MO_64:
a4bcea3d 4708 tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
1d1cc4d0 4709 cpu_T0, cpu_regs[R_EAX]);
a4bcea3d
RH
4710 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4711 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
3ca51d07 4712 set_cc_op(s, CC_OP_MULQ);
14ce26e7
FB
4713 break;
4714#endif
2c0262af 4715 }
2c0262af
FB
4716 break;
4717 case 5: /* imul */
4718 switch(ot) {
4ba9938c 4719 case MO_8:
1d1cc4d0
RH
4720 gen_op_mov_v_reg(MO_8, cpu_T1, R_EAX);
4721 tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
4722 tcg_gen_ext8s_tl(cpu_T1, cpu_T1);
0211e5af 4723 /* XXX: use 32 bit mul which could be faster */
1d1cc4d0
RH
4724 tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
4725 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
4726 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
4727 tcg_gen_ext8s_tl(cpu_tmp0, cpu_T0);
4728 tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
3ca51d07 4729 set_cc_op(s, CC_OP_MULB);
2c0262af 4730 break;
4ba9938c 4731 case MO_16:
1d1cc4d0
RH
4732 gen_op_mov_v_reg(MO_16, cpu_T1, R_EAX);
4733 tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
4734 tcg_gen_ext16s_tl(cpu_T1, cpu_T1);
0211e5af 4735 /* XXX: use 32 bit mul which could be faster */
1d1cc4d0
RH
4736 tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
4737 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
4738 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
4739 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T0);
4740 tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
4741 tcg_gen_shri_tl(cpu_T0, cpu_T0, 16);
4742 gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
3ca51d07 4743 set_cc_op(s, CC_OP_MULW);
2c0262af
FB
4744 break;
4745 default:
4ba9938c 4746 case MO_32:
1d1cc4d0 4747 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
a4bcea3d
RH
4748 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
4749 tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
4750 cpu_tmp2_i32, cpu_tmp3_i32);
4751 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
4752 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
4753 tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
4754 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4755 tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
4756 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
3ca51d07 4757 set_cc_op(s, CC_OP_MULL);
2c0262af 4758 break;
14ce26e7 4759#ifdef TARGET_X86_64
4ba9938c 4760 case MO_64:
a4bcea3d 4761 tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
1d1cc4d0 4762 cpu_T0, cpu_regs[R_EAX]);
a4bcea3d
RH
4763 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4764 tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
4765 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
3ca51d07 4766 set_cc_op(s, CC_OP_MULQ);
14ce26e7
FB
4767 break;
4768#endif
2c0262af 4769 }
2c0262af
FB
4770 break;
4771 case 6: /* div */
4772 switch(ot) {
4ba9938c 4773 case MO_8:
1d1cc4d0 4774 gen_helper_divb_AL(cpu_env, cpu_T0);
2c0262af 4775 break;
4ba9938c 4776 case MO_16:
1d1cc4d0 4777 gen_helper_divw_AX(cpu_env, cpu_T0);
2c0262af
FB
4778 break;
4779 default:
4ba9938c 4780 case MO_32:
1d1cc4d0 4781 gen_helper_divl_EAX(cpu_env, cpu_T0);
14ce26e7
FB
4782 break;
4783#ifdef TARGET_X86_64
4ba9938c 4784 case MO_64:
1d1cc4d0 4785 gen_helper_divq_EAX(cpu_env, cpu_T0);
2c0262af 4786 break;
14ce26e7 4787#endif
2c0262af
FB
4788 }
4789 break;
4790 case 7: /* idiv */
4791 switch(ot) {
4ba9938c 4792 case MO_8:
1d1cc4d0 4793 gen_helper_idivb_AL(cpu_env, cpu_T0);
2c0262af 4794 break;
4ba9938c 4795 case MO_16:
1d1cc4d0 4796 gen_helper_idivw_AX(cpu_env, cpu_T0);
2c0262af
FB
4797 break;
4798 default:
4ba9938c 4799 case MO_32:
1d1cc4d0 4800 gen_helper_idivl_EAX(cpu_env, cpu_T0);
14ce26e7
FB
4801 break;
4802#ifdef TARGET_X86_64
4ba9938c 4803 case MO_64:
1d1cc4d0 4804 gen_helper_idivq_EAX(cpu_env, cpu_T0);
2c0262af 4805 break;
14ce26e7 4806#endif
2c0262af
FB
4807 }
4808 break;
4809 default:
b9f9c5b4 4810 goto unknown_op;
2c0262af
FB
4811 }
4812 break;
4813
4814 case 0xfe: /* GRP4 */
4815 case 0xff: /* GRP5 */
ab4e4aec 4816 ot = mo_b_d(b, dflag);
2c0262af 4817
0af10c86 4818 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 4819 mod = (modrm >> 6) & 3;
14ce26e7 4820 rm = (modrm & 7) | REX_B(s);
2c0262af
FB
4821 op = (modrm >> 3) & 7;
4822 if (op >= 2 && b == 0xfe) {
b9f9c5b4 4823 goto unknown_op;
2c0262af 4824 }
14ce26e7 4825 if (CODE64(s)) {
aba9d61e 4826 if (op == 2 || op == 4) {
14ce26e7 4827 /* operand size for jumps is 64 bit */
4ba9938c 4828 ot = MO_64;
aba9d61e 4829 } else if (op == 3 || op == 5) {
ab4e4aec 4830 ot = dflag != MO_16 ? MO_32 + (rex_w == 1) : MO_16;
14ce26e7
FB
4831 } else if (op == 6) {
4832 /* default push size is 64 bit */
ab4e4aec 4833 ot = mo_pushpop(s, dflag);
14ce26e7
FB
4834 }
4835 }
2c0262af 4836 if (mod != 3) {
4eeb3939 4837 gen_lea_modrm(env, s, modrm);
2c0262af 4838 if (op >= 2 && op != 3 && op != 5)
1d1cc4d0 4839 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
2c0262af 4840 } else {
1d1cc4d0 4841 gen_op_mov_v_reg(ot, cpu_T0, rm);
2c0262af
FB
4842 }
4843
4844 switch(op) {
4845 case 0: /* inc Ev */
4846 if (mod != 3)
4847 opreg = OR_TMP0;
4848 else
4849 opreg = rm;
4850 gen_inc(s, ot, opreg, 1);
4851 break;
4852 case 1: /* dec Ev */
4853 if (mod != 3)
4854 opreg = OR_TMP0;
4855 else
4856 opreg = rm;
4857 gen_inc(s, ot, opreg, -1);
4858 break;
4859 case 2: /* call Ev */
4f31916f 4860 /* XXX: optimize if memory (no 'and' is necessary) */
ab4e4aec 4861 if (dflag == MO_16) {
1d1cc4d0 4862 tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
40b90233 4863 }
2c0262af 4864 next_eip = s->pc - s->cs_base;
1d1cc4d0
RH
4865 tcg_gen_movi_tl(cpu_T1, next_eip);
4866 gen_push_v(s, cpu_T1);
4867 gen_op_jmp_v(cpu_T0);
7d117ce8 4868 gen_bnd_jmp(s);
2c0262af
FB
4869 gen_eob(s);
4870 break;
61382a50 4871 case 3: /* lcall Ev */
1d1cc4d0 4872 gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
830a19a4 4873 gen_add_A0_im(s, 1 << ot);
1d1cc4d0 4874 gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
2c0262af
FB
4875 do_lcall:
4876 if (s->pe && !s->vm86) {
1d1cc4d0
RH
4877 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
4878 gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T1,
ab4e4aec 4879 tcg_const_i32(dflag - 1),
100ec099 4880 tcg_const_tl(s->pc - s->cs_base));
2c0262af 4881 } else {
1d1cc4d0
RH
4882 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
4883 gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T1,
ab4e4aec 4884 tcg_const_i32(dflag - 1),
a7812ae4 4885 tcg_const_i32(s->pc - s->cs_base));
2c0262af
FB
4886 }
4887 gen_eob(s);
4888 break;
4889 case 4: /* jmp Ev */
ab4e4aec 4890 if (dflag == MO_16) {
1d1cc4d0 4891 tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
40b90233 4892 }
1d1cc4d0 4893 gen_op_jmp_v(cpu_T0);
7d117ce8 4894 gen_bnd_jmp(s);
2c0262af
FB
4895 gen_eob(s);
4896 break;
4897 case 5: /* ljmp Ev */
1d1cc4d0 4898 gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
830a19a4 4899 gen_add_A0_im(s, 1 << ot);
1d1cc4d0 4900 gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
2c0262af
FB
4901 do_ljmp:
4902 if (s->pe && !s->vm86) {
1d1cc4d0
RH
4903 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
4904 gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T1,
100ec099 4905 tcg_const_tl(s->pc - s->cs_base));
2c0262af 4906 } else {
3bd7da9e 4907 gen_op_movl_seg_T0_vm(R_CS);
1d1cc4d0 4908 gen_op_jmp_v(cpu_T1);
2c0262af
FB
4909 }
4910 gen_eob(s);
4911 break;
4912 case 6: /* push Ev */
1d1cc4d0 4913 gen_push_v(s, cpu_T0);
2c0262af
FB
4914 break;
4915 default:
b9f9c5b4 4916 goto unknown_op;
2c0262af
FB
4917 }
4918 break;
4919
4920 case 0x84: /* test Ev, Gv */
5fafdf24 4921 case 0x85:
ab4e4aec 4922 ot = mo_b_d(b, dflag);
2c0262af 4923
0af10c86 4924 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 4925 reg = ((modrm >> 3) & 7) | rex_r;
3b46e624 4926
0af10c86 4927 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
1d1cc4d0 4928 gen_op_mov_v_reg(ot, cpu_T1, reg);
2c0262af 4929 gen_op_testl_T0_T1_cc();
3ca51d07 4930 set_cc_op(s, CC_OP_LOGICB + ot);
2c0262af 4931 break;
3b46e624 4932
2c0262af
FB
4933 case 0xa8: /* test eAX, Iv */
4934 case 0xa9:
ab4e4aec 4935 ot = mo_b_d(b, dflag);
0af10c86 4936 val = insn_get(env, s, ot);
2c0262af 4937
1d1cc4d0
RH
4938 gen_op_mov_v_reg(ot, cpu_T0, OR_EAX);
4939 tcg_gen_movi_tl(cpu_T1, val);
2c0262af 4940 gen_op_testl_T0_T1_cc();
3ca51d07 4941 set_cc_op(s, CC_OP_LOGICB + ot);
2c0262af 4942 break;
3b46e624 4943
2c0262af 4944 case 0x98: /* CWDE/CBW */
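        /* Opcode 0x98 sign-extends the lower half of rAX in place: CBW
           (AL -> AX) with 16-bit operands, CWDE (AX -> EAX) with 32-bit, and
           CDQE (EAX -> RAX) with REX.W. */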
ab4e4aec 4945 switch (dflag) {
14ce26e7 4946#ifdef TARGET_X86_64
ab4e4aec 4947 case MO_64:
1d1cc4d0
RH
4948 gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
4949 tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
4950 gen_op_mov_reg_v(MO_64, R_EAX, cpu_T0);
ab4e4aec 4951 break;
14ce26e7 4952#endif
ab4e4aec 4953 case MO_32:
1d1cc4d0
RH
4954 gen_op_mov_v_reg(MO_16, cpu_T0, R_EAX);
4955 tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
4956 gen_op_mov_reg_v(MO_32, R_EAX, cpu_T0);
ab4e4aec
RH
4957 break;
4958 case MO_16:
1d1cc4d0
RH
4959 gen_op_mov_v_reg(MO_8, cpu_T0, R_EAX);
4960 tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
4961 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
ab4e4aec
RH
4962 break;
4963 default:
4964 tcg_abort();
e108dd01 4965 }
2c0262af
FB
4966 break;
4967 case 0x99: /* CDQ/CWD */
ab4e4aec 4968 switch (dflag) {
14ce26e7 4969#ifdef TARGET_X86_64
ab4e4aec 4970 case MO_64:
1d1cc4d0
RH
4971 gen_op_mov_v_reg(MO_64, cpu_T0, R_EAX);
4972 tcg_gen_sari_tl(cpu_T0, cpu_T0, 63);
4973 gen_op_mov_reg_v(MO_64, R_EDX, cpu_T0);
ab4e4aec 4974 break;
14ce26e7 4975#endif
ab4e4aec 4976 case MO_32:
1d1cc4d0
RH
4977 gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
4978 tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
4979 tcg_gen_sari_tl(cpu_T0, cpu_T0, 31);
4980 gen_op_mov_reg_v(MO_32, R_EDX, cpu_T0);
ab4e4aec
RH
4981 break;
4982 case MO_16:
1d1cc4d0
RH
4983 gen_op_mov_v_reg(MO_16, cpu_T0, R_EAX);
4984 tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
4985 tcg_gen_sari_tl(cpu_T0, cpu_T0, 15);
4986 gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
ab4e4aec
RH
4987 break;
4988 default:
4989 tcg_abort();
e108dd01 4990 }
2c0262af
FB
4991 break;
4992 case 0x1af: /* imul Gv, Ev */
4993 case 0x69: /* imul Gv, Ev, I */
4994 case 0x6b:
ab4e4aec 4995 ot = dflag;
0af10c86 4996 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7
FB
4997 reg = ((modrm >> 3) & 7) | rex_r;
4998 if (b == 0x69)
4999 s->rip_offset = insn_const_size(ot);
5000 else if (b == 0x6b)
5001 s->rip_offset = 1;
0af10c86 5002 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
2c0262af 5003 if (b == 0x69) {
0af10c86 5004 val = insn_get(env, s, ot);
1d1cc4d0 5005 tcg_gen_movi_tl(cpu_T1, val);
2c0262af 5006 } else if (b == 0x6b) {
4ba9938c 5007 val = (int8_t)insn_get(env, s, MO_8);
1d1cc4d0 5008 tcg_gen_movi_tl(cpu_T1, val);
2c0262af 5009 } else {
1d1cc4d0 5010 gen_op_mov_v_reg(ot, cpu_T1, reg);
2c0262af 5011 }
a4bcea3d 5012 switch (ot) {
0211e5af 5013#ifdef TARGET_X86_64
4ba9938c 5014 case MO_64:
1d1cc4d0 5015 tcg_gen_muls2_i64(cpu_regs[reg], cpu_T1, cpu_T0, cpu_T1);
a4bcea3d
RH
5016 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
5017 tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
1d1cc4d0 5018 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T1);
a4bcea3d 5019 break;
0211e5af 5020#endif
4ba9938c 5021 case MO_32:
1d1cc4d0
RH
5022 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
5023 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
a4bcea3d
RH
5024 tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
5025 cpu_tmp2_i32, cpu_tmp3_i32);
5026 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
5027 tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
5028 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
5029 tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
5030 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
5031 break;
5032 default:
1d1cc4d0
RH
5033 tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
5034 tcg_gen_ext16s_tl(cpu_T1, cpu_T1);
0211e5af 5035 /* XXX: use 32 bit mul which could be faster */
1d1cc4d0
RH
5036 tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
5037 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
5038 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T0);
5039 tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
5040 gen_op_mov_reg_v(ot, reg, cpu_T0);
a4bcea3d 5041 break;
2c0262af 5042 }
3ca51d07 5043 set_cc_op(s, CC_OP_MULB + ot);
2c0262af
FB
5044 break;
5045 case 0x1c0:
5046 case 0x1c1: /* xadd Ev, Gv */
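        /* XADD: the destination receives dest + src while the source
           register receives the previous destination value.  */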
ab4e4aec 5047 ot = mo_b_d(b, dflag);
0af10c86 5048 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5049 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af
FB
5050 mod = (modrm >> 6) & 3;
5051 if (mod == 3) {
14ce26e7 5052 rm = (modrm & 7) | REX_B(s);
1d1cc4d0
RH
5053 gen_op_mov_v_reg(ot, cpu_T0, reg);
5054 gen_op_mov_v_reg(ot, cpu_T1, rm);
5055 tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
5056 gen_op_mov_reg_v(ot, reg, cpu_T1);
5057 gen_op_mov_reg_v(ot, rm, cpu_T0);
2c0262af 5058 } else {
4eeb3939 5059 gen_lea_modrm(env, s, modrm);
1d1cc4d0
RH
5060 gen_op_mov_v_reg(ot, cpu_T0, reg);
5061 gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
5062 tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
5063 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
5064 gen_op_mov_reg_v(ot, reg, cpu_T1);
2c0262af
FB
5065 }
5066 gen_op_update2_cc();
3ca51d07 5067 set_cc_op(s, CC_OP_ADDB + ot);
2c0262af
FB
5068 break;
5069 case 0x1b0:
5070 case 0x1b1: /* cmpxchg Ev, Gv */
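        /* CMPXCHG: compare the accumulator with the destination; if
           they are equal store the source into the destination,
           otherwise load the destination into the accumulator.  */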
cad3a37d 5071 {
42a268c2 5072 TCGLabel *label1, *label2;
1e4840bf 5073 TCGv t0, t1, t2, a0;
cad3a37d 5074
ab4e4aec 5075 ot = mo_b_d(b, dflag);
0af10c86 5076 modrm = cpu_ldub_code(env, s->pc++);
cad3a37d
FB
5077 reg = ((modrm >> 3) & 7) | rex_r;
5078 mod = (modrm >> 6) & 3;
a7812ae4
PB
5079 t0 = tcg_temp_local_new();
5080 t1 = tcg_temp_local_new();
5081 t2 = tcg_temp_local_new();
5082 a0 = tcg_temp_local_new();
1e4840bf 5083 gen_op_mov_v_reg(ot, t1, reg);
cad3a37d
FB
5084 if (mod == 3) {
5085 rm = (modrm & 7) | REX_B(s);
1e4840bf 5086 gen_op_mov_v_reg(ot, t0, rm);
cad3a37d 5087 } else {
4eeb3939 5088 gen_lea_modrm(env, s, modrm);
1e4840bf 5089 tcg_gen_mov_tl(a0, cpu_A0);
323d1876 5090 gen_op_ld_v(s, ot, t0, a0);
cad3a37d
FB
5091 rm = 0; /* avoid warning */
5092 }
5093 label1 = gen_new_label();
a3251186
RH
5094 tcg_gen_mov_tl(t2, cpu_regs[R_EAX]);
5095 gen_extu(ot, t0);
1e4840bf 5096 gen_extu(ot, t2);
a3251186 5097 tcg_gen_brcond_tl(TCG_COND_EQ, t2, t0, label1);
f7e80adf 5098 label2 = gen_new_label();
cad3a37d 5099 if (mod == 3) {
1e4840bf 5100 gen_op_mov_reg_v(ot, R_EAX, t0);
1130328e
FB
5101 tcg_gen_br(label2);
5102 gen_set_label(label1);
1e4840bf 5103 gen_op_mov_reg_v(ot, rm, t1);
cad3a37d 5104 } else {
f7e80adf
AG
5105 /* perform no-op store cycle like physical cpu; must be
5106 before changing accumulator to ensure idempotency if
5107 the store faults and the instruction is restarted */
323d1876 5108 gen_op_st_v(s, ot, t0, a0);
1e4840bf 5109 gen_op_mov_reg_v(ot, R_EAX, t0);
f7e80adf 5110 tcg_gen_br(label2);
1130328e 5111 gen_set_label(label1);
323d1876 5112 gen_op_st_v(s, ot, t1, a0);
cad3a37d 5113 }
f7e80adf 5114 gen_set_label(label2);
1e4840bf 5115 tcg_gen_mov_tl(cpu_cc_src, t0);
a3251186
RH
5116 tcg_gen_mov_tl(cpu_cc_srcT, t2);
5117 tcg_gen_sub_tl(cpu_cc_dst, t2, t0);
3ca51d07 5118 set_cc_op(s, CC_OP_SUBB + ot);
1e4840bf
FB
5119 tcg_temp_free(t0);
5120 tcg_temp_free(t1);
5121 tcg_temp_free(t2);
5122 tcg_temp_free(a0);
2c0262af 5123 }
2c0262af
FB
5124 break;
5125 case 0x1c7: /* cmpxchg8b */
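        /* With REX.W this is CMPXCHG16B and additionally requires the
           CX16 feature; the helpers compare EDX:EAX (RDX:RAX) with the
           memory operand and store ECX:EBX (RCX:RBX) on a match.  */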
0af10c86 5126 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 5127 mod = (modrm >> 6) & 3;
71c3558e 5128 if ((mod == 3) || ((modrm & 0x38) != 0x8))
2c0262af 5129 goto illegal_op;
1b9d9ebb 5130#ifdef TARGET_X86_64
ab4e4aec 5131 if (dflag == MO_64) {
1b9d9ebb
FB
5132 if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
5133 goto illegal_op;
4eeb3939 5134 gen_lea_modrm(env, s, modrm);
92fc4b58 5135 gen_helper_cmpxchg16b(cpu_env, cpu_A0);
1b9d9ebb
FB
5136 } else
5137#endif
5138 {
5139 if (!(s->cpuid_features & CPUID_CX8))
5140 goto illegal_op;
4eeb3939 5141 gen_lea_modrm(env, s, modrm);
92fc4b58 5142 gen_helper_cmpxchg8b(cpu_env, cpu_A0);
1b9d9ebb 5143 }
3ca51d07 5144 set_cc_op(s, CC_OP_EFLAGS);
2c0262af 5145 break;
3b46e624 5146
2c0262af
FB
5147 /**************************/
5148 /* push/pop */
5149 case 0x50 ... 0x57: /* push */
1d1cc4d0
RH
5150 gen_op_mov_v_reg(MO_32, cpu_T0, (b & 7) | REX_B(s));
5151 gen_push_v(s, cpu_T0);
2c0262af
FB
5152 break;
5153 case 0x58 ... 0x5f: /* pop */
8e31d234 5154 ot = gen_pop_T0(s);
77729c24 5155 /* NOTE: order is important for pop %sp */
8e31d234 5156 gen_pop_update(s, ot);
1d1cc4d0 5157 gen_op_mov_reg_v(ot, (b & 7) | REX_B(s), cpu_T0);
2c0262af
FB
5158 break;
5159 case 0x60: /* pusha */
14ce26e7
FB
5160 if (CODE64(s))
5161 goto illegal_op;
2c0262af
FB
5162 gen_pusha(s);
5163 break;
5164 case 0x61: /* popa */
14ce26e7
FB
5165 if (CODE64(s))
5166 goto illegal_op;
2c0262af
FB
5167 gen_popa(s);
5168 break;
5169 case 0x68: /* push Iv */
5170 case 0x6a:
ab4e4aec 5171 ot = mo_pushpop(s, dflag);
2c0262af 5172 if (b == 0x68)
0af10c86 5173 val = insn_get(env, s, ot);
2c0262af 5174 else
4ba9938c 5175 val = (int8_t)insn_get(env, s, MO_8);
1d1cc4d0
RH
5176 tcg_gen_movi_tl(cpu_T0, val);
5177 gen_push_v(s, cpu_T0);
2c0262af
FB
5178 break;
5179 case 0x8f: /* pop Ev */
0af10c86 5180 modrm = cpu_ldub_code(env, s->pc++);
77729c24 5181 mod = (modrm >> 6) & 3;
8e31d234 5182 ot = gen_pop_T0(s);
77729c24
FB
5183 if (mod == 3) {
5184 /* NOTE: order is important for pop %sp */
8e31d234 5185 gen_pop_update(s, ot);
14ce26e7 5186 rm = (modrm & 7) | REX_B(s);
1d1cc4d0 5187 gen_op_mov_reg_v(ot, rm, cpu_T0);
77729c24
FB
5188 } else {
5189 /* NOTE: order is important too for MMU exceptions */
14ce26e7 5190 s->popl_esp_hack = 1 << ot;
0af10c86 5191 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
77729c24 5192 s->popl_esp_hack = 0;
8e31d234 5193 gen_pop_update(s, ot);
77729c24 5194 }
2c0262af
FB
5195 break;
5196 case 0xc8: /* enter */
5197 {
5198 int level;
0af10c86 5199 val = cpu_lduw_code(env, s->pc);
2c0262af 5200 s->pc += 2;
0af10c86 5201 level = cpu_ldub_code(env, s->pc++);
2c0262af
FB
5202 gen_enter(s, val, level);
5203 }
5204 break;
5205 case 0xc9: /* leave */
2045f04c 5206 gen_leave(s);
2c0262af
FB
5207 break;
5208 case 0x06: /* push es */
5209 case 0x0e: /* push cs */
5210 case 0x16: /* push ss */
5211 case 0x1e: /* push ds */
14ce26e7
FB
5212 if (CODE64(s))
5213 goto illegal_op;
2c0262af 5214 gen_op_movl_T0_seg(b >> 3);
1d1cc4d0 5215 gen_push_v(s, cpu_T0);
2c0262af
FB
5216 break;
5217 case 0x1a0: /* push fs */
5218 case 0x1a8: /* push gs */
5219 gen_op_movl_T0_seg((b >> 3) & 7);
1d1cc4d0 5220 gen_push_v(s, cpu_T0);
2c0262af
FB
5221 break;
5222 case 0x07: /* pop es */
5223 case 0x17: /* pop ss */
5224 case 0x1f: /* pop ds */
14ce26e7
FB
5225 if (CODE64(s))
5226 goto illegal_op;
2c0262af 5227 reg = b >> 3;
8e31d234 5228 ot = gen_pop_T0(s);
100ec099 5229 gen_movl_seg_T0(s, reg);
8e31d234 5230 gen_pop_update(s, ot);
f083d92c 5231 /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. */
2c0262af 5232 if (s->is_jmp) {
14ce26e7 5233 gen_jmp_im(s->pc - s->cs_base);
f083d92c
RH
5234 if (reg == R_SS) {
5235 s->tf = 0;
5236 gen_eob_inhibit_irq(s, true);
5237 } else {
5238 gen_eob(s);
5239 }
2c0262af
FB
5240 }
5241 break;
5242 case 0x1a1: /* pop fs */
5243 case 0x1a9: /* pop gs */
8e31d234 5244 ot = gen_pop_T0(s);
100ec099 5245 gen_movl_seg_T0(s, (b >> 3) & 7);
8e31d234 5246 gen_pop_update(s, ot);
2c0262af 5247 if (s->is_jmp) {
14ce26e7 5248 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
5249 gen_eob(s);
5250 }
5251 break;
5252
5253 /**************************/
5254 /* mov */
5255 case 0x88:
5256 case 0x89: /* mov Gv, Ev */
ab4e4aec 5257 ot = mo_b_d(b, dflag);
0af10c86 5258 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5259 reg = ((modrm >> 3) & 7) | rex_r;
3b46e624 5260
2c0262af 5261 /* generate a generic store */
0af10c86 5262 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
2c0262af
FB
5263 break;
5264 case 0xc6:
5265 case 0xc7: /* mov Ev, Iv */
ab4e4aec 5266 ot = mo_b_d(b, dflag);
0af10c86 5267 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 5268 mod = (modrm >> 6) & 3;
14ce26e7
FB
5269 if (mod != 3) {
5270 s->rip_offset = insn_const_size(ot);
4eeb3939 5271 gen_lea_modrm(env, s, modrm);
14ce26e7 5272 }
0af10c86 5273 val = insn_get(env, s, ot);
1d1cc4d0 5274 tcg_gen_movi_tl(cpu_T0, val);
fd8ca9f6 5275 if (mod != 3) {
1d1cc4d0 5276 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
fd8ca9f6 5277 } else {
1d1cc4d0 5278 gen_op_mov_reg_v(ot, (modrm & 7) | REX_B(s), cpu_T0);
fd8ca9f6 5279 }
2c0262af
FB
5280 break;
5281 case 0x8a:
5282 case 0x8b: /* mov Ev, Gv */
ab4e4aec 5283 ot = mo_b_d(b, dflag);
0af10c86 5284 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5285 reg = ((modrm >> 3) & 7) | rex_r;
3b46e624 5286
0af10c86 5287 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
1d1cc4d0 5288 gen_op_mov_reg_v(ot, reg, cpu_T0);
2c0262af
FB
5289 break;
5290 case 0x8e: /* mov seg, Gv */
0af10c86 5291 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
5292 reg = (modrm >> 3) & 7;
5293 if (reg >= 6 || reg == R_CS)
5294 goto illegal_op;
4ba9938c 5295 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
100ec099 5296 gen_movl_seg_T0(s, reg);
f083d92c 5297 /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. */
2c0262af 5298 if (s->is_jmp) {
14ce26e7 5299 gen_jmp_im(s->pc - s->cs_base);
f083d92c
RH
5300 if (reg == R_SS) {
5301 s->tf = 0;
5302 gen_eob_inhibit_irq(s, true);
5303 } else {
5304 gen_eob(s);
5305 }
2c0262af
FB
5306 }
5307 break;
5308 case 0x8c: /* mov Gv, seg */
0af10c86 5309 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
5310 reg = (modrm >> 3) & 7;
5311 mod = (modrm >> 6) & 3;
5312 if (reg >= 6)
5313 goto illegal_op;
5314 gen_op_movl_T0_seg(reg);
ab4e4aec 5315 ot = mod == 3 ? dflag : MO_16;
0af10c86 5316 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
2c0262af
FB
5317 break;
5318
5319 case 0x1b6: /* movzbS Gv, Eb */
5320 case 0x1b7: /* movzwS Gv, Eb */
5321 case 0x1be: /* movsbS Gv, Eb */
5322 case 0x1bf: /* movswS Gv, Eb */
5323 {
c8fbc479
RH
5324 TCGMemOp d_ot;
5325 TCGMemOp s_ot;
5326
2c0262af 5327 /* d_ot is the size of destination */
ab4e4aec 5328 d_ot = dflag;
2c0262af 5329 /* ot is the size of source */
4ba9938c 5330 ot = (b & 1) + MO_8;
c8fbc479
RH
5331 /* s_ot is the sign+size of source */
5332 s_ot = b & 8 ? MO_SIGN | ot : ot;
5333
0af10c86 5334 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5335 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af 5336 mod = (modrm >> 6) & 3;
14ce26e7 5337 rm = (modrm & 7) | REX_B(s);
3b46e624 5338
2c0262af 5339 if (mod == 3) {
1d1cc4d0 5340 gen_op_mov_v_reg(ot, cpu_T0, rm);
c8fbc479
RH
5341 switch (s_ot) {
5342 case MO_UB:
1d1cc4d0 5343 tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
2c0262af 5344 break;
c8fbc479 5345 case MO_SB:
1d1cc4d0 5346 tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
2c0262af 5347 break;
c8fbc479 5348 case MO_UW:
1d1cc4d0 5349 tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
2c0262af
FB
5350 break;
5351 default:
c8fbc479 5352 case MO_SW:
1d1cc4d0 5353 tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
2c0262af
FB
5354 break;
5355 }
1d1cc4d0 5356 gen_op_mov_reg_v(d_ot, reg, cpu_T0);
2c0262af 5357 } else {
4eeb3939 5358 gen_lea_modrm(env, s, modrm);
1d1cc4d0
RH
5359 gen_op_ld_v(s, s_ot, cpu_T0, cpu_A0);
5360 gen_op_mov_reg_v(d_ot, reg, cpu_T0);
2c0262af
FB
5361 }
5362 }
5363 break;
5364
5365 case 0x8d: /* lea */
0af10c86 5366 modrm = cpu_ldub_code(env, s->pc++);
3a1d9b8b
FB
5367 mod = (modrm >> 6) & 3;
5368 if (mod == 3)
5369 goto illegal_op;
14ce26e7 5370 reg = ((modrm >> 3) & 7) | rex_r;
a074ce42
RH
5371 {
5372 AddressParts a = gen_lea_modrm_0(env, s, modrm);
5373 TCGv ea = gen_lea_modrm_1(a);
5374 gen_op_mov_reg_v(dflag, reg, ea);
5375 }
2c0262af 5376 break;
3b46e624 5377
2c0262af
FB
5378 case 0xa0: /* mov EAX, Ov */
5379 case 0xa1:
5380 case 0xa2: /* mov Ov, EAX */
5381 case 0xa3:
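        /* moffs forms: a direct offset whose size follows the address
           size moves data between the accumulator and DS:offset
           (subject to segment override).  */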
2c0262af 5382 {
14ce26e7
FB
5383 target_ulong offset_addr;
5384
ab4e4aec 5385 ot = mo_b_d(b, dflag);
1d71ddb1 5386 switch (s->aflag) {
14ce26e7 5387#ifdef TARGET_X86_64
1d71ddb1 5388 case MO_64:
0af10c86 5389 offset_addr = cpu_ldq_code(env, s->pc);
14ce26e7 5390 s->pc += 8;
1d71ddb1 5391 break;
14ce26e7 5392#endif
1d71ddb1
RH
5393 default:
5394 offset_addr = insn_get(env, s, s->aflag);
5395 break;
14ce26e7 5396 }
3250cff8 5397 tcg_gen_movi_tl(cpu_A0, offset_addr);
664e0f19 5398 gen_add_A0_ds_seg(s);
14ce26e7 5399 if ((b & 2) == 0) {
1d1cc4d0
RH
5400 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
5401 gen_op_mov_reg_v(ot, R_EAX, cpu_T0);
14ce26e7 5402 } else {
1d1cc4d0
RH
5403 gen_op_mov_v_reg(ot, cpu_T0, R_EAX);
5404 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
2c0262af
FB
5405 }
5406 }
2c0262af
FB
5407 break;
5408 case 0xd7: /* xlat */
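        /* XLAT: load AL from DS:[rBX + zero-extended AL], honouring
           the address size and any segment override.  */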
1d71ddb1 5409 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EBX]);
1d1cc4d0
RH
5410 tcg_gen_ext8u_tl(cpu_T0, cpu_regs[R_EAX]);
5411 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T0);
1d71ddb1 5412 gen_extu(s->aflag, cpu_A0);
664e0f19 5413 gen_add_A0_ds_seg(s);
1d1cc4d0
RH
5414 gen_op_ld_v(s, MO_8, cpu_T0, cpu_A0);
5415 gen_op_mov_reg_v(MO_8, R_EAX, cpu_T0);
2c0262af
FB
5416 break;
5417 case 0xb0 ... 0xb7: /* mov R, Ib */
4ba9938c 5418 val = insn_get(env, s, MO_8);
1d1cc4d0
RH
5419 tcg_gen_movi_tl(cpu_T0, val);
5420 gen_op_mov_reg_v(MO_8, (b & 7) | REX_B(s), cpu_T0);
2c0262af
FB
5421 break;
5422 case 0xb8 ... 0xbf: /* mov R, Iv */
14ce26e7 5423#ifdef TARGET_X86_64
ab4e4aec 5424 if (dflag == MO_64) {
14ce26e7
FB
5425 uint64_t tmp;
5426 /* 64 bit case */
0af10c86 5427 tmp = cpu_ldq_code(env, s->pc);
14ce26e7
FB
5428 s->pc += 8;
5429 reg = (b & 7) | REX_B(s);
1d1cc4d0
RH
5430 tcg_gen_movi_tl(cpu_T0, tmp);
5431 gen_op_mov_reg_v(MO_64, reg, cpu_T0);
5fafdf24 5432 } else
14ce26e7
FB
5433#endif
5434 {
ab4e4aec 5435 ot = dflag;
0af10c86 5436 val = insn_get(env, s, ot);
14ce26e7 5437 reg = (b & 7) | REX_B(s);
1d1cc4d0
RH
5438 tcg_gen_movi_tl(cpu_T0, val);
5439 gen_op_mov_reg_v(ot, reg, cpu_T0);
14ce26e7 5440 }
2c0262af
FB
5441 break;
5442
5443 case 0x91 ... 0x97: /* xchg R, EAX */
7418027e 5444 do_xchg_reg_eax:
ab4e4aec 5445 ot = dflag;
14ce26e7 5446 reg = (b & 7) | REX_B(s);
2c0262af
FB
5447 rm = R_EAX;
5448 goto do_xchg_reg;
5449 case 0x86:
5450 case 0x87: /* xchg Ev, Gv */
ab4e4aec 5451 ot = mo_b_d(b, dflag);
0af10c86 5452 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5453 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af
FB
5454 mod = (modrm >> 6) & 3;
5455 if (mod == 3) {
14ce26e7 5456 rm = (modrm & 7) | REX_B(s);
2c0262af 5457 do_xchg_reg:
1d1cc4d0
RH
5458 gen_op_mov_v_reg(ot, cpu_T0, reg);
5459 gen_op_mov_v_reg(ot, cpu_T1, rm);
5460 gen_op_mov_reg_v(ot, rm, cpu_T0);
5461 gen_op_mov_reg_v(ot, reg, cpu_T1);
2c0262af 5462 } else {
4eeb3939 5463 gen_lea_modrm(env, s, modrm);
1d1cc4d0 5464 gen_op_mov_v_reg(ot, cpu_T0, reg);
2c0262af
FB
5465 /* for xchg, lock is implicit */
5466 if (!(prefixes & PREFIX_LOCK))
a7812ae4 5467 gen_helper_lock();
1d1cc4d0
RH
5468 gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
5469 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
2c0262af 5470 if (!(prefixes & PREFIX_LOCK))
a7812ae4 5471 gen_helper_unlock();
1d1cc4d0 5472 gen_op_mov_reg_v(ot, reg, cpu_T1);
2c0262af
FB
5473 }
5474 break;
5475 case 0xc4: /* les Gv */
701ed211 5476 /* In CODE64 this is VEX3; see above. */
2c0262af
FB
5477 op = R_ES;
5478 goto do_lxx;
5479 case 0xc5: /* lds Gv */
701ed211 5480 /* In CODE64 this is VEX2; see above. */
2c0262af
FB
5481 op = R_DS;
5482 goto do_lxx;
5483 case 0x1b2: /* lss Gv */
5484 op = R_SS;
5485 goto do_lxx;
5486 case 0x1b4: /* lfs Gv */
5487 op = R_FS;
5488 goto do_lxx;
5489 case 0x1b5: /* lgs Gv */
5490 op = R_GS;
5491 do_lxx:
ab4e4aec 5492 ot = dflag != MO_16 ? MO_32 : MO_16;
0af10c86 5493 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5494 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af
FB
5495 mod = (modrm >> 6) & 3;
5496 if (mod == 3)
5497 goto illegal_op;
4eeb3939 5498 gen_lea_modrm(env, s, modrm);
1d1cc4d0 5499 gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
830a19a4 5500 gen_add_A0_im(s, 1 << ot);
2c0262af 5501 /* load the segment first to handle exceptions properly */
1d1cc4d0 5502 gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
100ec099 5503 gen_movl_seg_T0(s, op);
2c0262af 5504 /* then put the data */
1d1cc4d0 5505 gen_op_mov_reg_v(ot, reg, cpu_T1);
2c0262af 5506 if (s->is_jmp) {
14ce26e7 5507 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
5508 gen_eob(s);
5509 }
5510 break;
3b46e624 5511
2c0262af
FB
5512 /************************/
5513 /* shifts */
5514 case 0xc0:
5515 case 0xc1:
5516 /* shift Ev,Ib */
5517 shift = 2;
5518 grp2:
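        /* Shift group 2: shift == 0 takes the count from CL, shift == 1
           uses a count of one, shift == 2 reads an imm8 count after the
           modrm byte.  */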
5519 {
ab4e4aec 5520 ot = mo_b_d(b, dflag);
0af10c86 5521 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 5522 mod = (modrm >> 6) & 3;
2c0262af 5523 op = (modrm >> 3) & 7;
3b46e624 5524
2c0262af 5525 if (mod != 3) {
14ce26e7
FB
5526 if (shift == 2) {
5527 s->rip_offset = 1;
5528 }
4eeb3939 5529 gen_lea_modrm(env, s, modrm);
2c0262af
FB
5530 opreg = OR_TMP0;
5531 } else {
14ce26e7 5532 opreg = (modrm & 7) | REX_B(s);
2c0262af
FB
5533 }
5534
5535 /* simpler op */
5536 if (shift == 0) {
5537 gen_shift(s, op, ot, opreg, OR_ECX);
5538 } else {
5539 if (shift == 2) {
0af10c86 5540 shift = cpu_ldub_code(env, s->pc++);
2c0262af
FB
5541 }
5542 gen_shifti(s, op, ot, opreg, shift);
5543 }
5544 }
5545 break;
5546 case 0xd0:
5547 case 0xd1:
5548 /* shift Ev,1 */
5549 shift = 1;
5550 goto grp2;
5551 case 0xd2:
5552 case 0xd3:
5553 /* shift Ev,cl */
5554 shift = 0;
5555 goto grp2;
5556
5557 case 0x1a4: /* shld imm */
5558 op = 0;
5559 shift = 1;
5560 goto do_shiftd;
5561 case 0x1a5: /* shld cl */
5562 op = 0;
5563 shift = 0;
5564 goto do_shiftd;
5565 case 0x1ac: /* shrd imm */
5566 op = 1;
5567 shift = 1;
5568 goto do_shiftd;
5569 case 0x1ad: /* shrd cl */
5570 op = 1;
5571 shift = 0;
5572 do_shiftd:
ab4e4aec 5573 ot = dflag;
0af10c86 5574 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 5575 mod = (modrm >> 6) & 3;
14ce26e7
FB
5576 rm = (modrm & 7) | REX_B(s);
5577 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af 5578 if (mod != 3) {
4eeb3939 5579 gen_lea_modrm(env, s, modrm);
b6abf97d 5580 opreg = OR_TMP0;
2c0262af 5581 } else {
b6abf97d 5582 opreg = rm;
2c0262af 5583 }
1d1cc4d0 5584 gen_op_mov_v_reg(ot, cpu_T1, reg);
3b46e624 5585
2c0262af 5586 if (shift) {
3b9d3cf1
PB
5587 TCGv imm = tcg_const_tl(cpu_ldub_code(env, s->pc++));
5588 gen_shiftd_rm_T1(s, ot, opreg, op, imm);
5589 tcg_temp_free(imm);
2c0262af 5590 } else {
3b9d3cf1 5591 gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
2c0262af
FB
5592 }
5593 break;
5594
5595 /************************/
5596 /* floats */
5fafdf24 5597 case 0xd8 ... 0xdf:
7eee2a50
FB
5598 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
5599 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5601 /* XXX: what to do if illegal op? */
5601 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
5602 break;
5603 }
0af10c86 5604 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
5605 mod = (modrm >> 6) & 3;
5606 rm = modrm & 7;
5607 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
2c0262af
FB
5608 if (mod != 3) {
5609 /* memory op */
4eeb3939 5610 gen_lea_modrm(env, s, modrm);
2c0262af
FB
5611 switch(op) {
5612 case 0x00 ... 0x07: /* fxxxs */
5613 case 0x10 ... 0x17: /* fixxxl */
5614 case 0x20 ... 0x27: /* fxxxl */
5615 case 0x30 ... 0x37: /* fixxx */
5616 {
5617 int op1;
5618 op1 = op & 7;
5619
5620 switch(op >> 4) {
5621 case 0:
80b02013
RH
5622 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5623 s->mem_index, MO_LEUL);
d3eb5eae 5624 gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
5625 break;
5626 case 1:
80b02013
RH
5627 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5628 s->mem_index, MO_LEUL);
d3eb5eae 5629 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
5630 break;
5631 case 2:
3c5f4116
RH
5632 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
5633 s->mem_index, MO_LEQ);
d3eb5eae 5634 gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64);
2c0262af
FB
5635 break;
5636 case 3:
5637 default:
80b02013
RH
5638 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5639 s->mem_index, MO_LESW);
d3eb5eae 5640 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
5641 break;
5642 }
3b46e624 5643
a7812ae4 5644 gen_helper_fp_arith_ST0_FT0(op1);
2c0262af
FB
5645 if (op1 == 3) {
5646 /* fcomp needs pop */
d3eb5eae 5647 gen_helper_fpop(cpu_env);
2c0262af
FB
5648 }
5649 }
5650 break;
5651 case 0x08: /* flds */
5652 case 0x0a: /* fsts */
5653 case 0x0b: /* fstps */
465e9838
FB
5654 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5655 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5656 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
2c0262af
FB
5657 switch(op & 7) {
5658 case 0:
5659 switch(op >> 4) {
5660 case 0:
80b02013
RH
5661 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5662 s->mem_index, MO_LEUL);
d3eb5eae 5663 gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
5664 break;
5665 case 1:
80b02013
RH
5666 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5667 s->mem_index, MO_LEUL);
d3eb5eae 5668 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
5669 break;
5670 case 2:
3c5f4116
RH
5671 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
5672 s->mem_index, MO_LEQ);
d3eb5eae 5673 gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64);
2c0262af
FB
5674 break;
5675 case 3:
5676 default:
80b02013
RH
5677 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5678 s->mem_index, MO_LESW);
d3eb5eae 5679 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
5680 break;
5681 }
5682 break;
465e9838 5683 case 1:
19e6c4b8 5684 /* XXX: the corresponding CPUID bit must be tested! */
465e9838
FB
5685 switch(op >> 4) {
5686 case 1:
d3eb5eae 5687 gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env);
d5601ad0
RH
5688 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5689 s->mem_index, MO_LEUL);
465e9838
FB
5690 break;
5691 case 2:
d3eb5eae 5692 gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env);
3523e4bd
RH
5693 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
5694 s->mem_index, MO_LEQ);
465e9838
FB
5695 break;
5696 case 3:
5697 default:
d3eb5eae 5698 gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env);
d5601ad0
RH
5699 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5700 s->mem_index, MO_LEUW);
19e6c4b8 5701 break;
465e9838 5702 }
d3eb5eae 5703 gen_helper_fpop(cpu_env);
465e9838 5704 break;
2c0262af
FB
5705 default:
5706 switch(op >> 4) {
5707 case 0:
d3eb5eae 5708 gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env);
d5601ad0
RH
5709 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5710 s->mem_index, MO_LEUL);
2c0262af
FB
5711 break;
5712 case 1:
d3eb5eae 5713 gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env);
d5601ad0
RH
5714 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5715 s->mem_index, MO_LEUL);
2c0262af
FB
5716 break;
5717 case 2:
d3eb5eae 5718 gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env);
3523e4bd
RH
5719 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
5720 s->mem_index, MO_LEQ);
2c0262af
FB
5721 break;
5722 case 3:
5723 default:
d3eb5eae 5724 gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env);
d5601ad0
RH
5725 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5726 s->mem_index, MO_LEUW);
2c0262af
FB
5727 break;
5728 }
5729 if ((op & 7) == 3)
d3eb5eae 5730 gen_helper_fpop(cpu_env);
2c0262af
FB
5731 break;
5732 }
5733 break;
5734 case 0x0c: /* fldenv mem */
ab4e4aec 5735 gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
2c0262af
FB
5736 break;
5737 case 0x0d: /* fldcw mem */
80b02013
RH
5738 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5739 s->mem_index, MO_LEUW);
d3eb5eae 5740 gen_helper_fldcw(cpu_env, cpu_tmp2_i32);
2c0262af
FB
5741 break;
5742 case 0x0e: /* fnstenv mem */
ab4e4aec 5743 gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
2c0262af
FB
5744 break;
5745 case 0x0f: /* fnstcw mem */
d3eb5eae 5746 gen_helper_fnstcw(cpu_tmp2_i32, cpu_env);
d5601ad0
RH
5747 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5748 s->mem_index, MO_LEUW);
2c0262af
FB
5749 break;
5750 case 0x1d: /* fldt mem */
d3eb5eae 5751 gen_helper_fldt_ST0(cpu_env, cpu_A0);
2c0262af
FB
5752 break;
5753 case 0x1f: /* fstpt mem */
d3eb5eae
BS
5754 gen_helper_fstt_ST0(cpu_env, cpu_A0);
5755 gen_helper_fpop(cpu_env);
2c0262af
FB
5756 break;
5757 case 0x2c: /* frstor mem */
ab4e4aec 5758 gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
2c0262af
FB
5759 break;
5760 case 0x2e: /* fnsave mem */
ab4e4aec 5761 gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
2c0262af
FB
5762 break;
5763 case 0x2f: /* fnstsw mem */
d3eb5eae 5764 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
d5601ad0
RH
5765 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5766 s->mem_index, MO_LEUW);
2c0262af
FB
5767 break;
5768 case 0x3c: /* fbld */
d3eb5eae 5769 gen_helper_fbld_ST0(cpu_env, cpu_A0);
2c0262af
FB
5770 break;
5771 case 0x3e: /* fbstp */
d3eb5eae
BS
5772 gen_helper_fbst_ST0(cpu_env, cpu_A0);
5773 gen_helper_fpop(cpu_env);
2c0262af
FB
5774 break;
5775 case 0x3d: /* fildll */
3c5f4116 5776 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
d3eb5eae 5777 gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64);
2c0262af
FB
5778 break;
5779 case 0x3f: /* fistpll */
d3eb5eae 5780 gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env);
3523e4bd 5781 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
d3eb5eae 5782 gen_helper_fpop(cpu_env);
2c0262af
FB
5783 break;
5784 default:
b9f9c5b4 5785 goto unknown_op;
2c0262af
FB
5786 }
5787 } else {
5788 /* register float ops */
5789 opreg = rm;
5790
5791 switch(op) {
5792 case 0x08: /* fld sti */
d3eb5eae
BS
5793 gen_helper_fpush(cpu_env);
5794 gen_helper_fmov_ST0_STN(cpu_env,
5795 tcg_const_i32((opreg + 1) & 7));
2c0262af
FB
5796 break;
5797 case 0x09: /* fxchg sti */
c169c906
FB
5798 case 0x29: /* fxchg4 sti, undocumented op */
5799 case 0x39: /* fxchg7 sti, undocumented op */
d3eb5eae 5800 gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
2c0262af
FB
5801 break;
5802 case 0x0a: /* grp d9/2 */
5803 switch(rm) {
5804 case 0: /* fnop */
023fe10d 5805 /* check exceptions (FreeBSD FPU probe) */
d3eb5eae 5806 gen_helper_fwait(cpu_env);
2c0262af
FB
5807 break;
5808 default:
b9f9c5b4 5809 goto unknown_op;
2c0262af
FB
5810 }
5811 break;
5812 case 0x0c: /* grp d9/4 */
5813 switch(rm) {
5814 case 0: /* fchs */
d3eb5eae 5815 gen_helper_fchs_ST0(cpu_env);
2c0262af
FB
5816 break;
5817 case 1: /* fabs */
d3eb5eae 5818 gen_helper_fabs_ST0(cpu_env);
2c0262af
FB
5819 break;
5820 case 4: /* ftst */
d3eb5eae
BS
5821 gen_helper_fldz_FT0(cpu_env);
5822 gen_helper_fcom_ST0_FT0(cpu_env);
2c0262af
FB
5823 break;
5824 case 5: /* fxam */
d3eb5eae 5825 gen_helper_fxam_ST0(cpu_env);
2c0262af
FB
5826 break;
5827 default:
b9f9c5b4 5828 goto unknown_op;
2c0262af
FB
5829 }
5830 break;
5831 case 0x0d: /* grp d9/5 */
5832 {
5833 switch(rm) {
5834 case 0:
d3eb5eae
BS
5835 gen_helper_fpush(cpu_env);
5836 gen_helper_fld1_ST0(cpu_env);
2c0262af
FB
5837 break;
5838 case 1:
d3eb5eae
BS
5839 gen_helper_fpush(cpu_env);
5840 gen_helper_fldl2t_ST0(cpu_env);
2c0262af
FB
5841 break;
5842 case 2:
d3eb5eae
BS
5843 gen_helper_fpush(cpu_env);
5844 gen_helper_fldl2e_ST0(cpu_env);
2c0262af
FB
5845 break;
5846 case 3:
d3eb5eae
BS
5847 gen_helper_fpush(cpu_env);
5848 gen_helper_fldpi_ST0(cpu_env);
2c0262af
FB
5849 break;
5850 case 4:
d3eb5eae
BS
5851 gen_helper_fpush(cpu_env);
5852 gen_helper_fldlg2_ST0(cpu_env);
2c0262af
FB
5853 break;
5854 case 5:
d3eb5eae
BS
5855 gen_helper_fpush(cpu_env);
5856 gen_helper_fldln2_ST0(cpu_env);
2c0262af
FB
5857 break;
5858 case 6:
d3eb5eae
BS
5859 gen_helper_fpush(cpu_env);
5860 gen_helper_fldz_ST0(cpu_env);
2c0262af
FB
5861 break;
5862 default:
b9f9c5b4 5863 goto unknown_op;
2c0262af
FB
5864 }
5865 }
5866 break;
5867 case 0x0e: /* grp d9/6 */
5868 switch(rm) {
5869 case 0: /* f2xm1 */
d3eb5eae 5870 gen_helper_f2xm1(cpu_env);
2c0262af
FB
5871 break;
5872 case 1: /* fyl2x */
d3eb5eae 5873 gen_helper_fyl2x(cpu_env);
2c0262af
FB
5874 break;
5875 case 2: /* fptan */
d3eb5eae 5876 gen_helper_fptan(cpu_env);
2c0262af
FB
5877 break;
5878 case 3: /* fpatan */
d3eb5eae 5879 gen_helper_fpatan(cpu_env);
2c0262af
FB
5880 break;
5881 case 4: /* fxtract */
d3eb5eae 5882 gen_helper_fxtract(cpu_env);
2c0262af
FB
5883 break;
5884 case 5: /* fprem1 */
d3eb5eae 5885 gen_helper_fprem1(cpu_env);
2c0262af
FB
5886 break;
5887 case 6: /* fdecstp */
d3eb5eae 5888 gen_helper_fdecstp(cpu_env);
2c0262af
FB
5889 break;
5890 default:
5891 case 7: /* fincstp */
d3eb5eae 5892 gen_helper_fincstp(cpu_env);
2c0262af
FB
5893 break;
5894 }
5895 break;
5896 case 0x0f: /* grp d9/7 */
5897 switch(rm) {
5898 case 0: /* fprem */
d3eb5eae 5899 gen_helper_fprem(cpu_env);
2c0262af
FB
5900 break;
5901 case 1: /* fyl2xp1 */
d3eb5eae 5902 gen_helper_fyl2xp1(cpu_env);
2c0262af
FB
5903 break;
5904 case 2: /* fsqrt */
d3eb5eae 5905 gen_helper_fsqrt(cpu_env);
2c0262af
FB
5906 break;
5907 case 3: /* fsincos */
d3eb5eae 5908 gen_helper_fsincos(cpu_env);
2c0262af
FB
5909 break;
5910 case 5: /* fscale */
d3eb5eae 5911 gen_helper_fscale(cpu_env);
2c0262af
FB
5912 break;
5913 case 4: /* frndint */
d3eb5eae 5914 gen_helper_frndint(cpu_env);
2c0262af
FB
5915 break;
5916 case 6: /* fsin */
d3eb5eae 5917 gen_helper_fsin(cpu_env);
2c0262af
FB
5918 break;
5919 default:
5920 case 7: /* fcos */
d3eb5eae 5921 gen_helper_fcos(cpu_env);
2c0262af
FB
5922 break;
5923 }
5924 break;
5925 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
5926 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
5927 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
5928 {
5929 int op1;
3b46e624 5930
2c0262af
FB
5931 op1 = op & 7;
5932 if (op >= 0x20) {
a7812ae4 5933 gen_helper_fp_arith_STN_ST0(op1, opreg);
2c0262af 5934 if (op >= 0x30)
d3eb5eae 5935 gen_helper_fpop(cpu_env);
2c0262af 5936 } else {
d3eb5eae 5937 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
a7812ae4 5938 gen_helper_fp_arith_ST0_FT0(op1);
2c0262af
FB
5939 }
5940 }
5941 break;
5942 case 0x02: /* fcom */
c169c906 5943 case 0x22: /* fcom2, undocumented op */
d3eb5eae
BS
5944 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
5945 gen_helper_fcom_ST0_FT0(cpu_env);
2c0262af
FB
5946 break;
5947 case 0x03: /* fcomp */
c169c906
FB
5948 case 0x23: /* fcomp3, undocumented op */
5949 case 0x32: /* fcomp5, undocumented op */
d3eb5eae
BS
5950 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
5951 gen_helper_fcom_ST0_FT0(cpu_env);
5952 gen_helper_fpop(cpu_env);
2c0262af
FB
5953 break;
5954 case 0x15: /* da/5 */
5955 switch(rm) {
5956 case 1: /* fucompp */
d3eb5eae
BS
5957 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
5958 gen_helper_fucom_ST0_FT0(cpu_env);
5959 gen_helper_fpop(cpu_env);
5960 gen_helper_fpop(cpu_env);
2c0262af
FB
5961 break;
5962 default:
b9f9c5b4 5963 goto unknown_op;
2c0262af
FB
5964 }
5965 break;
5966 case 0x1c:
5967 switch(rm) {
5968 case 0: /* feni (287 only, just do nop here) */
5969 break;
5970 case 1: /* fdisi (287 only, just do nop here) */
5971 break;
5972 case 2: /* fclex */
d3eb5eae 5973 gen_helper_fclex(cpu_env);
2c0262af
FB
5974 break;
5975 case 3: /* fninit */
d3eb5eae 5976 gen_helper_fninit(cpu_env);
2c0262af
FB
5977 break;
5978 case 4: /* fsetpm (287 only, just do nop here) */
5979 break;
5980 default:
b9f9c5b4 5981 goto unknown_op;
2c0262af
FB
5982 }
5983 break;
5984 case 0x1d: /* fucomi */
bff93281
PM
5985 if (!(s->cpuid_features & CPUID_CMOV)) {
5986 goto illegal_op;
5987 }
773cdfcc 5988 gen_update_cc_op(s);
d3eb5eae
BS
5989 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
5990 gen_helper_fucomi_ST0_FT0(cpu_env);
3ca51d07 5991 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
5992 break;
5993 case 0x1e: /* fcomi */
bff93281
PM
5994 if (!(s->cpuid_features & CPUID_CMOV)) {
5995 goto illegal_op;
5996 }
773cdfcc 5997 gen_update_cc_op(s);
d3eb5eae
BS
5998 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
5999 gen_helper_fcomi_ST0_FT0(cpu_env);
3ca51d07 6000 set_cc_op(s, CC_OP_EFLAGS);
2c0262af 6001 break;
658c8bda 6002 case 0x28: /* ffree sti */
d3eb5eae 6003 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
5fafdf24 6004 break;
2c0262af 6005 case 0x2a: /* fst sti */
d3eb5eae 6006 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
2c0262af
FB
6007 break;
6008 case 0x2b: /* fstp sti */
c169c906
FB
6009 case 0x0b: /* fstp1 sti, undocumented op */
6010 case 0x3a: /* fstp8 sti, undocumented op */
6011 case 0x3b: /* fstp9 sti, undocumented op */
d3eb5eae
BS
6012 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
6013 gen_helper_fpop(cpu_env);
2c0262af
FB
6014 break;
6015 case 0x2c: /* fucom st(i) */
d3eb5eae
BS
6016 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6017 gen_helper_fucom_ST0_FT0(cpu_env);
2c0262af
FB
6018 break;
6019 case 0x2d: /* fucomp st(i) */
d3eb5eae
BS
6020 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6021 gen_helper_fucom_ST0_FT0(cpu_env);
6022 gen_helper_fpop(cpu_env);
2c0262af
FB
6023 break;
6024 case 0x33: /* de/3 */
6025 switch(rm) {
6026 case 1: /* fcompp */
d3eb5eae
BS
6027 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
6028 gen_helper_fcom_ST0_FT0(cpu_env);
6029 gen_helper_fpop(cpu_env);
6030 gen_helper_fpop(cpu_env);
2c0262af
FB
6031 break;
6032 default:
b9f9c5b4 6033 goto unknown_op;
2c0262af
FB
6034 }
6035 break;
c169c906 6036 case 0x38: /* ffreep sti, undocumented op */
d3eb5eae
BS
6037 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
6038 gen_helper_fpop(cpu_env);
c169c906 6039 break;
2c0262af
FB
6040 case 0x3c: /* df/4 */
6041 switch(rm) {
6042 case 0:
d3eb5eae 6043 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
1d1cc4d0
RH
6044 tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
6045 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
2c0262af
FB
6046 break;
6047 default:
b9f9c5b4 6048 goto unknown_op;
2c0262af
FB
6049 }
6050 break;
6051 case 0x3d: /* fucomip */
bff93281
PM
6052 if (!(s->cpuid_features & CPUID_CMOV)) {
6053 goto illegal_op;
6054 }
773cdfcc 6055 gen_update_cc_op(s);
d3eb5eae
BS
6056 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6057 gen_helper_fucomi_ST0_FT0(cpu_env);
6058 gen_helper_fpop(cpu_env);
3ca51d07 6059 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
6060 break;
6061 case 0x3e: /* fcomip */
bff93281
PM
6062 if (!(s->cpuid_features & CPUID_CMOV)) {
6063 goto illegal_op;
6064 }
773cdfcc 6065 gen_update_cc_op(s);
d3eb5eae
BS
6066 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6067 gen_helper_fcomi_ST0_FT0(cpu_env);
6068 gen_helper_fpop(cpu_env);
3ca51d07 6069 set_cc_op(s, CC_OP_EFLAGS);
2c0262af 6070 break;
a2cc3b24
FB
6071 case 0x10 ... 0x13: /* fcmovxx */
6072 case 0x18 ... 0x1b:
6073 {
42a268c2
RH
6074 int op1;
6075 TCGLabel *l1;
d70040bc 6076 static const uint8_t fcmov_cc[8] = {
a2cc3b24
FB
6077 (JCC_B << 1),
6078 (JCC_Z << 1),
6079 (JCC_BE << 1),
6080 (JCC_P << 1),
6081 };
bff93281
PM
6082
6083 if (!(s->cpuid_features & CPUID_CMOV)) {
6084 goto illegal_op;
6085 }
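                    /* Opcode bit 3 selects the negated conditions; the
                       branch skips the ST0 load when the fcmov
                       condition is false.  */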
1e4840bf 6086 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
19e6c4b8 6087 l1 = gen_new_label();
dc259201 6088 gen_jcc1_noeob(s, op1, l1);
d3eb5eae 6089 gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
19e6c4b8 6090 gen_set_label(l1);
a2cc3b24
FB
6091 }
6092 break;
2c0262af 6093 default:
b9f9c5b4 6094 goto unknown_op;
2c0262af
FB
6095 }
6096 }
6097 break;
6098 /************************/
6099 /* string ops */
6100
6101 case 0xa4: /* movsS */
6102 case 0xa5:
ab4e4aec 6103 ot = mo_b_d(b, dflag);
2c0262af
FB
6104 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6105 gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6106 } else {
6107 gen_movs(s, ot);
6108 }
6109 break;
3b46e624 6110
2c0262af
FB
6111 case 0xaa: /* stosS */
6112 case 0xab:
ab4e4aec 6113 ot = mo_b_d(b, dflag);
2c0262af
FB
6114 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6115 gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6116 } else {
6117 gen_stos(s, ot);
6118 }
6119 break;
6120 case 0xac: /* lodsS */
6121 case 0xad:
ab4e4aec 6122 ot = mo_b_d(b, dflag);
2c0262af
FB
6123 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6124 gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6125 } else {
6126 gen_lods(s, ot);
6127 }
6128 break;
6129 case 0xae: /* scasS */
6130 case 0xaf:
ab4e4aec 6131 ot = mo_b_d(b, dflag);
2c0262af
FB
6132 if (prefixes & PREFIX_REPNZ) {
6133 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6134 } else if (prefixes & PREFIX_REPZ) {
6135 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6136 } else {
6137 gen_scas(s, ot);
2c0262af
FB
6138 }
6139 break;
6140
6141 case 0xa6: /* cmpsS */
6142 case 0xa7:
ab4e4aec 6143 ot = mo_b_d(b, dflag);
2c0262af
FB
6144 if (prefixes & PREFIX_REPNZ) {
6145 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6146 } else if (prefixes & PREFIX_REPZ) {
6147 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6148 } else {
6149 gen_cmps(s, ot);
2c0262af
FB
6150 }
6151 break;
6152 case 0x6c: /* insS */
6153 case 0x6d:
ab4e4aec 6154 ot = mo_b_d32(b, dflag);
1d1cc4d0 6155 tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
b8b6a50b
FB
6156 gen_check_io(s, ot, pc_start - s->cs_base,
6157 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
f115e911
FB
6158 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6159 gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
2c0262af 6160 } else {
f115e911 6161 gen_ins(s, ot);
bd79255d 6162 if (s->tb->cflags & CF_USE_ICOUNT) {
2e70f6ef
PB
6163 gen_jmp(s, s->pc - s->cs_base);
6164 }
2c0262af
FB
6165 }
6166 break;
6167 case 0x6e: /* outsS */
6168 case 0x6f:
ab4e4aec 6169 ot = mo_b_d32(b, dflag);
1d1cc4d0 6170 tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
b8b6a50b
FB
6171 gen_check_io(s, ot, pc_start - s->cs_base,
6172 svm_is_rep(prefixes) | 4);
f115e911
FB
6173 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6174 gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
2c0262af 6175 } else {
f115e911 6176 gen_outs(s, ot);
bd79255d 6177 if (s->tb->cflags & CF_USE_ICOUNT) {
2e70f6ef
PB
6178 gen_jmp(s, s->pc - s->cs_base);
6179 }
2c0262af
FB
6180 }
6181 break;
6182
6183 /************************/
6184 /* port I/O */
0573fbfc 6185
2c0262af
FB
6186 case 0xe4:
6187 case 0xe5:
ab4e4aec 6188 ot = mo_b_d32(b, dflag);
0af10c86 6189 val = cpu_ldub_code(env, s->pc++);
1d1cc4d0 6190 tcg_gen_movi_tl(cpu_T0, val);
b8b6a50b
FB
6191 gen_check_io(s, ot, pc_start - s->cs_base,
6192 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
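        /* When icount is enabled the port access must be bracketed by
           gen_io_start()/gen_io_end() and the TB is ended with a jump
           right after the instruction.  */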
bd79255d 6193 if (s->tb->cflags & CF_USE_ICOUNT) {
2e70f6ef 6194 gen_io_start();
bd79255d 6195 }
1b90d56e 6196 tcg_gen_movi_i32(cpu_tmp2_i32, val);
1d1cc4d0
RH
6197 gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32);
6198 gen_op_mov_reg_v(ot, R_EAX, cpu_T1);
5223a942 6199 gen_bpt_io(s, cpu_tmp2_i32, ot);
bd79255d 6200 if (s->tb->cflags & CF_USE_ICOUNT) {
2e70f6ef
PB
6201 gen_io_end();
6202 gen_jmp(s, s->pc - s->cs_base);
6203 }
2c0262af
FB
6204 break;
6205 case 0xe6:
6206 case 0xe7:
ab4e4aec 6207 ot = mo_b_d32(b, dflag);
0af10c86 6208 val = cpu_ldub_code(env, s->pc++);
1d1cc4d0 6209 tcg_gen_movi_tl(cpu_T0, val);
b8b6a50b
FB
6210 gen_check_io(s, ot, pc_start - s->cs_base,
6211 svm_is_rep(prefixes));
1d1cc4d0 6212 gen_op_mov_v_reg(ot, cpu_T1, R_EAX);
b8b6a50b 6213
bd79255d 6214 if (s->tb->cflags & CF_USE_ICOUNT) {
2e70f6ef 6215 gen_io_start();
bd79255d 6216 }
1b90d56e 6217 tcg_gen_movi_i32(cpu_tmp2_i32, val);
1d1cc4d0 6218 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
a7812ae4 6219 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
5223a942 6220 gen_bpt_io(s, cpu_tmp2_i32, ot);
bd79255d 6221 if (s->tb->cflags & CF_USE_ICOUNT) {
2e70f6ef
PB
6222 gen_io_end();
6223 gen_jmp(s, s->pc - s->cs_base);
6224 }
2c0262af
FB
6225 break;
6226 case 0xec:
6227 case 0xed:
ab4e4aec 6228 ot = mo_b_d32(b, dflag);
1d1cc4d0 6229 tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
b8b6a50b
FB
6230 gen_check_io(s, ot, pc_start - s->cs_base,
6231 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
bd79255d 6232 if (s->tb->cflags & CF_USE_ICOUNT) {
2e70f6ef 6233 gen_io_start();
bd79255d 6234 }
1d1cc4d0
RH
6235 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
6236 gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32);
6237 gen_op_mov_reg_v(ot, R_EAX, cpu_T1);
5223a942 6238 gen_bpt_io(s, cpu_tmp2_i32, ot);
bd79255d 6239 if (s->tb->cflags & CF_USE_ICOUNT) {
2e70f6ef
PB
6240 gen_io_end();
6241 gen_jmp(s, s->pc - s->cs_base);
6242 }
2c0262af
FB
6243 break;
6244 case 0xee:
6245 case 0xef:
ab4e4aec 6246 ot = mo_b_d32(b, dflag);
1d1cc4d0 6247 tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
b8b6a50b
FB
6248 gen_check_io(s, ot, pc_start - s->cs_base,
6249 svm_is_rep(prefixes));
1d1cc4d0 6250 gen_op_mov_v_reg(ot, cpu_T1, R_EAX);
b8b6a50b 6251
bd79255d 6252 if (s->tb->cflags & CF_USE_ICOUNT) {
2e70f6ef 6253 gen_io_start();
bd79255d 6254 }
1d1cc4d0
RH
6255 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
6256 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
a7812ae4 6257 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
5223a942 6258 gen_bpt_io(s, cpu_tmp2_i32, ot);
bd79255d 6259 if (s->tb->cflags & CF_USE_ICOUNT) {
2e70f6ef
PB
6260 gen_io_end();
6261 gen_jmp(s, s->pc - s->cs_base);
6262 }
2c0262af
FB
6263 break;
6264
6265 /************************/
6266 /* control */
6267 case 0xc2: /* ret im */
0af10c86 6268 val = cpu_ldsw_code(env, s->pc);
2c0262af 6269 s->pc += 2;
8e31d234
RH
6270 ot = gen_pop_T0(s);
6271 gen_stack_update(s, val + (1 << ot));
6272 /* Note that gen_pop_T0 uses a zero-extending load. */
1d1cc4d0 6273 gen_op_jmp_v(cpu_T0);
7d117ce8 6274 gen_bnd_jmp(s);
2c0262af
FB
6275 gen_eob(s);
6276 break;
6277 case 0xc3: /* ret */
8e31d234
RH
6278 ot = gen_pop_T0(s);
6279 gen_pop_update(s, ot);
6280 /* Note that gen_pop_T0 uses a zero-extending load. */
1d1cc4d0 6281 gen_op_jmp_v(cpu_T0);
7d117ce8 6282 gen_bnd_jmp(s);
2c0262af
FB
6283 gen_eob(s);
6284 break;
6285 case 0xca: /* lret im */
0af10c86 6286 val = cpu_ldsw_code(env, s->pc);
2c0262af
FB
6287 s->pc += 2;
6288 do_lret:
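        /* In protected mode the helper performs the privilege and
           segment checks; otherwise EIP and CS are popped directly and
           the stack pointer is adjusted by the immediate.  */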
6289 if (s->pe && !s->vm86) {
773cdfcc 6290 gen_update_cc_op(s);
14ce26e7 6291 gen_jmp_im(pc_start - s->cs_base);
ab4e4aec 6292 gen_helper_lret_protected(cpu_env, tcg_const_i32(dflag - 1),
a7812ae4 6293 tcg_const_i32(val));
2c0262af
FB
6294 } else {
6295 gen_stack_A0(s);
6296 /* pop offset */
1d1cc4d0 6297 gen_op_ld_v(s, dflag, cpu_T0, cpu_A0);
2c0262af
FB
6298 /* NOTE: keeping EIP updated is not a problem in case of
6299 exception */
1d1cc4d0 6300 gen_op_jmp_v(cpu_T0);
2c0262af 6301 /* pop selector */
4e85057b 6302 gen_add_A0_im(s, 1 << dflag);
1d1cc4d0 6303 gen_op_ld_v(s, dflag, cpu_T0, cpu_A0);
3bd7da9e 6304 gen_op_movl_seg_T0_vm(R_CS);
2c0262af 6305 /* add stack offset */
ab4e4aec 6306 gen_stack_update(s, val + (2 << dflag));
2c0262af
FB
6307 }
6308 gen_eob(s);
6309 break;
6310 case 0xcb: /* lret */
6311 val = 0;
6312 goto do_lret;
6313 case 0xcf: /* iret */
872929aa 6314 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
2c0262af
FB
6315 if (!s->pe) {
6316 /* real mode */
ab4e4aec 6317 gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
3ca51d07 6318 set_cc_op(s, CC_OP_EFLAGS);
f115e911
FB
6319 } else if (s->vm86) {
6320 if (s->iopl != 3) {
6321 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6322 } else {
ab4e4aec 6323 gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
3ca51d07 6324 set_cc_op(s, CC_OP_EFLAGS);
f115e911 6325 }
2c0262af 6326 } else {
ab4e4aec 6327 gen_helper_iret_protected(cpu_env, tcg_const_i32(dflag - 1),
a7812ae4 6328 tcg_const_i32(s->pc - s->cs_base));
3ca51d07 6329 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
6330 }
6331 gen_eob(s);
6332 break;
6333 case 0xe8: /* call im */
6334 {
ab4e4aec 6335 if (dflag != MO_16) {
4ba9938c 6336 tval = (int32_t)insn_get(env, s, MO_32);
ab4e4aec 6337 } else {
4ba9938c 6338 tval = (int16_t)insn_get(env, s, MO_16);
ab4e4aec 6339 }
2c0262af 6340 next_eip = s->pc - s->cs_base;
14ce26e7 6341 tval += next_eip;
ab4e4aec 6342 if (dflag == MO_16) {
14ce26e7 6343 tval &= 0xffff;
ab4e4aec 6344 } else if (!CODE64(s)) {
99596385 6345 tval &= 0xffffffff;
ab4e4aec 6346 }
1d1cc4d0
RH
6347 tcg_gen_movi_tl(cpu_T0, next_eip);
6348 gen_push_v(s, cpu_T0);
7d117ce8 6349 gen_bnd_jmp(s);
14ce26e7 6350 gen_jmp(s, tval);
2c0262af
FB
6351 }
6352 break;
6353 case 0x9a: /* lcall im */
6354 {
6355 unsigned int selector, offset;
3b46e624 6356
14ce26e7
FB
6357 if (CODE64(s))
6358 goto illegal_op;
ab4e4aec 6359 ot = dflag;
0af10c86 6360 offset = insn_get(env, s, ot);
4ba9938c 6361 selector = insn_get(env, s, MO_16);
3b46e624 6362
1d1cc4d0
RH
6363 tcg_gen_movi_tl(cpu_T0, selector);
6364 tcg_gen_movi_tl(cpu_T1, offset);
2c0262af
FB
6365 }
6366 goto do_lcall;
ecada8a2 6367 case 0xe9: /* jmp im */
ab4e4aec 6368 if (dflag != MO_16) {
4ba9938c 6369 tval = (int32_t)insn_get(env, s, MO_32);
ab4e4aec 6370 } else {
4ba9938c 6371 tval = (int16_t)insn_get(env, s, MO_16);
ab4e4aec 6372 }
14ce26e7 6373 tval += s->pc - s->cs_base;
ab4e4aec 6374 if (dflag == MO_16) {
14ce26e7 6375 tval &= 0xffff;
ab4e4aec 6376 } else if (!CODE64(s)) {
32938e12 6377 tval &= 0xffffffff;
ab4e4aec 6378 }
7d117ce8 6379 gen_bnd_jmp(s);
14ce26e7 6380 gen_jmp(s, tval);
2c0262af
FB
6381 break;
6382 case 0xea: /* ljmp im */
6383 {
6384 unsigned int selector, offset;
6385
14ce26e7
FB
6386 if (CODE64(s))
6387 goto illegal_op;
ab4e4aec 6388 ot = dflag;
0af10c86 6389 offset = insn_get(env, s, ot);
4ba9938c 6390 selector = insn_get(env, s, MO_16);
3b46e624 6391
1d1cc4d0
RH
6392 tcg_gen_movi_tl(cpu_T0, selector);
6393 tcg_gen_movi_tl(cpu_T1, offset);
2c0262af
FB
6394 }
6395 goto do_ljmp;
6396 case 0xeb: /* jmp Jb */
4ba9938c 6397 tval = (int8_t)insn_get(env, s, MO_8);
14ce26e7 6398 tval += s->pc - s->cs_base;
ab4e4aec 6399 if (dflag == MO_16) {
14ce26e7 6400 tval &= 0xffff;
ab4e4aec 6401 }
14ce26e7 6402 gen_jmp(s, tval);
2c0262af
FB
6403 break;
6404 case 0x70 ... 0x7f: /* jcc Jb */
4ba9938c 6405 tval = (int8_t)insn_get(env, s, MO_8);
2c0262af
FB
6406 goto do_jcc;
6407 case 0x180 ... 0x18f: /* jcc Jv */
ab4e4aec 6408 if (dflag != MO_16) {
4ba9938c 6409 tval = (int32_t)insn_get(env, s, MO_32);
2c0262af 6410 } else {
4ba9938c 6411 tval = (int16_t)insn_get(env, s, MO_16);
2c0262af
FB
6412 }
6413 do_jcc:
6414 next_eip = s->pc - s->cs_base;
14ce26e7 6415 tval += next_eip;
ab4e4aec 6416 if (dflag == MO_16) {
14ce26e7 6417 tval &= 0xffff;
ab4e4aec 6418 }
7d117ce8 6419 gen_bnd_jmp(s);
14ce26e7 6420 gen_jcc(s, b, tval, next_eip);
2c0262af
FB
6421 break;
6422
6423 case 0x190 ... 0x19f: /* setcc Gv */
0af10c86 6424 modrm = cpu_ldub_code(env, s->pc++);
1d1cc4d0 6425 gen_setcc1(s, b, cpu_T0);
4ba9938c 6426 gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
2c0262af
FB
6427 break;
6428 case 0x140 ... 0x14f: /* cmov Gv, Ev */
bff93281
PM
6429 if (!(s->cpuid_features & CPUID_CMOV)) {
6430 goto illegal_op;
6431 }
ab4e4aec 6432 ot = dflag;
f32d3781
PB
6433 modrm = cpu_ldub_code(env, s->pc++);
6434 reg = ((modrm >> 3) & 7) | rex_r;
6435 gen_cmovcc1(env, s, ot, b, modrm, reg);
2c0262af 6436 break;
3b46e624 6437
2c0262af
FB
6438 /************************/
6439 /* flags */
6440 case 0x9c: /* pushf */
872929aa 6441 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
2c0262af
FB
6442 if (s->vm86 && s->iopl != 3) {
6443 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6444 } else {
773cdfcc 6445 gen_update_cc_op(s);
1d1cc4d0
RH
6446 gen_helper_read_eflags(cpu_T0, cpu_env);
6447 gen_push_v(s, cpu_T0);
2c0262af
FB
6448 }
6449 break;
6450 case 0x9d: /* popf */
872929aa 6451 gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
2c0262af
FB
6452 if (s->vm86 && s->iopl != 3) {
6453 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6454 } else {
8e31d234 6455 ot = gen_pop_T0(s);
2c0262af 6456 if (s->cpl == 0) {
ab4e4aec 6457 if (dflag != MO_16) {
1d1cc4d0 6458 gen_helper_write_eflags(cpu_env, cpu_T0,
f0967a1a
BS
6459 tcg_const_i32((TF_MASK | AC_MASK |
6460 ID_MASK | NT_MASK |
6461 IF_MASK |
6462 IOPL_MASK)));
2c0262af 6463 } else {
1d1cc4d0 6464 gen_helper_write_eflags(cpu_env, cpu_T0,
f0967a1a
BS
6465 tcg_const_i32((TF_MASK | AC_MASK |
6466 ID_MASK | NT_MASK |
6467 IF_MASK | IOPL_MASK)
6468 & 0xffff));
2c0262af
FB
6469 }
6470 } else {
4136f33c 6471 if (s->cpl <= s->iopl) {
ab4e4aec 6472 if (dflag != MO_16) {
1d1cc4d0 6473 gen_helper_write_eflags(cpu_env, cpu_T0,
f0967a1a
BS
6474 tcg_const_i32((TF_MASK |
6475 AC_MASK |
6476 ID_MASK |
6477 NT_MASK |
6478 IF_MASK)));
4136f33c 6479 } else {
1d1cc4d0 6480 gen_helper_write_eflags(cpu_env, cpu_T0,
f0967a1a
BS
6481 tcg_const_i32((TF_MASK |
6482 AC_MASK |
6483 ID_MASK |
6484 NT_MASK |
6485 IF_MASK)
6486 & 0xffff));
4136f33c 6487 }
2c0262af 6488 } else {
ab4e4aec 6489 if (dflag != MO_16) {
1d1cc4d0 6490 gen_helper_write_eflags(cpu_env, cpu_T0,
f0967a1a
BS
6491 tcg_const_i32((TF_MASK | AC_MASK |
6492 ID_MASK | NT_MASK)));
4136f33c 6493 } else {
1d1cc4d0 6494 gen_helper_write_eflags(cpu_env, cpu_T0,
f0967a1a
BS
6495 tcg_const_i32((TF_MASK | AC_MASK |
6496 ID_MASK | NT_MASK)
6497 & 0xffff));
4136f33c 6498 }
2c0262af
FB
6499 }
6500 }
8e31d234 6501 gen_pop_update(s, ot);
3ca51d07 6502 set_cc_op(s, CC_OP_EFLAGS);
a9321a4d 6503 /* abort translation because the TF/AC flags may change */
14ce26e7 6504 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
6505 gen_eob(s);
6506 }
6507 break;
6508 case 0x9e: /* sahf */
12e26b75 6509 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
14ce26e7 6510 goto illegal_op;
1d1cc4d0 6511 gen_op_mov_v_reg(MO_8, cpu_T0, R_AH);
d229edce 6512 gen_compute_eflags(s);
bd7a7b33 6513 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
1d1cc4d0
RH
6514 tcg_gen_andi_tl(cpu_T0, cpu_T0, CC_S | CC_Z | CC_A | CC_P | CC_C);
6515 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T0);
2c0262af
FB
6516 break;
6517 case 0x9f: /* lahf */
12e26b75 6518 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
14ce26e7 6519 goto illegal_op;
d229edce 6520 gen_compute_eflags(s);
bd7a7b33 6521 /* Note: gen_compute_eflags() only gives the condition codes */
1d1cc4d0
RH
6522 tcg_gen_ori_tl(cpu_T0, cpu_cc_src, 0x02);
6523 gen_op_mov_reg_v(MO_8, R_AH, cpu_T0);
2c0262af
FB
6524 break;
6525 case 0xf5: /* cmc */
d229edce 6526 gen_compute_eflags(s);
bd7a7b33 6527 tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
2c0262af
FB
6528 break;
6529 case 0xf8: /* clc */
d229edce 6530 gen_compute_eflags(s);
bd7a7b33 6531 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
2c0262af
FB
6532 break;
6533 case 0xf9: /* stc */
d229edce 6534 gen_compute_eflags(s);
bd7a7b33 6535 tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
2c0262af
FB
6536 break;
6537 case 0xfc: /* cld */
b6abf97d 6538 tcg_gen_movi_i32(cpu_tmp2_i32, 1);
317ac620 6539 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
2c0262af
FB
6540 break;
6541 case 0xfd: /* std */
b6abf97d 6542 tcg_gen_movi_i32(cpu_tmp2_i32, -1);
317ac620 6543 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
2c0262af
FB
6544 break;
6545
6546 /************************/
6547 /* bit operations */
6548 case 0x1ba: /* bt/bts/btr/btc Gv, im */
ab4e4aec 6549 ot = dflag;
0af10c86 6550 modrm = cpu_ldub_code(env, s->pc++);
33698e5f 6551 op = (modrm >> 3) & 7;
2c0262af 6552 mod = (modrm >> 6) & 3;
14ce26e7 6553 rm = (modrm & 7) | REX_B(s);
2c0262af 6554 if (mod != 3) {
14ce26e7 6555 s->rip_offset = 1;
4eeb3939 6556 gen_lea_modrm(env, s, modrm);
1d1cc4d0 6557 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
2c0262af 6558 } else {
1d1cc4d0 6559 gen_op_mov_v_reg(ot, cpu_T0, rm);
2c0262af
FB
6560 }
6561 /* load shift */
0af10c86 6562 val = cpu_ldub_code(env, s->pc++);
1d1cc4d0 6563 tcg_gen_movi_tl(cpu_T1, val);
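        /* Only /4../7 (bt, bts, btr, btc) are defined for the
           immediate form; /0../3 are undefined opcodes.  */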
2c0262af 6564 if (op < 4)
b9f9c5b4 6565 goto unknown_op;
2c0262af 6566 op -= 4;
f484d386 6567 goto bt_op;
2c0262af
FB
6568 case 0x1a3: /* bt Gv, Ev */
6569 op = 0;
6570 goto do_btx;
6571 case 0x1ab: /* bts */
6572 op = 1;
6573 goto do_btx;
6574 case 0x1b3: /* btr */
6575 op = 2;
6576 goto do_btx;
6577 case 0x1bb: /* btc */
6578 op = 3;
6579 do_btx:
ab4e4aec 6580 ot = dflag;
0af10c86 6581 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 6582 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af 6583 mod = (modrm >> 6) & 3;
14ce26e7 6584 rm = (modrm & 7) | REX_B(s);
1d1cc4d0 6585 gen_op_mov_v_reg(MO_32, cpu_T1, reg);
2c0262af 6586 if (mod != 3) {
4eeb3939 6587 gen_lea_modrm(env, s, modrm);
2c0262af 6588 /* specific case: we need to add a displacement */
1d1cc4d0
RH
6589 gen_exts(ot, cpu_T1);
6590 tcg_gen_sari_tl(cpu_tmp0, cpu_T1, 3 + ot);
f484d386
FB
6591 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
6592 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
1d1cc4d0 6593 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
2c0262af 6594 } else {
1d1cc4d0 6595 gen_op_mov_v_reg(ot, cpu_T0, rm);
2c0262af 6596 }
f484d386 6597 bt_op:
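        /* T1 now holds the bit index reduced modulo the operand width;
           cpu_tmp4 gets the operand shifted so that the tested bit sits
           in bit 0, from which CF is later taken.  */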
1d1cc4d0
RH
6598 tcg_gen_andi_tl(cpu_T1, cpu_T1, (1 << (3 + ot)) - 1);
6599 tcg_gen_shr_tl(cpu_tmp4, cpu_T0, cpu_T1);
f484d386
FB
6600 switch(op) {
6601 case 0:
f484d386
FB
6602 break;
6603 case 1:
f484d386 6604 tcg_gen_movi_tl(cpu_tmp0, 1);
1d1cc4d0
RH
6605 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T1);
6606 tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_tmp0);
f484d386
FB
6607 break;
6608 case 2:
f484d386 6609 tcg_gen_movi_tl(cpu_tmp0, 1);
1d1cc4d0
RH
6610 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T1);
6611 tcg_gen_andc_tl(cpu_T0, cpu_T0, cpu_tmp0);
f484d386
FB
6612 break;
6613 default:
6614 case 3:
f484d386 6615 tcg_gen_movi_tl(cpu_tmp0, 1);
1d1cc4d0
RH
6616 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T1);
6617 tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_tmp0);
f484d386
FB
6618 break;
6619 }
2c0262af 6620 if (op != 0) {
fd8ca9f6 6621 if (mod != 3) {
1d1cc4d0 6622 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
fd8ca9f6 6623 } else {
1d1cc4d0 6624 gen_op_mov_reg_v(ot, rm, cpu_T0);
fd8ca9f6 6625 }
dc1823ce
RH
6626 }
6627
6628 /* Delay all CC updates until after the store above. Note that
6629 C is the result of the test, Z is unchanged, and the others
6630 are all undefined. */
6631 switch (s->cc_op) {
6632 case CC_OP_MULB ... CC_OP_MULQ:
6633 case CC_OP_ADDB ... CC_OP_ADDQ:
6634 case CC_OP_ADCB ... CC_OP_ADCQ:
6635 case CC_OP_SUBB ... CC_OP_SUBQ:
6636 case CC_OP_SBBB ... CC_OP_SBBQ:
6637 case CC_OP_LOGICB ... CC_OP_LOGICQ:
6638 case CC_OP_INCB ... CC_OP_INCQ:
6639 case CC_OP_DECB ... CC_OP_DECQ:
6640 case CC_OP_SHLB ... CC_OP_SHLQ:
6641 case CC_OP_SARB ... CC_OP_SARQ:
6642 case CC_OP_BMILGB ... CC_OP_BMILGQ:
6643 /* Z was going to be computed from the non-zero status of CC_DST.
6644 We can get that same Z value (and the new C value) by leaving
6645 CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
6646 same width. */
f484d386 6647 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
dc1823ce
RH
6648 set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
6649 break;
6650 default:
6651 /* Otherwise, generate EFLAGS and replace the C bit. */
6652 gen_compute_eflags(s);
6653 tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, cpu_tmp4,
6654 ctz32(CC_C), 1);
6655 break;
2c0262af
FB
6656 }
6657 break;
321c5351
RH
6658 case 0x1bc: /* bsf / tzcnt */
6659 case 0x1bd: /* bsr / lzcnt */
ab4e4aec 6660 ot = dflag;
321c5351
RH
6661 modrm = cpu_ldub_code(env, s->pc++);
6662 reg = ((modrm >> 3) & 7) | rex_r;
6663 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
1d1cc4d0 6664 gen_extu(ot, cpu_T0);
321c5351
RH
6665
6666 /* Note that lzcnt and tzcnt are in different extensions. */
6667 if ((prefixes & PREFIX_REPZ)
6668 && (b & 1
6669 ? s->cpuid_ext3_features & CPUID_EXT3_ABM
6670 : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
6671 int size = 8 << ot;
1d1cc4d0 6672 tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
321c5351
RH
6673 if (b & 1) {
6674 /* For lzcnt, reduce the target_ulong result by the
6675 number of zeros that we expect to find at the top. */
1d1cc4d0
RH
6676 gen_helper_clz(cpu_T0, cpu_T0);
6677 tcg_gen_subi_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - size);
6191b059 6678 } else {
321c5351
RH
6679 /* For tzcnt, a zero input must return the operand size:
6680 force all bits outside the operand size to 1. */
6681 target_ulong mask = (target_ulong)-2 << (size - 1);
1d1cc4d0
RH
6682 tcg_gen_ori_tl(cpu_T0, cpu_T0, mask);
6683 gen_helper_ctz(cpu_T0, cpu_T0);
6191b059 6684 }
321c5351
RH
6685 /* For lzcnt/tzcnt, C and Z bits are defined and are
6686 related to the result. */
6687 gen_op_update1_cc();
6688 set_cc_op(s, CC_OP_BMILGB + ot);
6689 } else {
6690 /* For bsr/bsf, only the Z bit is defined and it is related
6691 to the input and not the result. */
1d1cc4d0 6692 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
321c5351
RH
6693 set_cc_op(s, CC_OP_LOGICB + ot);
6694 if (b & 1) {
6695 /* For bsr, return the bit index of the first 1 bit,
6696 not the count of leading zeros. */
1d1cc4d0
RH
6697 gen_helper_clz(cpu_T0, cpu_T0);
6698 tcg_gen_xori_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - 1);
321c5351 6699 } else {
1d1cc4d0 6700 gen_helper_ctz(cpu_T0, cpu_T0);
321c5351
RH
6701 }
6702 /* ??? The manual says that the output is undefined when the
6703 input is zero, but real hardware leaves it unchanged, and
6704 real programs appear to depend on that. */
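/* The movcond below keeps the old destination register value whenever the source (saved in CC_DST) was zero. */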
6705 tcg_gen_movi_tl(cpu_tmp0, 0);
1d1cc4d0
RH
6706 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T0, cpu_cc_dst, cpu_tmp0,
6707 cpu_regs[reg], cpu_T0);
6191b059 6708 }
1d1cc4d0 6709 gen_op_mov_reg_v(ot, reg, cpu_T0);
2c0262af
FB
6710 break;
6711 /************************/
6712 /* bcd */
6713 case 0x27: /* daa */
14ce26e7
FB
6714 if (CODE64(s))
6715 goto illegal_op;
773cdfcc 6716 gen_update_cc_op(s);
7923057b 6717 gen_helper_daa(cpu_env);
3ca51d07 6718 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
6719 break;
6720 case 0x2f: /* das */
14ce26e7
FB
6721 if (CODE64(s))
6722 goto illegal_op;
773cdfcc 6723 gen_update_cc_op(s);
7923057b 6724 gen_helper_das(cpu_env);
3ca51d07 6725 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
6726 break;
6727 case 0x37: /* aaa */
14ce26e7
FB
6728 if (CODE64(s))
6729 goto illegal_op;
773cdfcc 6730 gen_update_cc_op(s);
7923057b 6731 gen_helper_aaa(cpu_env);
3ca51d07 6732 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
6733 break;
6734 case 0x3f: /* aas */
14ce26e7
FB
6735 if (CODE64(s))
6736 goto illegal_op;
773cdfcc 6737 gen_update_cc_op(s);
7923057b 6738 gen_helper_aas(cpu_env);
3ca51d07 6739 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
6740 break;
6741 case 0xd4: /* aam */
14ce26e7
FB
6742 if (CODE64(s))
6743 goto illegal_op;
0af10c86 6744 val = cpu_ldub_code(env, s->pc++);
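/* aam: AH = AL / imm8, AL = AL % imm8; an immediate of 0 raises #DE, hence the check below */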
b6d7c3db
TS
6745 if (val == 0) {
6746 gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
6747 } else {
7923057b 6748 gen_helper_aam(cpu_env, tcg_const_i32(val));
3ca51d07 6749 set_cc_op(s, CC_OP_LOGICB);
b6d7c3db 6750 }
2c0262af
FB
6751 break;
6752 case 0xd5: /* aad */
14ce26e7
FB
6753 if (CODE64(s))
6754 goto illegal_op;
0af10c86 6755 val = cpu_ldub_code(env, s->pc++);
7923057b 6756 gen_helper_aad(cpu_env, tcg_const_i32(val));
3ca51d07 6757 set_cc_op(s, CC_OP_LOGICB);
2c0262af
FB
6758 break;
6759 /************************/
6760 /* misc */
6761 case 0x90: /* nop */
ab1f142b 6762 /* XXX: correct lock test for all insn */
7418027e 6763 if (prefixes & PREFIX_LOCK) {
ab1f142b 6764 goto illegal_op;
7418027e
RH
6765 }
6766 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
6767 if (REX_B(s)) {
6768 goto do_xchg_reg_eax;
6769 }
0573fbfc 6770 if (prefixes & PREFIX_REPZ) {
81f3053b
PB
6771 gen_update_cc_op(s);
6772 gen_jmp_im(pc_start - s->cs_base);
6773 gen_helper_pause(cpu_env, tcg_const_i32(s->pc - pc_start));
6774 s->is_jmp = DISAS_TB_JUMP;
0573fbfc 6775 }
2c0262af
FB
6776 break;
6777 case 0x9b: /* fwait */
5fafdf24 6778 if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
7eee2a50
FB
6779 (HF_MP_MASK | HF_TS_MASK)) {
6780 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
2ee73ac3 6781 } else {
d3eb5eae 6782 gen_helper_fwait(cpu_env);
7eee2a50 6783 }
2c0262af
FB
6784 break;
6785 case 0xcc: /* int3 */
6786 gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
6787 break;
6788 case 0xcd: /* int N */
0af10c86 6789 val = cpu_ldub_code(env, s->pc++);
f115e911 6790 if (s->vm86 && s->iopl != 3) {
5fafdf24 6791 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
f115e911
FB
6792 } else {
6793 gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
6794 }
2c0262af
FB
6795 break;
6796 case 0xce: /* into */
14ce26e7
FB
6797 if (CODE64(s))
6798 goto illegal_op;
773cdfcc 6799 gen_update_cc_op(s);
a8ede8ba 6800 gen_jmp_im(pc_start - s->cs_base);
4a7443be 6801 gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start));
2c0262af 6802 break;
0b97134b 6803#ifdef WANT_ICEBP
2c0262af 6804 case 0xf1: /* icebp (undocumented, exits to external debugger) */
872929aa 6805 gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
aba9d61e 6806#if 1
2c0262af 6807 gen_debug(s, pc_start - s->cs_base);
aba9d61e
FB
6808#else
6809 /* start debug */
bbd77c18 6810 tb_flush(CPU(x86_env_get_cpu(env)));
24537a01 6811 qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
aba9d61e 6812#endif
2c0262af 6813 break;
0b97134b 6814#endif
2c0262af
FB
6815 case 0xfa: /* cli */
6816 if (!s->vm86) {
6817 if (s->cpl <= s->iopl) {
f0967a1a 6818 gen_helper_cli(cpu_env);
2c0262af
FB
6819 } else {
6820 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6821 }
6822 } else {
6823 if (s->iopl == 3) {
f0967a1a 6824 gen_helper_cli(cpu_env);
2c0262af
FB
6825 } else {
6826 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6827 }
6828 }
6829 break;
6830 case 0xfb: /* sti */
f083d92c
RH
6831 if (s->vm86 ? s->iopl == 3 : s->cpl <= s->iopl) {
6832 gen_helper_sti(cpu_env);
6833 /* interrupts are enabled only after the insn following sti */
6834 gen_jmp_im(s->pc - s->cs_base);
6835 gen_eob_inhibit_irq(s, true);
2c0262af 6836 } else {
f083d92c 6837 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
2c0262af
FB
6838 }
6839 break;
6840 case 0x62: /* bound */
14ce26e7
FB
6841 if (CODE64(s))
6842 goto illegal_op;
ab4e4aec 6843 ot = dflag;
0af10c86 6844 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
6845 reg = (modrm >> 3) & 7;
6846 mod = (modrm >> 6) & 3;
6847 if (mod == 3)
6848 goto illegal_op;
1d1cc4d0 6849 gen_op_mov_v_reg(ot, cpu_T0, reg);
4eeb3939 6850 gen_lea_modrm(env, s, modrm);
1d1cc4d0 6851 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
4ba9938c 6852 if (ot == MO_16) {
92fc4b58
BS
6853 gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32);
6854 } else {
6855 gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32);
6856 }
2c0262af
FB
6857 break;
6858 case 0x1c8 ... 0x1cf: /* bswap reg */
14ce26e7
FB
6859 reg = (b & 7) | REX_B(s);
6860#ifdef TARGET_X86_64
ab4e4aec 6861 if (dflag == MO_64) {
1d1cc4d0
RH
6862 gen_op_mov_v_reg(MO_64, cpu_T0, reg);
6863 tcg_gen_bswap64_i64(cpu_T0, cpu_T0);
6864 gen_op_mov_reg_v(MO_64, reg, cpu_T0);
5fafdf24 6865 } else
8777643e 6866#endif
57fec1fe 6867 {
1d1cc4d0
RH
6868 gen_op_mov_v_reg(MO_32, cpu_T0, reg);
6869 tcg_gen_ext32u_tl(cpu_T0, cpu_T0);
6870 tcg_gen_bswap32_tl(cpu_T0, cpu_T0);
6871 gen_op_mov_reg_v(MO_32, reg, cpu_T0);
14ce26e7 6872 }
2c0262af
FB
6873 break;
6874 case 0xd6: /* salc */
14ce26e7
FB
6875 if (CODE64(s))
6876 goto illegal_op;
1d1cc4d0
RH
6877 gen_compute_eflags_c(s, cpu_T0);
6878 tcg_gen_neg_tl(cpu_T0, cpu_T0);
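/* AL = CF ? 0xff : 0x00 */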
6879 gen_op_mov_reg_v(MO_8, R_EAX, cpu_T0);
2c0262af
FB
6880 break;
6881 case 0xe0: /* loopnz */
6882 case 0xe1: /* loopz */
2c0262af
FB
6883 case 0xe2: /* loop */
6884 case 0xe3: /* jecxz */
14ce26e7 6885 {
42a268c2 6886 TCGLabel *l1, *l2, *l3;
14ce26e7 6887
4ba9938c 6888 tval = (int8_t)insn_get(env, s, MO_8);
14ce26e7
FB
6889 next_eip = s->pc - s->cs_base;
6890 tval += next_eip;
ab4e4aec 6891 if (dflag == MO_16) {
14ce26e7 6892 tval &= 0xffff;
ab4e4aec 6893 }
3b46e624 6894
14ce26e7
FB
6895 l1 = gen_new_label();
6896 l2 = gen_new_label();
6e0d8677 6897 l3 = gen_new_label();
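/* l1: branch taken (jump to tval); l3: branch not taken (fall through to next_eip); l2: common exit */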
14ce26e7 6898 b &= 3;
6e0d8677
FB
6899 switch(b) {
6900 case 0: /* loopnz */
6901 case 1: /* loopz */
1d71ddb1
RH
6902 gen_op_add_reg_im(s->aflag, R_ECX, -1);
6903 gen_op_jz_ecx(s->aflag, l3);
5bdb91b0 6904 gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
6e0d8677
FB
6905 break;
6906 case 2: /* loop */
1d71ddb1
RH
6907 gen_op_add_reg_im(s->aflag, R_ECX, -1);
6908 gen_op_jnz_ecx(s->aflag, l1);
6e0d8677
FB
6909 break;
6910 default:
6911 case 3: /* jcxz */
1d71ddb1 6912 gen_op_jz_ecx(s->aflag, l1);
6e0d8677 6913 break;
14ce26e7
FB
6914 }
6915
6e0d8677 6916 gen_set_label(l3);
14ce26e7 6917 gen_jmp_im(next_eip);
8e1c85e3 6918 tcg_gen_br(l2);
6e0d8677 6919
14ce26e7
FB
6920 gen_set_label(l1);
6921 gen_jmp_im(tval);
6922 gen_set_label(l2);
6923 gen_eob(s);
6924 }
2c0262af
FB
6925 break;
6926 case 0x130: /* wrmsr */
6927 case 0x132: /* rdmsr */
6928 if (s->cpl != 0) {
6929 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6930 } else {
773cdfcc 6931 gen_update_cc_op(s);
872929aa 6932 gen_jmp_im(pc_start - s->cs_base);
0573fbfc 6933 if (b & 2) {
4a7443be 6934 gen_helper_rdmsr(cpu_env);
0573fbfc 6935 } else {
4a7443be 6936 gen_helper_wrmsr(cpu_env);
0573fbfc 6937 }
2c0262af
FB
6938 }
6939 break;
6940 case 0x131: /* rdtsc */
773cdfcc 6941 gen_update_cc_op(s);
ecada8a2 6942 gen_jmp_im(pc_start - s->cs_base);
bd79255d 6943 if (s->tb->cflags & CF_USE_ICOUNT) {
efade670 6944 gen_io_start();
bd79255d 6945 }
4a7443be 6946 gen_helper_rdtsc(cpu_env);
bd79255d 6947 if (s->tb->cflags & CF_USE_ICOUNT) {
efade670
PB
6948 gen_io_end();
6949 gen_jmp(s, s->pc - s->cs_base);
6950 }
2c0262af 6951 break;
df01e0fc 6952 case 0x133: /* rdpmc */
773cdfcc 6953 gen_update_cc_op(s);
df01e0fc 6954 gen_jmp_im(pc_start - s->cs_base);
4a7443be 6955 gen_helper_rdpmc(cpu_env);
df01e0fc 6956 break;
023fe10d 6957 case 0x134: /* sysenter */
2436b61a 6958 /* For Intel, SYSENTER is valid in 64-bit mode */
0af10c86 6959 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
14ce26e7 6960 goto illegal_op;
023fe10d
FB
6961 if (!s->pe) {
6962 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6963 } else {
2999a0b2 6964 gen_helper_sysenter(cpu_env);
023fe10d
FB
6965 gen_eob(s);
6966 }
6967 break;
6968 case 0x135: /* sysexit */
2436b61a 6969 /* For Intel, SYSEXIT is valid in 64-bit mode */
0af10c86 6970 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
14ce26e7 6971 goto illegal_op;
023fe10d
FB
6972 if (!s->pe) {
6973 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6974 } else {
ab4e4aec 6975 gen_helper_sysexit(cpu_env, tcg_const_i32(dflag - 1));
023fe10d
FB
6976 gen_eob(s);
6977 }
6978 break;
14ce26e7
FB
6979#ifdef TARGET_X86_64
6980 case 0x105: /* syscall */
6981 /* XXX: is it usable in real mode ? */
728d803b 6982 gen_update_cc_op(s);
14ce26e7 6983 gen_jmp_im(pc_start - s->cs_base);
2999a0b2 6984 gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start));
14ce26e7
FB
6985 gen_eob(s);
6986 break;
6987 case 0x107: /* sysret */
6988 if (!s->pe) {
6989 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6990 } else {
ab4e4aec 6991 gen_helper_sysret(cpu_env, tcg_const_i32(dflag - 1));
aba9d61e 6992 /* condition codes are modified only in long mode */
3ca51d07
RH
6993 if (s->lma) {
6994 set_cc_op(s, CC_OP_EFLAGS);
6995 }
14ce26e7
FB
6996 gen_eob(s);
6997 }
6998 break;
6999#endif
2c0262af 7000 case 0x1a2: /* cpuid */
773cdfcc 7001 gen_update_cc_op(s);
9575cb94 7002 gen_jmp_im(pc_start - s->cs_base);
4a7443be 7003 gen_helper_cpuid(cpu_env);
2c0262af
FB
7004 break;
7005 case 0xf4: /* hlt */
7006 if (s->cpl != 0) {
7007 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7008 } else {
773cdfcc 7009 gen_update_cc_op(s);
94451178 7010 gen_jmp_im(pc_start - s->cs_base);
4a7443be 7011 gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
5779406a 7012 s->is_jmp = DISAS_TB_JUMP;
2c0262af
FB
7013 }
7014 break;
7015 case 0x100:
0af10c86 7016 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
7017 mod = (modrm >> 6) & 3;
7018 op = (modrm >> 3) & 7;
7019 switch(op) {
7020 case 0: /* sldt */
f115e911
FB
7021 if (!s->pe || s->vm86)
7022 goto illegal_op;
872929aa 7023 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
1d1cc4d0
RH
7024 tcg_gen_ld32u_tl(cpu_T0, cpu_env,
7025 offsetof(CPUX86State, ldt.selector));
ab4e4aec 7026 ot = mod == 3 ? dflag : MO_16;
0af10c86 7027 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
2c0262af
FB
7028 break;
7029 case 2: /* lldt */
f115e911
FB
7030 if (!s->pe || s->vm86)
7031 goto illegal_op;
2c0262af
FB
7032 if (s->cpl != 0) {
7033 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7034 } else {
872929aa 7035 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
4ba9938c 7036 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
1d1cc4d0 7037 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
2999a0b2 7038 gen_helper_lldt(cpu_env, cpu_tmp2_i32);
2c0262af
FB
7039 }
7040 break;
7041 case 1: /* str */
f115e911
FB
7042 if (!s->pe || s->vm86)
7043 goto illegal_op;
872929aa 7044 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
1d1cc4d0
RH
7045 tcg_gen_ld32u_tl(cpu_T0, cpu_env,
7046 offsetof(CPUX86State, tr.selector));
ab4e4aec 7047 ot = mod == 3 ? dflag : MO_16;
0af10c86 7048 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
2c0262af
FB
7049 break;
7050 case 3: /* ltr */
f115e911
FB
7051 if (!s->pe || s->vm86)
7052 goto illegal_op;
2c0262af
FB
7053 if (s->cpl != 0) {
7054 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7055 } else {
872929aa 7056 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
4ba9938c 7057 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
1d1cc4d0 7058 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
2999a0b2 7059 gen_helper_ltr(cpu_env, cpu_tmp2_i32);
2c0262af
FB
7060 }
7061 break;
7062 case 4: /* verr */
7063 case 5: /* verw */
f115e911
FB
7064 if (!s->pe || s->vm86)
7065 goto illegal_op;
4ba9938c 7066 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
773cdfcc 7067 gen_update_cc_op(s);
2999a0b2 7068 if (op == 4) {
1d1cc4d0 7069 gen_helper_verr(cpu_env, cpu_T0);
2999a0b2 7070 } else {
1d1cc4d0 7071 gen_helper_verw(cpu_env, cpu_T0);
2999a0b2 7072 }
3ca51d07 7073 set_cc_op(s, CC_OP_EFLAGS);
f115e911 7074 break;
2c0262af 7075 default:
b9f9c5b4 7076 goto unknown_op;
2c0262af
FB
7077 }
7078 break;
1906b2af 7079
2c0262af 7080 case 0x101:
0af10c86 7081 modrm = cpu_ldub_code(env, s->pc++);
1906b2af 7082 switch (modrm) {
880f8486 7083 CASE_MODRM_MEM_OP(0): /* sgdt */
872929aa 7084 gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
4eeb3939 7085 gen_lea_modrm(env, s, modrm);
1d1cc4d0
RH
7086 tcg_gen_ld32u_tl(cpu_T0,
7087 cpu_env, offsetof(CPUX86State, gdt.limit));
7088 gen_op_st_v(s, MO_16, cpu_T0, cpu_A0);
aba9d61e 7089 gen_add_A0_im(s, 2);
1d1cc4d0 7090 tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base));
ab4e4aec 7091 if (dflag == MO_16) {
1d1cc4d0 7092 tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
f0706f0c 7093 }
1d1cc4d0 7094 gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
2c0262af 7095 break;
1906b2af
RH
7096
7097 case 0xc8: /* monitor */
7098 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) {
7099 goto illegal_op;
3d7374c5 7100 }
1906b2af
RH
7101 gen_update_cc_op(s);
7102 gen_jmp_im(pc_start - s->cs_base);
7103 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EAX]);
7104 gen_extu(s->aflag, cpu_A0);
7105 gen_add_A0_ds_seg(s);
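/* the monitored address comes from rAX, truncated to the address size and offset by DS (or a segment override) */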
7106 gen_helper_monitor(cpu_env, cpu_A0);
3d7374c5 7107 break;
1906b2af
RH
7108
7109 case 0xc9: /* mwait */
7110 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) {
7111 goto illegal_op;
7112 }
7113 gen_update_cc_op(s);
7114 gen_jmp_im(pc_start - s->cs_base);
7115 gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
7116 gen_eob(s);
7117 break;
7118
7119 case 0xca: /* clac */
7120 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
7121 || s->cpl != 0) {
7122 goto illegal_op;
7123 }
7124 gen_helper_clac(cpu_env);
7125 gen_jmp_im(s->pc - s->cs_base);
7126 gen_eob(s);
7127 break;
7128
7129 case 0xcb: /* stac */
7130 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
7131 || s->cpl != 0) {
7132 goto illegal_op;
7133 }
7134 gen_helper_stac(cpu_env);
7135 gen_jmp_im(s->pc - s->cs_base);
7136 gen_eob(s);
7137 break;
7138
880f8486 7139 CASE_MODRM_MEM_OP(1): /* sidt */
1906b2af
RH
7140 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
7141 gen_lea_modrm(env, s, modrm);
7142 tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.limit));
7143 gen_op_st_v(s, MO_16, cpu_T0, cpu_A0);
7144 gen_add_A0_im(s, 2);
7145 tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.base));
7146 if (dflag == MO_16) {
7147 tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
7148 }
7149 gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
7150 break;
7151
19dc85db
RH
7152 case 0xd0: /* xgetbv */
7153 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
7154 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
7155 | PREFIX_REPZ | PREFIX_REPNZ))) {
7156 goto illegal_op;
7157 }
7158 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
7159 gen_helper_xgetbv(cpu_tmp1_i64, cpu_env, cpu_tmp2_i32);
7160 tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_tmp1_i64);
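/* the 64-bit XCR value selected by ECX is returned in EDX:EAX */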
7161 break;
7162
7163 case 0xd1: /* xsetbv */
7164 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
7165 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
7166 | PREFIX_REPZ | PREFIX_REPNZ))) {
7167 goto illegal_op;
7168 }
7169 if (s->cpl != 0) {
7170 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7171 break;
7172 }
7173 tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
7174 cpu_regs[R_EDX]);
7175 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
7176 gen_helper_xsetbv(cpu_env, cpu_tmp2_i32, cpu_tmp1_i64);
7177 /* End TB because translation flags may change. */
7178 gen_jmp_im(s->pc - s->cs_base);
7179 gen_eob(s);
7180 break;
7181
1906b2af
RH
7182 case 0xd8: /* VMRUN */
7183 if (!(s->flags & HF_SVME_MASK) || !s->pe) {
7184 goto illegal_op;
7185 }
7186 if (s->cpl != 0) {
2c0262af 7187 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
1906b2af 7188 break;
2c0262af 7189 }
1906b2af
RH
7190 gen_update_cc_op(s);
7191 gen_jmp_im(pc_start - s->cs_base);
7192 gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag - 1),
7193 tcg_const_i32(s->pc - pc_start));
7194 tcg_gen_exit_tb(0);
7195 s->is_jmp = DISAS_TB_JUMP;
2c0262af 7196 break;
1906b2af
RH
7197
7198 case 0xd9: /* VMMCALL */
7199 if (!(s->flags & HF_SVME_MASK)) {
7200 goto illegal_op;
7201 }
7202 gen_update_cc_op(s);
7203 gen_jmp_im(pc_start - s->cs_base);
7204 gen_helper_vmmcall(cpu_env);
7205 break;
7206
7207 case 0xda: /* VMLOAD */
7208 if (!(s->flags & HF_SVME_MASK) || !s->pe) {
7209 goto illegal_op;
7210 }
7211 if (s->cpl != 0) {
7212 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7213 break;
7214 }
7215 gen_update_cc_op(s);
7216 gen_jmp_im(pc_start - s->cs_base);
7217 gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag - 1));
7218 break;
7219
7220 case 0xdb: /* VMSAVE */
7221 if (!(s->flags & HF_SVME_MASK) || !s->pe) {
7222 goto illegal_op;
7223 }
7224 if (s->cpl != 0) {
7225 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7226 break;
7227 }
7228 gen_update_cc_op(s);
7229 gen_jmp_im(pc_start - s->cs_base);
7230 gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag - 1));
7231 break;
7232
7233 case 0xdc: /* STGI */
7234 if ((!(s->flags & HF_SVME_MASK)
7235 && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
7236 || !s->pe) {
7237 goto illegal_op;
7238 }
7239 if (s->cpl != 0) {
7240 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7241 break;
7242 }
7243 gen_update_cc_op(s);
7244 gen_jmp_im(pc_start - s->cs_base);
7245 gen_helper_stgi(cpu_env);
7246 break;
7247
7248 case 0xdd: /* CLGI */
7249 if (!(s->flags & HF_SVME_MASK) || !s->pe) {
7250 goto illegal_op;
7251 }
7252 if (s->cpl != 0) {
7253 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7254 break;
7255 }
7256 gen_update_cc_op(s);
7257 gen_jmp_im(pc_start - s->cs_base);
7258 gen_helper_clgi(cpu_env);
7259 break;
7260
7261 case 0xde: /* SKINIT */
7262 if ((!(s->flags & HF_SVME_MASK)
7263 && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
7264 || !s->pe) {
7265 goto illegal_op;
7266 }
7267 gen_update_cc_op(s);
7268 gen_jmp_im(pc_start - s->cs_base);
7269 gen_helper_skinit(cpu_env);
7270 break;
7271
7272 case 0xdf: /* INVLPGA */
7273 if (!(s->flags & HF_SVME_MASK) || !s->pe) {
7274 goto illegal_op;
7275 }
7276 if (s->cpl != 0) {
7277 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7278 break;
7279 }
7280 gen_update_cc_op(s);
7281 gen_jmp_im(pc_start - s->cs_base);
7282 gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag - 1));
7283 break;
7284
880f8486 7285 CASE_MODRM_MEM_OP(2): /* lgdt */
1906b2af
RH
7286 if (s->cpl != 0) {
7287 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7288 break;
7289 }
7290 gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_WRITE);
7291 gen_lea_modrm(env, s, modrm);
7292 gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0);
7293 gen_add_A0_im(s, 2);
7294 gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
7295 if (dflag == MO_16) {
7296 tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
7297 }
7298 tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base));
7299 tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, gdt.limit));
7300 break;
7301
880f8486 7302 CASE_MODRM_MEM_OP(3): /* lidt */
1906b2af
RH
7303 if (s->cpl != 0) {
7304 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7305 break;
7306 }
7307 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_WRITE);
7308 gen_lea_modrm(env, s, modrm);
7309 gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0);
7310 gen_add_A0_im(s, 2);
7311 gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
7312 if (dflag == MO_16) {
7313 tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
7314 }
7315 tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.base));
7316 tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, idt.limit));
7317 break;
7318
880f8486 7319 CASE_MODRM_OP(4): /* smsw */
872929aa 7320 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
a657f79e
RH
7321 tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, cr[0]));
7322 if (CODE64(s)) {
7323 mod = (modrm >> 6) & 3;
7324 ot = (mod != 3 ? MO_16 : s->dflag);
7325 } else {
7326 ot = MO_16;
7327 }
7328 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
2c0262af 7329 break;
0f70ed47
PB
7330 case 0xee: /* rdpkru */
7331 if (prefixes & PREFIX_LOCK) {
7332 goto illegal_op;
7333 }
7334 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
7335 gen_helper_rdpkru(cpu_tmp1_i64, cpu_env, cpu_tmp2_i32);
7336 tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_tmp1_i64);
7337 break;
7338 case 0xef: /* wrpkru */
7339 if (prefixes & PREFIX_LOCK) {
7340 goto illegal_op;
7341 }
7342 tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
7343 cpu_regs[R_EDX]);
7344 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
7345 gen_helper_wrpkru(cpu_env, cpu_tmp2_i32, cpu_tmp1_i64);
7346 break;
880f8486 7347 CASE_MODRM_OP(6): /* lmsw */
2c0262af
FB
7348 if (s->cpl != 0) {
7349 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
1906b2af 7350 break;
2c0262af 7351 }
1906b2af
RH
7352 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7353 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7354 gen_helper_lmsw(cpu_env, cpu_T0);
7355 gen_jmp_im(s->pc - s->cs_base);
7356 gen_eob(s);
2c0262af 7357 break;
1906b2af 7358
880f8486 7359 CASE_MODRM_MEM_OP(7): /* invlpg */
1906b2af
RH
7360 if (s->cpl != 0) {
7361 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7362 break;
7363 }
7364 gen_update_cc_op(s);
7365 gen_jmp_im(pc_start - s->cs_base);
7366 gen_lea_modrm(env, s, modrm);
7367 gen_helper_invlpg(cpu_env, cpu_A0);
7368 gen_jmp_im(s->pc - s->cs_base);
7369 gen_eob(s);
7370 break;
7371
7372 case 0xf8: /* swapgs */
7373#ifdef TARGET_X86_64
7374 if (CODE64(s)) {
1b050077
AP
7375 if (s->cpl != 0) {
7376 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7377 } else {
1906b2af
RH
7378 tcg_gen_mov_tl(cpu_T0, cpu_seg_base[R_GS]);
7379 tcg_gen_ld_tl(cpu_seg_base[R_GS], cpu_env,
7380 offsetof(CPUX86State, kernelgsbase));
7381 tcg_gen_st_tl(cpu_T0, cpu_env,
7382 offsetof(CPUX86State, kernelgsbase));
1b050077 7383 }
1906b2af
RH
7384 break;
7385 }
3558f805 7386#endif
1906b2af
RH
7387 goto illegal_op;
7388
7389 case 0xf9: /* rdtscp */
7390 if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
7391 goto illegal_op;
7392 }
7393 gen_update_cc_op(s);
7394 gen_jmp_im(pc_start - s->cs_base);
7395 if (s->tb->cflags & CF_USE_ICOUNT) {
7396 gen_io_start();
7397 }
7398 gen_helper_rdtscp(cpu_env);
7399 if (s->tb->cflags & CF_USE_ICOUNT) {
7400 gen_io_end();
7401 gen_jmp(s, s->pc - s->cs_base);
2c0262af
FB
7402 }
7403 break;
1906b2af 7404
2c0262af 7405 default:
b9f9c5b4 7406 goto unknown_op;
2c0262af
FB
7407 }
7408 break;
1906b2af 7409
3415a4dd
FB
7410 case 0x108: /* invd */
7411 case 0x109: /* wbinvd */
7412 if (s->cpl != 0) {
7413 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7414 } else {
872929aa 7415 gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
3415a4dd
FB
7416 /* nothing to do */
7417 }
7418 break;
14ce26e7
FB
7419 case 0x63: /* arpl or movslS (x86_64) */
7420#ifdef TARGET_X86_64
7421 if (CODE64(s)) {
7422 int d_ot;
7423 /* d_ot is the size of destination */
ab4e4aec 7424 d_ot = dflag;
14ce26e7 7425
0af10c86 7426 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7
FB
7427 reg = ((modrm >> 3) & 7) | rex_r;
7428 mod = (modrm >> 6) & 3;
7429 rm = (modrm & 7) | REX_B(s);
3b46e624 7430
14ce26e7 7431 if (mod == 3) {
1d1cc4d0 7432 gen_op_mov_v_reg(MO_32, cpu_T0, rm);
14ce26e7 7433 /* sign extend */
4ba9938c 7434 if (d_ot == MO_64) {
1d1cc4d0 7435 tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
4ba9938c 7436 }
1d1cc4d0 7437 gen_op_mov_reg_v(d_ot, reg, cpu_T0);
14ce26e7 7438 } else {
4eeb3939 7439 gen_lea_modrm(env, s, modrm);
1d1cc4d0
RH
7440 gen_op_ld_v(s, MO_32 | MO_SIGN, cpu_T0, cpu_A0);
7441 gen_op_mov_reg_v(d_ot, reg, cpu_T0);
14ce26e7 7442 }
5fafdf24 7443 } else
14ce26e7
FB
7444#endif
7445 {
42a268c2 7446 TCGLabel *label1;
49d9fdcc 7447 TCGv t0, t1, t2, a0;
1e4840bf 7448
14ce26e7
FB
7449 if (!s->pe || s->vm86)
7450 goto illegal_op;
a7812ae4
PB
7451 t0 = tcg_temp_local_new();
7452 t1 = tcg_temp_local_new();
7453 t2 = tcg_temp_local_new();
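/* arpl: if the destination selector's RPL is below the source's, raise it to the source RPL and set ZF, otherwise clear ZF; t2 accumulates the new Z bit */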
4ba9938c 7454 ot = MO_16;
0af10c86 7455 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7
FB
7456 reg = (modrm >> 3) & 7;
7457 mod = (modrm >> 6) & 3;
7458 rm = modrm & 7;
7459 if (mod != 3) {
4eeb3939 7460 gen_lea_modrm(env, s, modrm);
323d1876 7461 gen_op_ld_v(s, ot, t0, cpu_A0);
49d9fdcc
LD
7462 a0 = tcg_temp_local_new();
7463 tcg_gen_mov_tl(a0, cpu_A0);
14ce26e7 7464 } else {
1e4840bf 7465 gen_op_mov_v_reg(ot, t0, rm);
49d9fdcc 7466 TCGV_UNUSED(a0);
14ce26e7 7467 }
1e4840bf
FB
7468 gen_op_mov_v_reg(ot, t1, reg);
7469 tcg_gen_andi_tl(cpu_tmp0, t0, 3);
7470 tcg_gen_andi_tl(t1, t1, 3);
7471 tcg_gen_movi_tl(t2, 0);
3bd7da9e 7472 label1 = gen_new_label();
1e4840bf
FB
7473 tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
7474 tcg_gen_andi_tl(t0, t0, ~3);
7475 tcg_gen_or_tl(t0, t0, t1);
7476 tcg_gen_movi_tl(t2, CC_Z);
3bd7da9e 7477 gen_set_label(label1);
14ce26e7 7478 if (mod != 3) {
323d1876 7479 gen_op_st_v(s, ot, t0, a0);
49d9fdcc
LD
7480 tcg_temp_free(a0);
7481 } else {
1e4840bf 7482 gen_op_mov_reg_v(ot, rm, t0);
14ce26e7 7483 }
d229edce 7484 gen_compute_eflags(s);
3bd7da9e 7485 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
1e4840bf 7486 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
1e4840bf
FB
7487 tcg_temp_free(t0);
7488 tcg_temp_free(t1);
7489 tcg_temp_free(t2);
f115e911 7490 }
f115e911 7491 break;
2c0262af
FB
7492 case 0x102: /* lar */
7493 case 0x103: /* lsl */
cec6843e 7494 {
42a268c2 7495 TCGLabel *label1;
1e4840bf 7496 TCGv t0;
cec6843e
FB
7497 if (!s->pe || s->vm86)
7498 goto illegal_op;
ab4e4aec 7499 ot = dflag != MO_16 ? MO_32 : MO_16;
0af10c86 7500 modrm = cpu_ldub_code(env, s->pc++);
cec6843e 7501 reg = ((modrm >> 3) & 7) | rex_r;
4ba9938c 7502 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
a7812ae4 7503 t0 = tcg_temp_local_new();
773cdfcc 7504 gen_update_cc_op(s);
2999a0b2 7505 if (b == 0x102) {
1d1cc4d0 7506 gen_helper_lar(t0, cpu_env, cpu_T0);
2999a0b2 7507 } else {
1d1cc4d0 7508 gen_helper_lsl(t0, cpu_env, cpu_T0);
2999a0b2 7509 }
cec6843e
FB
7510 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
7511 label1 = gen_new_label();
cb63669a 7512 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
1e4840bf 7513 gen_op_mov_reg_v(ot, reg, t0);
cec6843e 7514 gen_set_label(label1);
3ca51d07 7515 set_cc_op(s, CC_OP_EFLAGS);
1e4840bf 7516 tcg_temp_free(t0);
cec6843e 7517 }
2c0262af
FB
7518 break;
7519 case 0x118:
0af10c86 7520 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
7521 mod = (modrm >> 6) & 3;
7522 op = (modrm >> 3) & 7;
7523 switch(op) {
7524 case 0: /* prefetchnta */
7525 case 1: /* prefetcht0 */
7526 case 2: /* prefetcht1 */
7527 case 3: /* prefetcht2 */
7528 if (mod == 3)
7529 goto illegal_op;
26317698 7530 gen_nop_modrm(env, s, modrm);
2c0262af
FB
7531 /* nothing more to do */
7532 break;
e17a36ce 7533 default: /* nop (multi byte) */
0af10c86 7534 gen_nop_modrm(env, s, modrm);
e17a36ce 7535 break;
2c0262af
FB
7536 }
7537 break;
62b58ba5
RH
7538 case 0x11a:
7539 modrm = cpu_ldub_code(env, s->pc++);
7540 if (s->flags & HF_MPX_EN_MASK) {
7541 mod = (modrm >> 6) & 3;
7542 reg = ((modrm >> 3) & 7) | rex_r;
523e28d7
RH
7543 if (prefixes & PREFIX_REPZ) {
7544 /* bndcl */
7545 if (reg >= 4
7546 || (prefixes & PREFIX_LOCK)
7547 || s->aflag == MO_16) {
7548 goto illegal_op;
7549 }
7550 gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
7551 } else if (prefixes & PREFIX_REPNZ) {
7552 /* bndcu */
7553 if (reg >= 4
7554 || (prefixes & PREFIX_LOCK)
7555 || s->aflag == MO_16) {
7556 goto illegal_op;
7557 }
7558 TCGv_i64 notu = tcg_temp_new_i64();
7559 tcg_gen_not_i64(notu, cpu_bndu[reg]);
7560 gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
7561 tcg_temp_free_i64(notu);
7562 } else if (prefixes & PREFIX_DATA) {
62b58ba5
RH
7563 /* bndmov -- from reg/mem */
7564 if (reg >= 4 || s->aflag == MO_16) {
7565 goto illegal_op;
7566 }
7567 if (mod == 3) {
7568 int reg2 = (modrm & 7) | REX_B(s);
7569 if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
7570 goto illegal_op;
7571 }
7572 if (s->flags & HF_MPX_IU_MASK) {
7573 tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
7574 tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
7575 }
7576 } else {
7577 gen_lea_modrm(env, s, modrm);
7578 if (CODE64(s)) {
7579 tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0,
7580 s->mem_index, MO_LEQ);
7581 tcg_gen_addi_tl(cpu_A0, cpu_A0, 8);
7582 tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0,
7583 s->mem_index, MO_LEQ);
7584 } else {
7585 tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0,
7586 s->mem_index, MO_LEUL);
7587 tcg_gen_addi_tl(cpu_A0, cpu_A0, 4);
7588 tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0,
7589 s->mem_index, MO_LEUL);
7590 }
7591 /* bnd registers are now in-use */
7592 gen_set_hflag(s, HF_MPX_IU_MASK);
7593 }
bdd87b3b
RH
7594 } else if (mod != 3) {
7595 /* bndldx */
7596 AddressParts a = gen_lea_modrm_0(env, s, modrm);
7597 if (reg >= 4
7598 || (prefixes & PREFIX_LOCK)
7599 || s->aflag == MO_16
7600 || a.base < -1) {
7601 goto illegal_op;
7602 }
7603 if (a.base >= 0) {
7604 tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp);
7605 } else {
7606 tcg_gen_movi_tl(cpu_A0, 0);
7607 }
7608 gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
7609 if (a.index >= 0) {
7610 tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]);
7611 } else {
7612 tcg_gen_movi_tl(cpu_T0, 0);
7613 }
7614 if (CODE64(s)) {
7615 gen_helper_bndldx64(cpu_bndl[reg], cpu_env, cpu_A0, cpu_T0);
7616 tcg_gen_ld_i64(cpu_bndu[reg], cpu_env,
7617 offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
7618 } else {
7619 gen_helper_bndldx32(cpu_bndu[reg], cpu_env, cpu_A0, cpu_T0);
7620 tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
7621 tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
7622 }
7623 gen_set_hflag(s, HF_MPX_IU_MASK);
62b58ba5
RH
7624 }
7625 }
7626 gen_nop_modrm(env, s, modrm);
7627 break;
149b427b
RH
7628 case 0x11b:
7629 modrm = cpu_ldub_code(env, s->pc++);
7630 if (s->flags & HF_MPX_EN_MASK) {
7631 mod = (modrm >> 6) & 3;
7632 reg = ((modrm >> 3) & 7) | rex_r;
7633 if (mod != 3 && (prefixes & PREFIX_REPZ)) {
7634 /* bndmk */
7635 if (reg >= 4
7636 || (prefixes & PREFIX_LOCK)
7637 || s->aflag == MO_16) {
7638 goto illegal_op;
7639 }
7640 AddressParts a = gen_lea_modrm_0(env, s, modrm);
7641 if (a.base >= 0) {
7642 tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
7643 if (!CODE64(s)) {
7644 tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
7645 }
7646 } else if (a.base == -1) {
7647 /* no base register: the lower bound is 0 */
7648 tcg_gen_movi_i64(cpu_bndl[reg], 0);
7649 } else {
7650 /* rip-relative generates #ud */
7651 goto illegal_op;
7652 }
7653 tcg_gen_not_tl(cpu_A0, gen_lea_modrm_1(a));
7654 if (!CODE64(s)) {
7655 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
7656 }
7657 tcg_gen_extu_tl_i64(cpu_bndu[reg], cpu_A0);
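/* as architected, bndmk stores the one's complement of the effective address as the upper bound */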
7658 /* bnd registers are now in-use */
7659 gen_set_hflag(s, HF_MPX_IU_MASK);
7660 break;
523e28d7
RH
7661 } else if (prefixes & PREFIX_REPNZ) {
7662 /* bndcn */
7663 if (reg >= 4
7664 || (prefixes & PREFIX_LOCK)
7665 || s->aflag == MO_16) {
7666 goto illegal_op;
7667 }
7668 gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
62b58ba5
RH
7669 } else if (prefixes & PREFIX_DATA) {
7670 /* bndmov -- to reg/mem */
7671 if (reg >= 4 || s->aflag == MO_16) {
7672 goto illegal_op;
7673 }
7674 if (mod == 3) {
7675 int reg2 = (modrm & 7) | REX_B(s);
7676 if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
7677 goto illegal_op;
7678 }
7679 if (s->flags & HF_MPX_IU_MASK) {
7680 tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
7681 tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
7682 }
7683 } else {
7684 gen_lea_modrm(env, s, modrm);
7685 if (CODE64(s)) {
7686 tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0,
7687 s->mem_index, MO_LEQ);
7688 tcg_gen_addi_tl(cpu_A0, cpu_A0, 8);
7689 tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0,
7690 s->mem_index, MO_LEQ);
7691 } else {
7692 tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0,
7693 s->mem_index, MO_LEUL);
7694 tcg_gen_addi_tl(cpu_A0, cpu_A0, 4);
7695 tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0,
7696 s->mem_index, MO_LEUL);
7697 }
7698 }
bdd87b3b
RH
7699 } else if (mod != 3) {
7700 /* bndstx */
7701 AddressParts a = gen_lea_modrm_0(env, s, modrm);
7702 if (reg >= 4
7703 || (prefixes & PREFIX_LOCK)
7704 || s->aflag == MO_16
7705 || a.base < -1) {
7706 goto illegal_op;
7707 }
7708 if (a.base >= 0) {
7709 tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp);
7710 } else {
7711 tcg_gen_movi_tl(cpu_A0, 0);
7712 }
7713 gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
7714 if (a.index >= 0) {
7715 tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]);
7716 } else {
7717 tcg_gen_movi_tl(cpu_T0, 0);
7718 }
7719 if (CODE64(s)) {
7720 gen_helper_bndstx64(cpu_env, cpu_A0, cpu_T0,
7721 cpu_bndl[reg], cpu_bndu[reg]);
7722 } else {
7723 gen_helper_bndstx32(cpu_env, cpu_A0, cpu_T0,
7724 cpu_bndl[reg], cpu_bndu[reg]);
7725 }
149b427b
RH
7726 }
7727 }
7728 gen_nop_modrm(env, s, modrm);
7729 break;
62b58ba5 7730 case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
0af10c86
BS
7731 modrm = cpu_ldub_code(env, s->pc++);
7732 gen_nop_modrm(env, s, modrm);
e17a36ce 7733 break;
2c0262af
FB
7734 case 0x120: /* mov reg, crN */
7735 case 0x122: /* mov crN, reg */
7736 if (s->cpl != 0) {
7737 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7738 } else {
0af10c86 7739 modrm = cpu_ldub_code(env, s->pc++);
5c73b757
MO
7740 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7741 * AMD documentation (24594.pdf) and testing of
7742 * intel 386 and 486 processors all show that the mod bits
7743 * are assumed to be 1's, regardless of actual values.
7744 */
14ce26e7
FB
7745 rm = (modrm & 7) | REX_B(s);
7746 reg = ((modrm >> 3) & 7) | rex_r;
7747 if (CODE64(s))
4ba9938c 7748 ot = MO_64;
14ce26e7 7749 else
4ba9938c 7750 ot = MO_32;
ccd59d09
AP
7751 if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
7752 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
7753 reg = 8;
7754 }
2c0262af
FB
7755 switch(reg) {
7756 case 0:
7757 case 2:
7758 case 3:
7759 case 4:
9230e66e 7760 case 8:
773cdfcc 7761 gen_update_cc_op(s);
872929aa 7762 gen_jmp_im(pc_start - s->cs_base);
2c0262af 7763 if (b & 2) {
1d1cc4d0 7764 gen_op_mov_v_reg(ot, cpu_T0, rm);
4a7443be 7765 gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
1d1cc4d0 7766 cpu_T0);
14ce26e7 7767 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
7768 gen_eob(s);
7769 } else {
1d1cc4d0
RH
7770 gen_helper_read_crN(cpu_T0, cpu_env, tcg_const_i32(reg));
7771 gen_op_mov_reg_v(ot, rm, cpu_T0);
2c0262af
FB
7772 }
7773 break;
7774 default:
b9f9c5b4 7775 goto unknown_op;
2c0262af
FB
7776 }
7777 }
7778 break;
7779 case 0x121: /* mov reg, drN */
7780 case 0x123: /* mov drN, reg */
7781 if (s->cpl != 0) {
7782 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7783 } else {
0af10c86 7784 modrm = cpu_ldub_code(env, s->pc++);
5c73b757
MO
7785 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7786 * AMD documentation (24594.pdf) and testing of
7787 * intel 386 and 486 processors all show that the mod bits
7788 * are assumed to be 1's, regardless of actual values.
7789 */
14ce26e7
FB
7790 rm = (modrm & 7) | REX_B(s);
7791 reg = ((modrm >> 3) & 7) | rex_r;
7792 if (CODE64(s))
4ba9938c 7793 ot = MO_64;
14ce26e7 7794 else
4ba9938c 7795 ot = MO_32;
d0052339 7796 if (reg >= 8) {
2c0262af 7797 goto illegal_op;
d0052339 7798 }
2c0262af 7799 if (b & 2) {
0573fbfc 7800 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
1d1cc4d0 7801 gen_op_mov_v_reg(ot, cpu_T0, rm);
d0052339 7802 tcg_gen_movi_i32(cpu_tmp2_i32, reg);
1d1cc4d0 7803 gen_helper_set_dr(cpu_env, cpu_tmp2_i32, cpu_T0);
14ce26e7 7804 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
7805 gen_eob(s);
7806 } else {
0573fbfc 7807 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
d0052339 7808 tcg_gen_movi_i32(cpu_tmp2_i32, reg);
1d1cc4d0
RH
7809 gen_helper_get_dr(cpu_T0, cpu_env, cpu_tmp2_i32);
7810 gen_op_mov_reg_v(ot, rm, cpu_T0);
2c0262af
FB
7811 }
7812 }
7813 break;
7814 case 0x106: /* clts */
7815 if (s->cpl != 0) {
7816 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7817 } else {
0573fbfc 7818 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
f0967a1a 7819 gen_helper_clts(cpu_env);
7eee2a50 7820 /* abort block because static cpu state changed */
14ce26e7 7821 gen_jmp_im(s->pc - s->cs_base);
7eee2a50 7822 gen_eob(s);
2c0262af
FB
7823 }
7824 break;
222a3336 7825 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
664e0f19
FB
7826 case 0x1c3: /* MOVNTI reg, mem */
7827 if (!(s->cpuid_features & CPUID_SSE2))
14ce26e7 7828 goto illegal_op;
ab4e4aec 7829 ot = mo_64_32(dflag);
0af10c86 7830 modrm = cpu_ldub_code(env, s->pc++);
664e0f19
FB
7831 mod = (modrm >> 6) & 3;
7832 if (mod == 3)
7833 goto illegal_op;
7834 reg = ((modrm >> 3) & 7) | rex_r;
7835 /* generate a generic store */
0af10c86 7836 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
14ce26e7 7837 break;
664e0f19 7838 case 0x1ae:
0af10c86 7839 modrm = cpu_ldub_code(env, s->pc++);
121f3157 7840 switch (modrm) {
880f8486 7841 CASE_MODRM_MEM_OP(0): /* fxsave */
121f3157
RH
7842 if (!(s->cpuid_features & CPUID_FXSR)
7843 || (prefixes & PREFIX_LOCK)) {
14ce26e7 7844 goto illegal_op;
121f3157 7845 }
09d85fb8 7846 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
0fd14b72
FB
7847 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7848 break;
7849 }
4eeb3939 7850 gen_lea_modrm(env, s, modrm);
64dbaff0 7851 gen_helper_fxsave(cpu_env, cpu_A0);
664e0f19 7852 break;
121f3157 7853
880f8486 7854 CASE_MODRM_MEM_OP(1): /* fxrstor */
121f3157
RH
7855 if (!(s->cpuid_features & CPUID_FXSR)
7856 || (prefixes & PREFIX_LOCK)) {
14ce26e7 7857 goto illegal_op;
121f3157 7858 }
09d85fb8 7859 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
0fd14b72
FB
7860 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7861 break;
7862 }
4eeb3939 7863 gen_lea_modrm(env, s, modrm);
64dbaff0 7864 gen_helper_fxrstor(cpu_env, cpu_A0);
664e0f19 7865 break;
121f3157 7866
880f8486 7867 CASE_MODRM_MEM_OP(2): /* ldmxcsr */
121f3157
RH
7868 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
7869 goto illegal_op;
7870 }
664e0f19
FB
7871 if (s->flags & HF_TS_MASK) {
7872 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7873 break;
14ce26e7 7874 }
4eeb3939 7875 gen_lea_modrm(env, s, modrm);
121f3157
RH
7876 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL);
7877 gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
664e0f19 7878 break;
121f3157 7879
880f8486 7880 CASE_MODRM_MEM_OP(3): /* stmxcsr */
121f3157 7881 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
664e0f19 7882 goto illegal_op;
121f3157
RH
7883 }
7884 if (s->flags & HF_TS_MASK) {
7885 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7886 break;
7887 }
7888 gen_lea_modrm(env, s, modrm);
7889 tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, mxcsr));
7890 gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
664e0f19 7891 break;
121f3157 7892
880f8486 7893 CASE_MODRM_MEM_OP(4): /* xsave */
19dc85db
RH
7894 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
7895 || (prefixes & (PREFIX_LOCK | PREFIX_DATA
7896 | PREFIX_REPZ | PREFIX_REPNZ))) {
7897 goto illegal_op;
7898 }
7899 gen_lea_modrm(env, s, modrm);
7900 tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
7901 cpu_regs[R_EDX]);
7902 gen_helper_xsave(cpu_env, cpu_A0, cpu_tmp1_i64);
7903 break;
7904
880f8486 7905 CASE_MODRM_MEM_OP(5): /* xrstor */
19dc85db
RH
7906 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
7907 || (prefixes & (PREFIX_LOCK | PREFIX_DATA
7908 | PREFIX_REPZ | PREFIX_REPNZ))) {
7909 goto illegal_op;
7910 }
7911 gen_lea_modrm(env, s, modrm);
7912 tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
7913 cpu_regs[R_EDX]);
7914 gen_helper_xrstor(cpu_env, cpu_A0, cpu_tmp1_i64);
f4f1110e
RH
7915 /* XRSTOR is how MPX is enabled, which changes how
7916 we translate. Thus we need to end the TB. */
7917 gen_update_cc_op(s);
7918 gen_jmp_im(s->pc - s->cs_base);
7919 gen_eob(s);
19dc85db
RH
7920 break;
7921
880f8486 7922 CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
121f3157
RH
7923 if (prefixes & PREFIX_LOCK) {
7924 goto illegal_op;
7925 }
7926 if (prefixes & PREFIX_DATA) {
5e1fac2d 7927 /* clwb */
121f3157 7928 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) {
5e1fac2d 7929 goto illegal_op;
121f3157 7930 }
5e1fac2d 7931 gen_nop_modrm(env, s, modrm);
c9cfe8f9
RH
7932 } else {
7933 /* xsaveopt */
7934 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
7935 || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0
7936 || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) {
7937 goto illegal_op;
7938 }
7939 gen_lea_modrm(env, s, modrm);
7940 tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
7941 cpu_regs[R_EDX]);
7942 gen_helper_xsaveopt(cpu_env, cpu_A0, cpu_tmp1_i64);
121f3157 7943 }
c9cfe8f9 7944 break;
121f3157 7945
880f8486 7946 CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
121f3157
RH
7947 if (prefixes & PREFIX_LOCK) {
7948 goto illegal_op;
7949 }
7950 if (prefixes & PREFIX_DATA) {
7951 /* clflushopt */
7952 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) {
7953 goto illegal_op;
7954 }
5e1fac2d 7955 } else {
121f3157
RH
7956 /* clflush */
7957 if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))
7958 || !(s->cpuid_features & CPUID_CLFLUSH)) {
5e1fac2d 7959 goto illegal_op;
121f3157 7960 }
5e1fac2d 7961 }
121f3157 7962 gen_nop_modrm(env, s, modrm);
5e1fac2d 7963 break;
121f3157 7964
07929f2a
RH
7965 case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
7966 case 0xc8 ... 0xc8: /* rdgsbase (f3 0f ae /1) */
7967 case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
7968 case 0xd8 ... 0xd8: /* wrgsbase (f3 0f ae /3) */
7969 if (CODE64(s)
7970 && (prefixes & PREFIX_REPZ)
7971 && !(prefixes & PREFIX_LOCK)
7972 && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) {
7973 TCGv base, treg, src, dst;
7974
7975 /* Preserve hflags bits by testing CR4 at runtime. */
7976 tcg_gen_movi_i32(cpu_tmp2_i32, CR4_FSGSBASE_MASK);
7977 gen_helper_cr4_testbit(cpu_env, cpu_tmp2_i32);
7978
7979 base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
7980 treg = cpu_regs[(modrm & 7) | REX_B(s)];
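/* modrm bit 3 selects FS vs GS and bit 4 selects write vs read, matching the /0../3 encodings above */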
7981
7982 if (modrm & 0x10) {
7983 /* wr*base */
7984 dst = base, src = treg;
7985 } else {
7986 /* rd*base */
7987 dst = treg, src = base;
7988 }
7989
7990 if (s->dflag == MO_32) {
7991 tcg_gen_ext32u_tl(dst, src);
7992 } else {
7993 tcg_gen_mov_tl(dst, src);
7994 }
7995 break;
7996 }
b9f9c5b4 7997 goto unknown_op;
07929f2a 7998
121f3157
RH
7999 case 0xf8: /* sfence / pcommit */
8000 if (prefixes & PREFIX_DATA) {
8001 /* pcommit */
8002 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT)
8003 || (prefixes & PREFIX_LOCK)) {
8004 goto illegal_op;
891bc821 8005 }
121f3157
RH
8006 break;
8007 }
8008 /* fallthru */
8009 case 0xf9 ... 0xff: /* sfence */
8010 case 0xe8 ... 0xef: /* lfence */
8011 case 0xf0 ... 0xf7: /* mfence */
8012 if (!(s->cpuid_features & CPUID_SSE2)
8013 || (prefixes & PREFIX_LOCK)) {
8014 goto illegal_op;
8f091a59
FB
8015 }
8016 break;
121f3157 8017
664e0f19 8018 default:
b9f9c5b4 8019 goto unknown_op;
14ce26e7
FB
8020 }
8021 break;
121f3157 8022
a35f3ec7 8023 case 0x10d: /* 3DNow! prefetch(w) */
0af10c86 8024 modrm = cpu_ldub_code(env, s->pc++);
a35f3ec7
AJ
8025 mod = (modrm >> 6) & 3;
8026 if (mod == 3)
8027 goto illegal_op;
26317698 8028 gen_nop_modrm(env, s, modrm);
8f091a59 8029 break;
3b21e03e 8030 case 0x1aa: /* rsm */
872929aa 8031 gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
3b21e03e
FB
8032 if (!(s->flags & HF_SMM_MASK))
8033 goto illegal_op;
728d803b 8034 gen_update_cc_op(s);
3b21e03e 8035 gen_jmp_im(s->pc - s->cs_base);
608badfc 8036 gen_helper_rsm(cpu_env);
3b21e03e
FB
8037 gen_eob(s);
8038 break;
222a3336
AZ
8039 case 0x1b8: /* SSE4.2 popcnt */
8040 if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
8041 PREFIX_REPZ)
8042 goto illegal_op;
8043 if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
8044 goto illegal_op;
8045
0af10c86 8046 modrm = cpu_ldub_code(env, s->pc++);
8b4a3df8 8047 reg = ((modrm >> 3) & 7) | rex_r;
222a3336 8048
ab4e4aec 8049 if (s->prefix & PREFIX_DATA) {
4ba9938c 8050 ot = MO_16;
ab4e4aec
RH
8051 } else {
8052 ot = mo_64_32(dflag);
8053 }
222a3336 8054
0af10c86 8055 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
1d1cc4d0
RH
8056 gen_helper_popcnt(cpu_T0, cpu_env, cpu_T0, tcg_const_i32(ot));
8057 gen_op_mov_reg_v(ot, reg, cpu_T0);
fdb0d09d 8058
3ca51d07 8059 set_cc_op(s, CC_OP_EFLAGS);
222a3336 8060 break;
a35f3ec7
AJ
8061 case 0x10e ... 0x10f:
8062 /* 3DNow! instructions, ignore prefixes */
8063 s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
664e0f19
FB
8064 case 0x110 ... 0x117:
8065 case 0x128 ... 0x12f:
4242b1bd 8066 case 0x138 ... 0x13a:
d9f4bb27 8067 case 0x150 ... 0x179:
664e0f19
FB
8068 case 0x17c ... 0x17f:
8069 case 0x1c2:
8070 case 0x1c4 ... 0x1c6:
8071 case 0x1d0 ... 0x1fe:
0af10c86 8072 gen_sse(env, s, b, pc_start, rex_r);
664e0f19 8073 break;
2c0262af 8074 default:
b9f9c5b4 8075 goto unknown_op;
2c0262af
FB
8076 }
8077 /* lock generation */
8078 if (s->prefix & PREFIX_LOCK)
a7812ae4 8079 gen_helper_unlock();
2c0262af
FB
8080 return s->pc;
8081 illegal_op:
ab1f142b 8082 if (s->prefix & PREFIX_LOCK)
a7812ae4 8083 gen_helper_unlock();
2c0262af 8084 /* XXX: ensure that no lock was generated */
b9f9c5b4
RH
8085 gen_illegal_opcode(s);
8086 return s->pc;
8087 unknown_op:
8088 if (s->prefix & PREFIX_LOCK)
8089 gen_helper_unlock();
8090 /* XXX: ensure that no lock was generated */
8091 gen_unknown_opcode(env, s);
2c0262af
FB
8092 return s->pc;
8093}
8094
63618b4e 8095void tcg_x86_init(void)
2c0262af 8096{
fac0aff9
RH
8097 static const char reg_names[CPU_NB_REGS][4] = {
8098#ifdef TARGET_X86_64
8099 [R_EAX] = "rax",
8100 [R_EBX] = "rbx",
8101 [R_ECX] = "rcx",
8102 [R_EDX] = "rdx",
8103 [R_ESI] = "rsi",
8104 [R_EDI] = "rdi",
8105 [R_EBP] = "rbp",
8106 [R_ESP] = "rsp",
8107 [8] = "r8",
8108 [9] = "r9",
8109 [10] = "r10",
8110 [11] = "r11",
8111 [12] = "r12",
8112 [13] = "r13",
8113 [14] = "r14",
8114 [15] = "r15",
8115#else
8116 [R_EAX] = "eax",
8117 [R_EBX] = "ebx",
8118 [R_ECX] = "ecx",
8119 [R_EDX] = "edx",
8120 [R_ESI] = "esi",
8121 [R_EDI] = "edi",
8122 [R_EBP] = "ebp",
8123 [R_ESP] = "esp",
8124#endif
8125 };
3558f805
RH
8126 static const char seg_base_names[6][8] = {
8127 [R_CS] = "cs_base",
8128 [R_DS] = "ds_base",
8129 [R_ES] = "es_base",
8130 [R_FS] = "fs_base",
8131 [R_GS] = "gs_base",
8132 [R_SS] = "ss_base",
8133 };
149b427b
RH
8134 static const char bnd_regl_names[4][8] = {
8135 "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
8136 };
8137 static const char bnd_regu_names[4][8] = {
8138 "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
8139 };
fac0aff9
RH
8140 int i;
8141
a7812ae4 8142 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
e1ccc054 8143 cpu_cc_op = tcg_global_mem_new_i32(cpu_env,
317ac620 8144 offsetof(CPUX86State, cc_op), "cc_op");
e1ccc054 8145 cpu_cc_dst = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_dst),
a7812ae4 8146 "cc_dst");
e1ccc054 8147 cpu_cc_src = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src),
a3251186 8148 "cc_src");
e1ccc054 8149 cpu_cc_src2 = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src2),
988c3eb0 8150 "cc_src2");
437a88a5 8151
fac0aff9 8152 for (i = 0; i < CPU_NB_REGS; ++i) {
e1ccc054 8153 cpu_regs[i] = tcg_global_mem_new(cpu_env,
fac0aff9
RH
8154 offsetof(CPUX86State, regs[i]),
8155 reg_names[i]);
8156 }
677ef623 8157
3558f805
RH
8158 for (i = 0; i < 6; ++i) {
8159 cpu_seg_base[i]
8160 = tcg_global_mem_new(cpu_env,
8161 offsetof(CPUX86State, segs[i].base),
8162 seg_base_names[i]);
8163 }
8164
149b427b
RH
8165 for (i = 0; i < 4; ++i) {
8166 cpu_bndl[i]
8167 = tcg_global_mem_new_i64(cpu_env,
8168 offsetof(CPUX86State, bnd_regs[i].lb),
8169 bnd_regl_names[i]);
8170 cpu_bndu[i]
8171 = tcg_global_mem_new_i64(cpu_env,
8172 offsetof(CPUX86State, bnd_regs[i].ub),
8173 bnd_regu_names[i]);
8174 }
8175
677ef623 8176 helper_lock_init();
2c0262af
FB
8177}
8178
20157705 8179/* generate intermediate code for basic block 'tb'. */
4e5e1215 8180void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
2c0262af 8181{
4e5e1215 8182 X86CPU *cpu = x86_env_get_cpu(env);
ed2803da 8183 CPUState *cs = CPU(cpu);
2c0262af 8184 DisasContext dc1, *dc = &dc1;
14ce26e7 8185 target_ulong pc_ptr;
89fee74a 8186 uint32_t flags;
14ce26e7
FB
8187 target_ulong pc_start;
8188 target_ulong cs_base;
2e70f6ef
PB
8189 int num_insns;
8190 int max_insns;
3b46e624 8191
2c0262af 8192 /* generate intermediate code */
14ce26e7
FB
8193 pc_start = tb->pc;
8194 cs_base = tb->cs_base;
2c0262af 8195 flags = tb->flags;
3a1d9b8b 8196
4f31916f 8197 dc->pe = (flags >> HF_PE_SHIFT) & 1;
2c0262af
FB
8198 dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
8199 dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
8200 dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
8201 dc->f_st = 0;
8202 dc->vm86 = (flags >> VM_SHIFT) & 1;
8203 dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
8204 dc->iopl = (flags >> IOPL_SHIFT) & 3;
8205 dc->tf = (flags >> TF_SHIFT) & 1;
ed2803da 8206 dc->singlestep_enabled = cs->singlestep_enabled;
2c0262af 8207 dc->cc_op = CC_OP_DYNAMIC;
e207582f 8208 dc->cc_op_dirty = false;
2c0262af
FB
8209 dc->cs_base = cs_base;
8210 dc->tb = tb;
8211 dc->popl_esp_hack = 0;
8212 /* select memory access functions */
8213 dc->mem_index = 0;
8214 if (flags & HF_SOFTMMU_MASK) {
97ed5ccd 8215 dc->mem_index = cpu_mmu_index(env, false);
2c0262af 8216 }
0514ef2f
EH
8217 dc->cpuid_features = env->features[FEAT_1_EDX];
8218 dc->cpuid_ext_features = env->features[FEAT_1_ECX];
8219 dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
8220 dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
8221 dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
c9cfe8f9 8222 dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
14ce26e7
FB
8223#ifdef TARGET_X86_64
8224 dc->lma = (flags >> HF_LMA_SHIFT) & 1;
8225 dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
8226#endif
7eee2a50 8227 dc->flags = flags;
ed2803da 8228 dc->jmp_opt = !(dc->tf || cs->singlestep_enabled ||
a2cc3b24 8229 (flags & HF_INHIBIT_IRQ_MASK)
415fa2ea 8230#ifndef CONFIG_SOFTMMU
2c0262af
FB
8231 || (flags & HF_SOFTMMU_MASK)
8232#endif
8233 );
c4d4525c
PD
8234 /* Do not optimize repz jumps at all in icount mode, because
8235 rep movsS instructions are executed along different paths in
8236 the !repz_opt and repz_opt modes; the optimized path used to be
8237 taken in every mode except single-step. Disabling the jump
8238 optimization makes the control paths identical in normal and
8239 single-step runs.
8240 As a result there is no repz jump optimization in
8241 record/replay mode, and there is always an additional step
8242 for ecx=0 when icount is enabled.
8243 */
bd79255d 8244 dc->repz_opt = !dc->jmp_opt && !(tb->cflags & CF_USE_ICOUNT);
4f31916f
FB
8245#if 0
8246 /* check addseg logic */
dc196a57 8247 if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
4f31916f
FB
8248 printf("ERROR addseg\n");
8249#endif
8250
1d1cc4d0
RH
8251 cpu_T0 = tcg_temp_new();
8252 cpu_T1 = tcg_temp_new();
a7812ae4 8253 cpu_A0 = tcg_temp_new();
a7812ae4
PB
8254
8255 cpu_tmp0 = tcg_temp_new();
8256 cpu_tmp1_i64 = tcg_temp_new_i64();
8257 cpu_tmp2_i32 = tcg_temp_new_i32();
8258 cpu_tmp3_i32 = tcg_temp_new_i32();
8259 cpu_tmp4 = tcg_temp_new();
a7812ae4
PB
8260 cpu_ptr0 = tcg_temp_new_ptr();
8261 cpu_ptr1 = tcg_temp_new_ptr();
a3251186 8262 cpu_cc_srcT = tcg_temp_local_new();
57fec1fe 8263
2c0262af
FB
8264 dc->is_jmp = DISAS_NEXT;
8265 pc_ptr = pc_start;
2e70f6ef
PB
8266 num_insns = 0;
8267 max_insns = tb->cflags & CF_COUNT_MASK;
190ce7fb 8268 if (max_insns == 0) {
2e70f6ef 8269 max_insns = CF_COUNT_MASK;
190ce7fb
RH
8270 }
8271 if (max_insns > TCG_MAX_INSNS) {
8272 max_insns = TCG_MAX_INSNS;
8273 }
2c0262af 8274
cd42d5b2 8275 gen_tb_start(tb);
2c0262af 8276 for(;;) {
2066d095 8277 tcg_gen_insn_start(pc_ptr, dc->cc_op);
959082fc 8278 num_insns++;
667b8e29 8279
b933066a
RH
8280 /* If RF is set, suppress an internally generated breakpoint. */
8281 if (unlikely(cpu_breakpoint_test(cs, pc_ptr,
8282 tb->flags & HF_RF_MASK
8283 ? BP_GDB : BP_ANY))) {
8284 gen_debug(dc, pc_ptr - dc->cs_base);
522a0d4e
RH
8285 /* The address covered by the breakpoint must be included in
8286 [tb->pc, tb->pc + tb->size) in order for it to be
8287 properly cleared -- thus we increment the PC here so that
8288 the logic setting tb->size below does the right thing. */
8289 pc_ptr += 1;
b933066a
RH
8290 goto done_generating;
8291 }
959082fc 8292 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
2e70f6ef 8293 gen_io_start();
959082fc 8294 }
2e70f6ef 8295
0af10c86 8296 pc_ptr = disas_insn(env, dc, pc_ptr);
2c0262af
FB
8297 /* stop translation if indicated */
8298 if (dc->is_jmp)
8299 break;
8300 /* in single-step mode, we generate only one instruction and
8301 then generate an exception */
a2cc3b24
FB
8302 /* if irqs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
8303 the flag and abort the translation to give the irqs a
8304 chance to happen */
5fafdf24 8305 if (dc->tf || dc->singlestep_enabled ||
2e70f6ef 8306 (flags & HF_INHIBIT_IRQ_MASK)) {
14ce26e7 8307 gen_jmp_im(pc_ptr - dc->cs_base);
2c0262af
FB
8308 gen_eob(dc);
8309 break;
8310 }
5b9efc39
PD
8311 /* Do not cross a page boundary in icount mode, since that can
8312 cause an exception. Stop only when the boundary is
8313 crossed by the first instruction in the block.
8314 If the current instruction has already crossed it, that is fine,
8315 because an exception has not stopped this code.
8316 */
bd79255d 8317 if ((tb->cflags & CF_USE_ICOUNT)
5b9efc39
PD
8318 && ((pc_ptr & TARGET_PAGE_MASK)
8319 != ((pc_ptr + TARGET_MAX_INSN_SIZE - 1) & TARGET_PAGE_MASK)
8320 || (pc_ptr & ~TARGET_PAGE_MASK) == 0)) {
8321 gen_jmp_im(pc_ptr - dc->cs_base);
8322 gen_eob(dc);
8323 break;
8324 }
2c0262af 8325 /* if too long translation, stop generation too */
fe700adb 8326 if (tcg_op_buf_full() ||
2e70f6ef
PB
8327 (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
8328 num_insns >= max_insns) {
14ce26e7 8329 gen_jmp_im(pc_ptr - dc->cs_base);
2c0262af
FB
8330 gen_eob(dc);
8331 break;
8332 }
1b530a6d
AJ
8333 if (singlestep) {
8334 gen_jmp_im(pc_ptr - dc->cs_base);
8335 gen_eob(dc);
8336 break;
8337 }
2c0262af 8338 }
2e70f6ef
PB
8339 if (tb->cflags & CF_LAST_IO)
8340 gen_io_end();
e64e3535 8341done_generating:
806f352d 8342 gen_tb_end(tb, num_insns);
0a7df5da 8343
2c0262af 8344#ifdef DEBUG_DISAS
8fec2b8c 8345 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
14ce26e7 8346 int disas_flags;
93fcfe39
AL
8347 qemu_log("----------------\n");
8348 qemu_log("IN: %s\n", lookup_symbol(pc_start));
14ce26e7
FB
8349#ifdef TARGET_X86_64
8350 if (dc->code64)
8351 disas_flags = 2;
8352 else
8353#endif
8354 disas_flags = !dc->code32;
d49190c4 8355 log_target_disas(cs, pc_start, pc_ptr - pc_start, disas_flags);
93fcfe39 8356 qemu_log("\n");
2c0262af
FB
8357 }
8358#endif
8359
4e5e1215
RH
8360 tb->size = pc_ptr - pc_start;
8361 tb->icount = num_insns;
2c0262af
FB
8362}
8363
bad729e2
RH
8364void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb,
8365 target_ulong *data)
d2856f1a 8366{
bad729e2
RH
8367 int cc_op = data[1];
8368 env->eip = data[0] - tb->cs_base;
8369 if (cc_op != CC_OP_DYNAMIC) {
d2856f1a 8370 env->cc_op = cc_op;
bad729e2 8371 }
d2856f1a 8372}