/*
 *  i386 translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "qemu/host-utils.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"


#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10
#define PREFIX_VEX    0x20

#ifdef TARGET_X86_64
#define CODE64(s) ((s)->code64)
#define REX_X(s) ((s)->rex_x)
#define REX_B(s) ((s)->rex_b)
#else
#define CODE64(s) 0
#define REX_X(s) 0
#define REX_B(s) 0
#endif

#ifdef TARGET_X86_64
# define ctztl  ctz64
# define clztl  clz64
#else
# define ctztl  ctz32
# define clztl  clz32
#endif

//#define MACRO_TEST   1

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_A0;
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2, cpu_cc_srcT;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
/* local temps */
static TCGv cpu_T[2];
/* local register indexes (only used inside old micro ops) */
static TCGv cpu_tmp0, cpu_tmp4;
static TCGv_ptr cpu_ptr0, cpu_ptr1;
static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
static TCGv_i64 cpu_tmp1_i64;

static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];

#include "exec/gen-icount.h"

#ifdef TARGET_X86_64
static int x86_64_hregs;
#endif

typedef struct DisasContext {
    /* current insn context */
    int override; /* -1 if no override */
    int prefix;
    TCGMemOp aflag;
    TCGMemOp dflag;
    target_ulong pc; /* pc = eip + cs_base */
    int is_jmp; /* 1 = means jump (stop translation), 2 means CPU
                   static state change (stop translation) */
    /* current block context */
    target_ulong cs_base; /* base of CS segment */
    int pe;     /* protected mode */
    int code32; /* 32 bit code segment */
#ifdef TARGET_X86_64
    int lma;    /* long mode active */
    int code64; /* 64 bit code segment */
    int rex_x, rex_b;
#endif
    int vex_l;  /* vex vector length */
    int vex_v;  /* vex vvvv register, without 1's complement. */
    int ss32;   /* 32 bit stack segment */
    CCOp cc_op;  /* current CC operation */
    bool cc_op_dirty;
    int addseg; /* non zero if either DS/ES/SS have a non zero base */
    int f_st;   /* currently unused */
    int vm86;   /* vm86 mode */
    int cpl;
    int iopl;
    int tf;     /* TF cpu flag */
    int singlestep_enabled; /* "hardware" single step enabled */
    int jmp_opt; /* use direct block chaining for direct jumps */
    int repz_opt; /* optimize jumps within repz instructions */
    int mem_index; /* select memory access functions */
    uint64_t flags; /* all execution flags */
    struct TranslationBlock *tb;
    int popl_esp_hack; /* for correct popl with esp base handling */
    int rip_offset; /* only used in x86_64, but left for simplicity */
    int cpuid_features;
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
    int cpuid_7_0_ebx_features;
} DisasContext;

static void gen_eob(DisasContext *s);
static void gen_jmp(DisasContext *s, target_ulong eip);
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d);

/* i386 arith/logic operations */
enum {
    OP_ADDL,
    OP_ORL,
    OP_ADCL,
    OP_SBBL,
    OP_ANDL,
    OP_SUBL,
    OP_XORL,
    OP_CMPL,
};

/* i386 shift ops */
enum {
    OP_ROL,
    OP_ROR,
    OP_RCL,
    OP_RCR,
    OP_SHL,
    OP_SHR,
    OP_SHL1, /* undocumented */
    OP_SAR = 7,
};

enum {
    JCC_O,
    JCC_B,
    JCC_Z,
    JCC_BE,
    JCC_S,
    JCC_P,
    JCC_L,
    JCC_LE,
};

enum {
    /* I386 int registers */
    OR_EAX,   /* MUST be even numbered */
    OR_ECX,
    OR_EDX,
    OR_EBX,
    OR_ESP,
    OR_EBP,
    OR_ESI,
    OR_EDI,

    OR_TMP0 = 16,    /* temporary operand register */
    OR_TMP1,
    OR_A0, /* temporary register used when doing address evaluation */
};

enum {
    USES_CC_DST  = 1,
    USES_CC_SRC  = 2,
    USES_CC_SRC2 = 4,
    USES_CC_SRCT = 8,
};

/* Bit set if the global variable is live after setting CC_OP to X.  */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_EFLAGS] = USES_CC_SRC,
    [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
    [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
    [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_CLR] = 0,
};

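/* Switch the translator's current CC_OP to OP, discarding any CC
   temporaries that the new op no longer uses. */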
static void set_cc_op(DisasContext *s, CCOp op)
{
    int dead;

    if (s->cc_op == op) {
        return;
    }

    /* Discard CC computation that will no longer be used. */
    dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
    if (dead & USES_CC_DST) {
        tcg_gen_discard_tl(cpu_cc_dst);
    }
    if (dead & USES_CC_SRC) {
        tcg_gen_discard_tl(cpu_cc_src);
    }
    if (dead & USES_CC_SRC2) {
        tcg_gen_discard_tl(cpu_cc_src2);
    }
    if (dead & USES_CC_SRCT) {
        tcg_gen_discard_tl(cpu_cc_srcT);
    }

    if (op == CC_OP_DYNAMIC) {
        /* The DYNAMIC setting is translator only, and should never be
           stored.  Thus we always consider it clean.  */
        s->cc_op_dirty = false;
    } else {
        /* Discard any computed CC_OP value (see shifts).  */
        if (s->cc_op == CC_OP_DYNAMIC) {
            tcg_gen_discard_i32(cpu_cc_op);
        }
        s->cc_op_dirty = true;
    }
    s->cc_op = op;
}

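/* Write back a pending (dirty) cc_op value to the cpu_cc_op global. */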
static void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op_dirty) {
        tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
        s->cc_op_dirty = false;
    }
}

#ifdef TARGET_X86_64

#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */

#if defined(HOST_WORDS_BIGENDIAN)
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif

/* In instruction encodings for byte register accesses the
 * register number usually indicates "low 8 bits of register N";
 * however there are some special cases where N 4..7 indicates
 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
 * true for this special case, false otherwise.
 */
static inline bool byte_reg_is_xH(int reg)
{
    if (reg < 4) {
        return false;
    }
#ifdef TARGET_X86_64
    if (reg >= 8 || x86_64_hregs) {
        return false;
    }
#endif
    return true;
}

/* Select the size of a push/pop operation.  */
static inline TCGMemOp mo_pushpop(DisasContext *s, TCGMemOp ot)
{
    if (CODE64(s)) {
        return ot == MO_16 ? MO_16 : MO_64;
    } else {
        return ot;
    }
}

/* Select only size 64 else 32.  Used for SSE operand sizes.  */
static inline TCGMemOp mo_64_32(TCGMemOp ot)
{
#ifdef TARGET_X86_64
    return ot == MO_64 ? MO_64 : MO_32;
#else
    return MO_32;
#endif
}

/* Select size 8 if lsb of B is clear, else OT.  Used for decoding
   byte vs word opcodes.  */
static inline TCGMemOp mo_b_d(int b, TCGMemOp ot)
{
    return b & 1 ? ot : MO_8;
}

/* Select size 8 if lsb of B is clear, else OT capped at 32.
   Used for decoding operand size of port opcodes.  */
static inline TCGMemOp mo_b_d32(int b, TCGMemOp ot)
{
    return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
}

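/* Store T0 into x86 register REG according to operand size OT,
   handling the AH/CH/DH/BH byte-register encodings. */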
static void gen_op_mov_reg_v(TCGMemOp ot, int reg, TCGv t0)
{
    switch(ot) {
    case MO_8:
        if (!byte_reg_is_xH(reg)) {
            tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
        } else {
            tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
        }
        break;
    case MO_16:
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
        break;
    case MO_32:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], t0);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        tcg_gen_mov_tl(cpu_regs[reg], t0);
        break;
#endif
    default:
        tcg_abort();
    }
}

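/* Copy x86 register REG into T0; for the AH/CH/DH/BH encodings this
   extracts bits 15..8 of the underlying register. */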
static inline void gen_op_mov_v_reg(TCGMemOp ot, TCGv t0, int reg)
{
    if (ot == MO_8 && byte_reg_is_xH(reg)) {
        tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
        tcg_gen_ext8u_tl(t0, t0);
    } else {
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
    }
}

static inline void gen_op_movl_A0_reg(int reg)
{
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
}

static inline void gen_op_addl_A0_im(int32_t val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
#ifdef TARGET_X86_64
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
#endif
}

#ifdef TARGET_X86_64
static inline void gen_op_addq_A0_im(int64_t val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
}
#endif

static void gen_add_A0_im(DisasContext *s, int val)
{
#ifdef TARGET_X86_64
    if (CODE64(s))
        gen_op_addq_A0_im(val);
    else
#endif
        gen_op_addl_A0_im(val);
}

static inline void gen_op_jmp_v(TCGv dest)
{
    tcg_gen_st_tl(dest, cpu_env, offsetof(CPUX86State, eip));
}

static inline void gen_op_add_reg_im(TCGMemOp size, int reg, int32_t val)
{
    tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(size, reg, cpu_tmp0);
}

static inline void gen_op_add_reg_T0(TCGMemOp size, int reg)
{
    tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
    gen_op_mov_reg_v(size, reg, cpu_tmp0);
}

static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
    if (shift != 0)
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
    /* For x86_64, this sets the higher half of register to zero.
       For i386, this is equivalent to a nop. */
    tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
}

static inline void gen_op_movl_A0_seg(int reg)
{
    tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base) + REG_L_OFFSET);
}

static inline void gen_op_addl_A0_seg(DisasContext *s, int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
        tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
    } else {
        tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
        tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
    }
#else
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
#endif
}

#ifdef TARGET_X86_64
static inline void gen_op_movq_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base));
}

static inline void gen_op_addq_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}

static inline void gen_op_movq_A0_reg(int reg)
{
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
}

static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
    if (shift != 0)
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}
#endif

static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
{
    if (d == OR_TMP0) {
        gen_op_st_v(s, idx, cpu_T[0], cpu_A0);
    } else {
        gen_op_mov_reg_v(idx, d, cpu_T[0]);
    }
}

static inline void gen_jmp_im(target_ulong pc)
{
    tcg_gen_movi_tl(cpu_tmp0, pc);
    gen_op_jmp_v(cpu_tmp0);
}

static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    int override;

    override = s->override;
    switch (s->aflag) {
#ifdef TARGET_X86_64
    case MO_64:
        if (override >= 0) {
            gen_op_movq_A0_seg(override);
            gen_op_addq_A0_reg_sN(0, R_ESI);
        } else {
            gen_op_movq_A0_reg(R_ESI);
        }
        break;
#endif
    case MO_32:
        /* 32 bit address */
        if (s->addseg && override < 0)
            override = R_DS;
        if (override >= 0) {
            gen_op_movl_A0_seg(override);
            gen_op_addl_A0_reg_sN(0, R_ESI);
        } else {
            gen_op_movl_A0_reg(R_ESI);
        }
        break;
    case MO_16:
        /* 16 bit address, always override */
        if (override < 0)
            override = R_DS;
        tcg_gen_ext16u_tl(cpu_A0, cpu_regs[R_ESI]);
        gen_op_addl_A0_seg(s, override);
        break;
    default:
        tcg_abort();
    }
}

static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
    switch (s->aflag) {
#ifdef TARGET_X86_64
    case MO_64:
        gen_op_movq_A0_reg(R_EDI);
        break;
#endif
    case MO_32:
        if (s->addseg) {
            gen_op_movl_A0_seg(R_ES);
            gen_op_addl_A0_reg_sN(0, R_EDI);
        } else {
            gen_op_movl_A0_reg(R_EDI);
        }
        break;
    case MO_16:
        tcg_gen_ext16u_tl(cpu_A0, cpu_regs[R_EDI]);
        gen_op_addl_A0_seg(s, R_ES);
        break;
    default:
        tcg_abort();
    }
}

static inline void gen_op_movl_T0_Dshift(TCGMemOp ot)
{
    tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
};

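/* Extend SRC to target_ulong width according to SIZE (signed if SIGN),
   placing the result in DST; full-width sizes return SRC unchanged. */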
static TCGv gen_ext_tl(TCGv dst, TCGv src, TCGMemOp size, bool sign)
{
    switch (size) {
    case MO_8:
        if (sign) {
            tcg_gen_ext8s_tl(dst, src);
        } else {
            tcg_gen_ext8u_tl(dst, src);
        }
        return dst;
    case MO_16:
        if (sign) {
            tcg_gen_ext16s_tl(dst, src);
        } else {
            tcg_gen_ext16u_tl(dst, src);
        }
        return dst;
#ifdef TARGET_X86_64
    case MO_32:
        if (sign) {
            tcg_gen_ext32s_tl(dst, src);
        } else {
            tcg_gen_ext32u_tl(dst, src);
        }
        return dst;
#endif
    default:
        return src;
    }
}

static void gen_extu(TCGMemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, false);
}

static void gen_exts(TCGMemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, true);
}

static inline void gen_op_jnz_ecx(TCGMemOp size, TCGLabel *label1)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
}

static inline void gen_op_jz_ecx(TCGMemOp size, TCGLabel *label1)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
}

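/* Call the port-input helper matching the operand size. */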
static void gen_helper_in_func(TCGMemOp ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_inb(v, cpu_env, n);
        break;
    case MO_16:
        gen_helper_inw(v, cpu_env, n);
        break;
    case MO_32:
        gen_helper_inl(v, cpu_env, n);
        break;
    default:
        tcg_abort();
    }
}

static void gen_helper_out_func(TCGMemOp ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_outb(cpu_env, v, n);
        break;
    case MO_16:
        gen_helper_outw(cpu_env, v, n);
        break;
    case MO_32:
        gen_helper_outl(cpu_env, v, n);
        break;
    default:
        tcg_abort();
    }
}

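/* Emit the I/O permission checks (TSS I/O bitmap and SVM intercept)
   required before an I/O instruction. */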
static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip,
                         uint32_t svm_flags)
{
    target_ulong next_eip;

    if (s->pe && (s->cpl > s->iopl || s->vm86)) {
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        switch (ot) {
        case MO_8:
            gen_helper_check_iob(cpu_env, cpu_tmp2_i32);
            break;
        case MO_16:
            gen_helper_check_iow(cpu_env, cpu_tmp2_i32);
            break;
        case MO_32:
            gen_helper_check_iol(cpu_env, cpu_tmp2_i32);
            break;
        default:
            tcg_abort();
        }
    }
    if(s->flags & HF_SVMI_MASK) {
        gen_update_cc_op(s);
        gen_jmp_im(cur_eip);
        svm_flags |= (1 << (4 + ot));
        next_eip = s->pc - s->cs_base;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_svm_check_io(cpu_env, cpu_tmp2_i32,
                                tcg_const_i32(svm_flags),
                                tcg_const_i32(next_eip - cur_eip));
    }
}

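/* Emit one MOVS iteration: copy a datum from [ESI] to [EDI] and
   advance both index registers by the direction-flag delta. */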
static inline void gen_movs(DisasContext *s, TCGMemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static void gen_op_update1_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static void gen_op_update2_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static void gen_op_update3_cc(TCGv reg)
{
    tcg_gen_mov_tl(cpu_cc_src2, reg);
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static inline void gen_op_testl_T0_T1_cc(void)
{
    tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
}

static void gen_op_update_neg_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
    tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
    tcg_gen_movi_tl(cpu_cc_srcT, 0);
}

/* compute all eflags to cc_src */
static void gen_compute_eflags(DisasContext *s)
{
    TCGv zero, dst, src1, src2;
    int live, dead;

    if (s->cc_op == CC_OP_EFLAGS) {
        return;
    }
    if (s->cc_op == CC_OP_CLR) {
        tcg_gen_movi_tl(cpu_cc_src, CC_Z | CC_P);
        set_cc_op(s, CC_OP_EFLAGS);
        return;
    }

    TCGV_UNUSED(zero);
    dst = cpu_cc_dst;
    src1 = cpu_cc_src;
    src2 = cpu_cc_src2;

    /* Take care to not read values that are not live.  */
    live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
    dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
    if (dead) {
        zero = tcg_const_tl(0);
        if (dead & USES_CC_DST) {
            dst = zero;
        }
        if (dead & USES_CC_SRC) {
            src1 = zero;
        }
        if (dead & USES_CC_SRC2) {
            src2 = zero;
        }
    }

    gen_update_cc_op(s);
    gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op);
    set_cc_op(s, CC_OP_EFLAGS);

    if (dead) {
        tcg_temp_free(zero);
    }
}

typedef struct CCPrepare {
    TCGCond cond;
    TCGv reg;
    TCGv reg2;
    target_ulong imm;
    target_ulong mask;
    bool use_reg2;
    bool no_setcond;
} CCPrepare;

/* compute eflags.C to reg */
static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
{
    TCGv t0, t1;
    int size, shift;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_SUBB;
        t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
        /* If no temporary was used, be careful not to alias t1 and t0.  */
        t0 = TCGV_EQUAL(t1, cpu_cc_src) ? cpu_tmp0 : reg;
        tcg_gen_mov_tl(t0, cpu_cc_srcT);
        gen_extu(size, t0);
        goto add_sub;

    case CC_OP_ADDB ... CC_OP_ADDQ:
        /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_ADDB;
        t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
        t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
    add_sub:
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
                             .reg2 = t1, .mask = -1, .use_reg2 = true };

    case CC_OP_LOGICB ... CC_OP_LOGICQ:
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };

    case CC_OP_INCB ... CC_OP_INCQ:
    case CC_OP_DECB ... CC_OP_DECQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = -1, .no_setcond = true };

    case CC_OP_SHLB ... CC_OP_SHLQ:
        /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
        size = s->cc_op - CC_OP_SHLB;
        shift = (8 << size) - 1;
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = (target_ulong)1 << shift };

    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = -1 };

    case CC_OP_BMILGB ... CC_OP_BMILGQ:
        size = s->cc_op - CC_OP_BMILGB;
        t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };

    case CC_OP_ADCX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
                             .mask = -1, .no_setcond = true };

    case CC_OP_EFLAGS:
    case CC_OP_SARB ... CC_OP_SARQ:
        /* CC_SRC & 1 */
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = CC_C };

    default:
        /* The need to compute only C from CC_OP_DYNAMIC is important
           in efficiently implementing e.g. INC at the start of a TB.  */
        gen_update_cc_op(s);
        gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
                                cpu_cc_src2, cpu_cc_op);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                             .mask = -1, .no_setcond = true };
    }
}

/* compute eflags.P to reg */
static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
{
    gen_compute_eflags(s);
    return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                         .mask = CC_P };
}

/* compute eflags.S to reg */
static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_S };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        {
            TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
            return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
        }
    }
}

/* compute eflags.O to reg */
static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
                             .mask = -1, .no_setcond = true };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        gen_compute_eflags(s);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_O };
    }
}

/* compute eflags.Z to reg */
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_Z };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
    default:
        {
            TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
            return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
        }
    }
}

/* perform a conditional store into register 'reg' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
{
    int inv, jcc_op, cond;
    TCGMemOp size;
    CCPrepare cc;
    TCGv t0;

    inv = b & 1;
    jcc_op = (b >> 1) & 7;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* We optimize relational operators for the cmp/jcc case.  */
        size = s->cc_op - CC_OP_SUBB;
        switch (jcc_op) {
        case JCC_BE:
            tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
            gen_extu(size, cpu_tmp4);
            t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
            cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = cpu_tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        case JCC_L:
            cond = TCG_COND_LT;
            goto fast_jcc_l;
        case JCC_LE:
            cond = TCG_COND_LE;
        fast_jcc_l:
            tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
            gen_exts(size, cpu_tmp4);
            t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, true);
            cc = (CCPrepare) { .cond = cond, .reg = cpu_tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        default:
            goto slow_jcc;
        }
        break;

    default:
    slow_jcc:
        /* This actually generates good code for JC, JZ and JS.  */
        switch (jcc_op) {
        case JCC_O:
            cc = gen_prepare_eflags_o(s, reg);
            break;
        case JCC_B:
            cc = gen_prepare_eflags_c(s, reg);
            break;
        case JCC_Z:
            cc = gen_prepare_eflags_z(s, reg);
            break;
        case JCC_BE:
            gen_compute_eflags(s);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                               .mask = CC_Z | CC_C };
            break;
        case JCC_S:
            cc = gen_prepare_eflags_s(s, reg);
            break;
        case JCC_P:
            cc = gen_prepare_eflags_p(s, reg);
            break;
        case JCC_L:
            gen_compute_eflags(s);
            if (TCGV_EQUAL(reg, cpu_cc_src)) {
                reg = cpu_tmp0;
            }
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S };
            break;
        default:
        case JCC_LE:
            gen_compute_eflags(s);
            if (TCGV_EQUAL(reg, cpu_cc_src)) {
                reg = cpu_tmp0;
            }
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S | CC_Z };
            break;
        }
        break;
    }

    if (inv) {
        cc.cond = tcg_invert_cond(cc.cond);
    }
    return cc;
}

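/* Store the truth value of condition code B into REG, using the
   condition prepared by gen_prepare_cc. */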
static void gen_setcc1(DisasContext *s, int b, TCGv reg)
{
    CCPrepare cc = gen_prepare_cc(s, b, reg);

    if (cc.no_setcond) {
        if (cc.cond == TCG_COND_EQ) {
            tcg_gen_xori_tl(reg, cc.reg, 1);
        } else {
            tcg_gen_mov_tl(reg, cc.reg);
        }
        return;
    }

    if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
        cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
        tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
        tcg_gen_andi_tl(reg, reg, 1);
        return;
    }
    if (cc.mask != -1) {
        tcg_gen_andi_tl(reg, cc.reg, cc.mask);
        cc.reg = reg;
    }
    if (cc.use_reg2) {
        tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
    } else {
        tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
    }
}

static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
{
    gen_setcc1(s, JCC_B << 1, reg);
}

/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);

    if (cc.mask != -1) {
        tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
        cc.reg = cpu_T[0];
    }
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* Generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used.
   A translation block must end soon.  */
static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);

    gen_update_cc_op(s);
    if (cc.mask != -1) {
        tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
        cc.reg = cpu_T[0];
    }
    set_cc_op(s, CC_OP_DYNAMIC);
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem */
static TCGLabel *gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
{
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();
    gen_op_jnz_ecx(s->aflag, l1);
    gen_set_label(l2);
    gen_jmp_tb(s, next_eip, 1);
    gen_set_label(l1);
    return l2;
}

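/* Emit one STOS iteration: store AL/AX/EAX to [EDI] and advance EDI. */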
static inline void gen_stos(DisasContext *s, TCGMemOp ot)
{
    gen_op_mov_v_reg(MO_32, cpu_T[0], R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_lods(DisasContext *s, TCGMemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    gen_op_mov_reg_v(ot, R_EAX, cpu_T[0]);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
}

static inline void gen_scas(DisasContext *s, TCGMemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
    gen_op(s, OP_CMPL, ot, R_EAX);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_cmps(DisasContext *s, TCGMemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
    gen_string_movl_A0_ESI(s);
    gen_op(s, OP_CMPL, ot, OR_TMP0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_ins(DisasContext *s, TCGMemOp ot)
{
    if (s->tb->cflags & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    tcg_gen_movi_tl(cpu_T[0], 0);
    gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    gen_helper_in_func(ot, cpu_T[0], cpu_tmp2_i32);
    gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
    if (s->tb->cflags & CF_USE_ICOUNT) {
        gen_io_end();
    }
}

static inline void gen_outs(DisasContext *s, TCGMemOp ot)
{
    if (s->tb->cflags & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);

    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
    gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);

    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    if (s->tb->cflags & CF_USE_ICOUNT) {
        gen_io_end();
    }
}

/* same method as Valgrind: we generate jumps to current or next
   instruction */
#define GEN_REPZ(op)                                                          \
static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot,              \
                                 target_ulong cur_eip, target_ulong next_eip) \
{                                                                             \
    TCGLabel *l2;                                                             \
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
    /* a loop would cause two single step exceptions if ECX = 1               \
       before rep string_insn */                                              \
    if (s->repz_opt)                                                          \
        gen_op_jz_ecx(s->aflag, l2);                                          \
    gen_jmp(s, cur_eip);                                                      \
}

#define GEN_REPZ2(op)                                                         \
static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot,              \
                                   target_ulong cur_eip,                      \
                                   target_ulong next_eip,                     \
                                   int nz)                                    \
{                                                                             \
    TCGLabel *l2;                                                             \
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
    gen_update_cc_op(s);                                                      \
    gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);                                 \
    if (s->repz_opt)                                                          \
        gen_op_jz_ecx(s->aflag, l2);                                          \
    gen_jmp(s, cur_eip);                                                      \
}

GEN_REPZ(movs)
GEN_REPZ(stos)
GEN_REPZ(lods)
GEN_REPZ(ins)
GEN_REPZ(outs)
GEN_REPZ2(scas)
GEN_REPZ2(cmps)

static void gen_helper_fp_arith_ST0_FT0(int op)
{
    switch (op) {
    case 0:
        gen_helper_fadd_ST0_FT0(cpu_env);
        break;
    case 1:
        gen_helper_fmul_ST0_FT0(cpu_env);
        break;
    case 2:
        gen_helper_fcom_ST0_FT0(cpu_env);
        break;
    case 3:
        gen_helper_fcom_ST0_FT0(cpu_env);
        break;
    case 4:
        gen_helper_fsub_ST0_FT0(cpu_env);
        break;
    case 5:
        gen_helper_fsubr_ST0_FT0(cpu_env);
        break;
    case 6:
        gen_helper_fdiv_ST0_FT0(cpu_env);
        break;
    case 7:
        gen_helper_fdivr_ST0_FT0(cpu_env);
        break;
    }
}

/* NOTE the exception in "r" op ordering */
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
{
    TCGv_i32 tmp = tcg_const_i32(opreg);
    switch (op) {
    case 0:
        gen_helper_fadd_STN_ST0(cpu_env, tmp);
        break;
    case 1:
        gen_helper_fmul_STN_ST0(cpu_env, tmp);
        break;
    case 4:
        gen_helper_fsubr_STN_ST0(cpu_env, tmp);
        break;
    case 5:
        gen_helper_fsub_STN_ST0(cpu_env, tmp);
        break;
    case 6:
        gen_helper_fdivr_STN_ST0(cpu_env, tmp);
        break;
    case 7:
        gen_helper_fdiv_STN_ST0(cpu_env, tmp);
        break;
    }
}

/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d)
{
    if (d != OR_TMP0) {
        gen_op_mov_v_reg(ot, cpu_T[0], d);
    } else {
        gen_op_ld_v(s1, ot, cpu_T[0], cpu_A0);
    }
    switch(op) {
    case OP_ADCL:
        gen_compute_eflags_c(s1, cpu_tmp4);
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update3_cc(cpu_tmp4);
        set_cc_op(s1, CC_OP_ADCB + ot);
        break;
    case OP_SBBL:
        gen_compute_eflags_c(s1, cpu_tmp4);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update3_cc(cpu_tmp4);
        set_cc_op(s1, CC_OP_SBBB + ot);
        break;
    case OP_ADDL:
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update2_cc();
        set_cc_op(s1, CC_OP_ADDB + ot);
        break;
    case OP_SUBL:
        tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update2_cc();
        set_cc_op(s1, CC_OP_SUBB + ot);
        break;
    default:
    case OP_ANDL:
        tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update1_cc();
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_ORL:
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update1_cc();
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_XORL:
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update1_cc();
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_CMPL:
        tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
        tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
        tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
        set_cc_op(s1, CC_OP_SUBB + ot);
        break;
    }
}

/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_inc(DisasContext *s1, TCGMemOp ot, int d, int c)
{
    if (d != OR_TMP0) {
        gen_op_mov_v_reg(ot, cpu_T[0], d);
    } else {
        gen_op_ld_v(s1, ot, cpu_T[0], cpu_A0);
    }
    gen_compute_eflags_c(s1, cpu_cc_src);
    if (c > 0) {
        tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1);
        set_cc_op(s1, CC_OP_INCB + ot);
    } else {
        tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1);
        set_cc_op(s1, CC_OP_DECB + ot);
    }
    gen_op_st_rm_T0_A0(s1, ot, d);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

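/* Update the CC state after a shift whose count may be zero: the flag
   temporaries and CC_OP are only replaced when COUNT is non-zero. */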
d67dc9e6
RH
1386static void gen_shift_flags(DisasContext *s, TCGMemOp ot, TCGv result,
1387 TCGv shm1, TCGv count, bool is_right)
f437d0a3
RH
1388{
1389 TCGv_i32 z32, s32, oldop;
1390 TCGv z_tl;
1391
1392 /* Store the results into the CC variables. If we know that the
1393 variable must be dead, store unconditionally. Otherwise we'll
1394 need to not disrupt the current contents. */
1395 z_tl = tcg_const_tl(0);
1396 if (cc_op_live[s->cc_op] & USES_CC_DST) {
1397 tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
1398 result, cpu_cc_dst);
1399 } else {
1400 tcg_gen_mov_tl(cpu_cc_dst, result);
1401 }
1402 if (cc_op_live[s->cc_op] & USES_CC_SRC) {
1403 tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
1404 shm1, cpu_cc_src);
1405 } else {
1406 tcg_gen_mov_tl(cpu_cc_src, shm1);
1407 }
1408 tcg_temp_free(z_tl);
1409
1410 /* Get the two potential CC_OP values into temporaries. */
1411 tcg_gen_movi_i32(cpu_tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
1412 if (s->cc_op == CC_OP_DYNAMIC) {
1413 oldop = cpu_cc_op;
1414 } else {
1415 tcg_gen_movi_i32(cpu_tmp3_i32, s->cc_op);
1416 oldop = cpu_tmp3_i32;
1417 }
1418
1419 /* Conditionally store the CC_OP value. */
1420 z32 = tcg_const_i32(0);
1421 s32 = tcg_temp_new_i32();
1422 tcg_gen_trunc_tl_i32(s32, count);
1423 tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, cpu_tmp2_i32, oldop);
1424 tcg_temp_free_i32(z32);
1425 tcg_temp_free_i32(s32);
1426
1427 /* The CC_OP value is no longer predictable. */
1428 set_cc_op(s, CC_OP_DYNAMIC);
1429}
1430
d67dc9e6 1431static void gen_shift_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
b6abf97d 1432 int is_right, int is_arith)
2c0262af 1433{
4ba9938c 1434 target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
3b46e624 1435
b6abf97d 1436 /* load */
82786041 1437 if (op1 == OR_TMP0) {
909be183 1438 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
82786041 1439 } else {
c56baccf 1440 gen_op_mov_v_reg(ot, cpu_T[0], op1);
82786041 1441 }
b6abf97d 1442
a41f62f5
RH
1443 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);
1444 tcg_gen_subi_tl(cpu_tmp0, cpu_T[1], 1);
b6abf97d
FB
1445
1446 if (is_right) {
1447 if (is_arith) {
f484d386 1448 gen_exts(ot, cpu_T[0]);
a41f62f5
RH
1449 tcg_gen_sar_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
1450 tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
b6abf97d 1451 } else {
cad3a37d 1452 gen_extu(ot, cpu_T[0]);
a41f62f5
RH
1453 tcg_gen_shr_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
1454 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
b6abf97d
FB
1455 }
1456 } else {
a41f62f5
RH
1457 tcg_gen_shl_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
1458 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
b6abf97d
FB
1459 }
1460
1461 /* store */
d4faa3e0 1462 gen_op_st_rm_T0_A0(s, ot, op1);
82786041 1463
f437d0a3 1464 gen_shift_flags(s, ot, cpu_T[0], cpu_tmp0, cpu_T[1], is_right);
b6abf97d
FB
1465}
1466
d67dc9e6 1467static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
c1c37968
FB
1468 int is_right, int is_arith)
1469{
4ba9938c 1470 int mask = (ot == MO_64 ? 0x3f : 0x1f);
c1c37968
FB
1471
1472 /* load */
1473 if (op1 == OR_TMP0)
909be183 1474 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
c1c37968 1475 else
c56baccf 1476 gen_op_mov_v_reg(ot, cpu_T[0], op1);
c1c37968
FB
1477
1478 op2 &= mask;
1479 if (op2 != 0) {
1480 if (is_right) {
1481 if (is_arith) {
1482 gen_exts(ot, cpu_T[0]);
2a449d14 1483 tcg_gen_sari_tl(cpu_tmp4, cpu_T[0], op2 - 1);
c1c37968
FB
1484 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2);
1485 } else {
1486 gen_extu(ot, cpu_T[0]);
2a449d14 1487 tcg_gen_shri_tl(cpu_tmp4, cpu_T[0], op2 - 1);
c1c37968
FB
1488 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2);
1489 }
1490 } else {
2a449d14 1491 tcg_gen_shli_tl(cpu_tmp4, cpu_T[0], op2 - 1);
c1c37968
FB
1492 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2);
1493 }
1494 }
1495
1496 /* store */
d4faa3e0
RH
1497 gen_op_st_rm_T0_A0(s, ot, op1);
1498
c1c37968
FB
1499 /* update eflags if non zero shift */
1500 if (op2 != 0) {
2a449d14 1501 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
c1c37968 1502 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
3ca51d07 1503 set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
c1c37968
FB
1504 }
1505}
1506
d67dc9e6 1507static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right)
b6abf97d 1508{
4ba9938c 1509 target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
34d80a55 1510 TCGv_i32 t0, t1;
b6abf97d
FB
1511
1512 /* load */
1e4840bf 1513 if (op1 == OR_TMP0) {
909be183 1514 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
1e4840bf 1515 } else {
c56baccf 1516 gen_op_mov_v_reg(ot, cpu_T[0], op1);
1e4840bf 1517 }
b6abf97d 1518
34d80a55 1519 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);
b6abf97d 1520
34d80a55 1521 switch (ot) {
4ba9938c 1522 case MO_8:
34d80a55
RH
1523 /* Replicate the 8-bit input so that a 32-bit rotate works. */
1524 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
1525 tcg_gen_muli_tl(cpu_T[0], cpu_T[0], 0x01010101);
1526 goto do_long;
4ba9938c 1527 case MO_16:
34d80a55
RH
1528 /* Replicate the 16-bit input so that a 32-bit rotate works. */
1529 tcg_gen_deposit_tl(cpu_T[0], cpu_T[0], cpu_T[0], 16, 16);
1530 goto do_long;
1531 do_long:
1532#ifdef TARGET_X86_64
4ba9938c 1533 case MO_32:
34d80a55
RH
1534 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
1535 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
1536 if (is_right) {
1537 tcg_gen_rotr_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
1538 } else {
1539 tcg_gen_rotl_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
1540 }
1541 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
1542 break;
1543#endif
1544 default:
1545 if (is_right) {
1546 tcg_gen_rotr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1547 } else {
1548 tcg_gen_rotl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1549 }
1550 break;
b6abf97d 1551 }
b6abf97d 1552
b6abf97d 1553 /* store */
d4faa3e0 1554 gen_op_st_rm_T0_A0(s, ot, op1);
b6abf97d 1555
34d80a55
RH
1556 /* We'll need the flags computed into CC_SRC. */
1557 gen_compute_eflags(s);
b6abf97d 1558
34d80a55
RH
1559 /* The value that was "rotated out" is now present at the other end
1560 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1561 since we've computed the flags into CC_SRC, these variables are
1562 currently dead. */
b6abf97d 1563 if (is_right) {
34d80a55
RH
1564 tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask - 1);
1565 tcg_gen_shri_tl(cpu_cc_dst, cpu_T[0], mask);
089305ac 1566 tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
34d80a55
RH
1567 } else {
1568 tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask);
1569 tcg_gen_andi_tl(cpu_cc_dst, cpu_T[0], 1);
b6abf97d 1570 }
34d80a55
RH
1571 tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1572 tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1573
1574 /* Now conditionally store the new CC_OP value. If the shift count
1575 is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
1576 Otherwise reuse CC_OP_ADCOX which have the C and O flags split out
1577 exactly as we computed above. */
1578 t0 = tcg_const_i32(0);
1579 t1 = tcg_temp_new_i32();
1580 tcg_gen_trunc_tl_i32(t1, cpu_T[1]);
1581 tcg_gen_movi_i32(cpu_tmp2_i32, CC_OP_ADCOX);
1582 tcg_gen_movi_i32(cpu_tmp3_i32, CC_OP_EFLAGS);
1583 tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
1584 cpu_tmp2_i32, cpu_tmp3_i32);
1585 tcg_temp_free_i32(t0);
1586 tcg_temp_free_i32(t1);
1587
1588 /* The CC_OP value is no longer predictable. */
1589 set_cc_op(s, CC_OP_DYNAMIC);
b6abf97d
FB
1590}
1591
d67dc9e6 1592static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
8cd6345d 1593 int is_right)
1594{
4ba9938c 1595 int mask = (ot == MO_64 ? 0x3f : 0x1f);
34d80a55 1596 int shift;
8cd6345d 1597
1598 /* load */
1599 if (op1 == OR_TMP0) {
909be183 1600 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
8cd6345d 1601 } else {
c56baccf 1602 gen_op_mov_v_reg(ot, cpu_T[0], op1);
8cd6345d 1603 }
1604
8cd6345d 1605 op2 &= mask;
8cd6345d 1606 if (op2 != 0) {
34d80a55
RH
1607 switch (ot) {
1608#ifdef TARGET_X86_64
4ba9938c 1609 case MO_32:
34d80a55
RH
1610 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
1611 if (is_right) {
1612 tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
1613 } else {
1614 tcg_gen_rotli_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
1615 }
1616 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
1617 break;
1618#endif
1619 default:
1620 if (is_right) {
1621 tcg_gen_rotri_tl(cpu_T[0], cpu_T[0], op2);
1622 } else {
1623 tcg_gen_rotli_tl(cpu_T[0], cpu_T[0], op2);
1624 }
1625 break;
4ba9938c 1626 case MO_8:
34d80a55
RH
1627 mask = 7;
1628 goto do_shifts;
4ba9938c 1629 case MO_16:
34d80a55
RH
1630 mask = 15;
1631 do_shifts:
1632 shift = op2 & mask;
1633 if (is_right) {
1634 shift = mask + 1 - shift;
1635 }
1636 gen_extu(ot, cpu_T[0]);
1637 tcg_gen_shli_tl(cpu_tmp0, cpu_T[0], shift);
1638 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], mask + 1 - shift);
1639 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
1640 break;
8cd6345d 1641 }
8cd6345d 1642 }
1643
1644 /* store */
d4faa3e0 1645 gen_op_st_rm_T0_A0(s, ot, op1);
8cd6345d 1646
1647 if (op2 != 0) {
34d80a55 1648 /* Compute the flags into CC_SRC. */
d229edce 1649 gen_compute_eflags(s);
0ff6addd 1650
34d80a55
RH
1651 /* The value that was "rotated out" is now present at the other end
1652 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1653 since we've computed the flags into CC_SRC, these variables are
1654 currently dead. */
8cd6345d 1655 if (is_right) {
34d80a55
RH
1656 tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask - 1);
1657 tcg_gen_shri_tl(cpu_cc_dst, cpu_T[0], mask);
38ebb396 1658 tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
34d80a55
RH
1659 } else {
1660 tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask);
1661 tcg_gen_andi_tl(cpu_cc_dst, cpu_T[0], 1);
8cd6345d 1662 }
34d80a55
RH
1663 tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1664 tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1665 set_cc_op(s, CC_OP_ADCOX);
8cd6345d 1666 }
8cd6345d 1667}
1668
b6abf97d 1669/* XXX: add faster immediate = 1 case */
d67dc9e6 1670static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
b6abf97d
FB
1671 int is_right)
1672{
d229edce 1673 gen_compute_eflags(s);
c7b3c873 1674 assert(s->cc_op == CC_OP_EFLAGS);
b6abf97d
FB
1675
1676 /* load */
1677 if (op1 == OR_TMP0)
909be183 1678 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
b6abf97d 1679 else
c56baccf 1680 gen_op_mov_v_reg(ot, cpu_T[0], op1);
b6abf97d 1681
a7812ae4
PB
1682 if (is_right) {
1683 switch (ot) {
4ba9938c 1684 case MO_8:
7923057b
BS
1685 gen_helper_rcrb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1686 break;
4ba9938c 1687 case MO_16:
7923057b
BS
1688 gen_helper_rcrw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1689 break;
4ba9938c 1690 case MO_32:
7923057b
BS
1691 gen_helper_rcrl(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1692 break;
a7812ae4 1693#ifdef TARGET_X86_64
4ba9938c 1694 case MO_64:
7923057b
BS
1695 gen_helper_rcrq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1696 break;
a7812ae4 1697#endif
d67dc9e6
RH
1698 default:
1699 tcg_abort();
a7812ae4
PB
1700 }
1701 } else {
1702 switch (ot) {
4ba9938c 1703 case MO_8:
7923057b
BS
1704 gen_helper_rclb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1705 break;
4ba9938c 1706 case MO_16:
7923057b
BS
1707 gen_helper_rclw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1708 break;
4ba9938c 1709 case MO_32:
7923057b
BS
1710 gen_helper_rcll(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1711 break;
a7812ae4 1712#ifdef TARGET_X86_64
4ba9938c 1713 case MO_64:
7923057b
BS
1714 gen_helper_rclq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1715 break;
a7812ae4 1716#endif
d67dc9e6
RH
1717 default:
1718 tcg_abort();
a7812ae4
PB
1719 }
1720 }
b6abf97d 1721 /* store */
d4faa3e0 1722 gen_op_st_rm_T0_A0(s, ot, op1);
b6abf97d
FB
1723}
1724
1725/* XXX: add faster immediate case */
d67dc9e6 1726static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
f437d0a3 1727 bool is_right, TCGv count_in)
b6abf97d 1728{
4ba9938c 1729 target_ulong mask = (ot == MO_64 ? 63 : 31);
f437d0a3 1730 TCGv count;
b6abf97d
FB
1731
1732 /* load */
1e4840bf 1733 if (op1 == OR_TMP0) {
909be183 1734 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
1e4840bf 1735 } else {
c56baccf 1736 gen_op_mov_v_reg(ot, cpu_T[0], op1);
1e4840bf 1737 }
b6abf97d 1738
f437d0a3
RH
1739 count = tcg_temp_new();
1740 tcg_gen_andi_tl(count, count_in, mask);
1e4840bf 1741
f437d0a3 1742 switch (ot) {
4ba9938c 1743 case MO_16:
f437d0a3
RH
1744 /* Note: we implement the Intel behaviour for shift count > 16.
1745 This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A
1746 portion by constructing it as a 32-bit value. */
b6abf97d 1747 if (is_right) {
f437d0a3
RH
1748 tcg_gen_deposit_tl(cpu_tmp0, cpu_T[0], cpu_T[1], 16, 16);
1749 tcg_gen_mov_tl(cpu_T[1], cpu_T[0]);
1750 tcg_gen_mov_tl(cpu_T[0], cpu_tmp0);
b6abf97d 1751 } else {
f437d0a3 1752 tcg_gen_deposit_tl(cpu_T[1], cpu_T[0], cpu_T[1], 16, 16);
b6abf97d 1753 }
f437d0a3
RH
1754 /* FALLTHRU */
1755#ifdef TARGET_X86_64
4ba9938c 1756 case MO_32:
f437d0a3
RH
1757 /* Concatenate the two 32-bit values and use a 64-bit shift. */
1758 tcg_gen_subi_tl(cpu_tmp0, count, 1);
b6abf97d 1759 if (is_right) {
f437d0a3
RH
1760 tcg_gen_concat_tl_i64(cpu_T[0], cpu_T[0], cpu_T[1]);
1761 tcg_gen_shr_i64(cpu_tmp0, cpu_T[0], cpu_tmp0);
1762 tcg_gen_shr_i64(cpu_T[0], cpu_T[0], count);
1763 } else {
1764 tcg_gen_concat_tl_i64(cpu_T[0], cpu_T[1], cpu_T[0]);
1765 tcg_gen_shl_i64(cpu_tmp0, cpu_T[0], cpu_tmp0);
1766 tcg_gen_shl_i64(cpu_T[0], cpu_T[0], count);
1767 tcg_gen_shri_i64(cpu_tmp0, cpu_tmp0, 32);
1768 tcg_gen_shri_i64(cpu_T[0], cpu_T[0], 32);
1769 }
1770 break;
1771#endif
1772 default:
1773 tcg_gen_subi_tl(cpu_tmp0, count, 1);
1774 if (is_right) {
1775 tcg_gen_shr_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
b6abf97d 1776
f437d0a3
RH
1777 tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
1778 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], count);
1779 tcg_gen_shl_tl(cpu_T[1], cpu_T[1], cpu_tmp4);
b6abf97d 1780 } else {
f437d0a3 1781 tcg_gen_shl_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
4ba9938c 1782 if (ot == MO_16) {
f437d0a3
RH
1783 /* Only needed if count > 16, for Intel behaviour. */
1784 tcg_gen_subfi_tl(cpu_tmp4, 33, count);
1785 tcg_gen_shr_tl(cpu_tmp4, cpu_T[1], cpu_tmp4);
1786 tcg_gen_or_tl(cpu_tmp0, cpu_tmp0, cpu_tmp4);
1787 }
1788
1789 tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
1790 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], count);
1791 tcg_gen_shr_tl(cpu_T[1], cpu_T[1], cpu_tmp4);
b6abf97d 1792 }
f437d0a3
RH
1793 tcg_gen_movi_tl(cpu_tmp4, 0);
1794 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T[1], count, cpu_tmp4,
1795 cpu_tmp4, cpu_T[1]);
1796 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1797 break;
b6abf97d 1798 }
b6abf97d 1799
b6abf97d 1800 /* store */
d4faa3e0 1801 gen_op_st_rm_T0_A0(s, ot, op1);
1e4840bf 1802
f437d0a3
RH
1803 gen_shift_flags(s, ot, cpu_T[0], cpu_tmp0, count, is_right);
1804 tcg_temp_free(count);
b6abf97d
FB
1805}
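/* Worked example (illustrative only): "shrd ax, bx, 20" with ax = 0x1234
 * and bx = 0xabcd.  Per the Intel behaviour implemented above, the
 * 48-bit value A:B:A = 0x1234_abcd_1234 is shifted right by 20, giving
 * 0x1234abc, so ax becomes 0x4abc.  The MO_16 case builds the B:A half
 * as a 32-bit value and then reuses the wider shift paths to get this
 * result.
 */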
1806
d67dc9e6 1807static void gen_shift(DisasContext *s1, int op, TCGMemOp ot, int d, int s)
b6abf97d
FB
1808{
1809 if (s != OR_TMP1)
c56baccf 1810 gen_op_mov_v_reg(ot, cpu_T[1], s);
b6abf97d
FB
1811 switch(op) {
1812 case OP_ROL:
1813 gen_rot_rm_T1(s1, ot, d, 0);
1814 break;
1815 case OP_ROR:
1816 gen_rot_rm_T1(s1, ot, d, 1);
1817 break;
1818 case OP_SHL:
1819 case OP_SHL1:
1820 gen_shift_rm_T1(s1, ot, d, 0, 0);
1821 break;
1822 case OP_SHR:
1823 gen_shift_rm_T1(s1, ot, d, 1, 0);
1824 break;
1825 case OP_SAR:
1826 gen_shift_rm_T1(s1, ot, d, 1, 1);
1827 break;
1828 case OP_RCL:
1829 gen_rotc_rm_T1(s1, ot, d, 0);
1830 break;
1831 case OP_RCR:
1832 gen_rotc_rm_T1(s1, ot, d, 1);
1833 break;
1834 }
2c0262af
FB
1835}
1836
d67dc9e6 1837static void gen_shifti(DisasContext *s1, int op, TCGMemOp ot, int d, int c)
2c0262af 1838{
c1c37968 1839 switch(op) {
8cd6345d 1840 case OP_ROL:
1841 gen_rot_rm_im(s1, ot, d, c, 0);
1842 break;
1843 case OP_ROR:
1844 gen_rot_rm_im(s1, ot, d, c, 1);
1845 break;
c1c37968
FB
1846 case OP_SHL:
1847 case OP_SHL1:
1848 gen_shift_rm_im(s1, ot, d, c, 0, 0);
1849 break;
1850 case OP_SHR:
1851 gen_shift_rm_im(s1, ot, d, c, 1, 0);
1852 break;
1853 case OP_SAR:
1854 gen_shift_rm_im(s1, ot, d, c, 1, 1);
1855 break;
1856 default:
1857 /* currently not optimized */
0ae657b1 1858 tcg_gen_movi_tl(cpu_T[1], c);
c1c37968
FB
1859 gen_shift(s1, op, ot, d, OR_TMP1);
1860 break;
1861 }
2c0262af
FB
1862}
1863
4eeb3939 1864static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
2c0262af 1865{
14ce26e7 1866 target_long disp;
2c0262af 1867 int havesib;
14ce26e7 1868 int base;
2c0262af
FB
1869 int index;
1870 int scale;
2c0262af 1871 int mod, rm, code, override, must_add_seg;
7865eec4 1872 TCGv sum;
2c0262af
FB
1873
1874 override = s->override;
1875 must_add_seg = s->addseg;
1876 if (override >= 0)
1877 must_add_seg = 1;
1878 mod = (modrm >> 6) & 3;
1879 rm = modrm & 7;
1880
1d71ddb1
RH
1881 switch (s->aflag) {
1882 case MO_64:
1883 case MO_32:
2c0262af
FB
1884 havesib = 0;
1885 base = rm;
7865eec4 1886 index = -1;
2c0262af 1887 scale = 0;
3b46e624 1888
2c0262af
FB
1889 if (base == 4) {
1890 havesib = 1;
0af10c86 1891 code = cpu_ldub_code(env, s->pc++);
2c0262af 1892 scale = (code >> 6) & 3;
14ce26e7 1893 index = ((code >> 3) & 7) | REX_X(s);
7865eec4
RH
1894 if (index == 4) {
1895 index = -1; /* no index */
1896 }
14ce26e7 1897 base = (code & 7);
2c0262af 1898 }
14ce26e7 1899 base |= REX_B(s);
2c0262af
FB
1900
1901 switch (mod) {
1902 case 0:
14ce26e7 1903 if ((base & 7) == 5) {
2c0262af 1904 base = -1;
0af10c86 1905 disp = (int32_t)cpu_ldl_code(env, s->pc);
2c0262af 1906 s->pc += 4;
14ce26e7
FB
1907 if (CODE64(s) && !havesib) {
1908 disp += s->pc + s->rip_offset;
1909 }
2c0262af
FB
1910 } else {
1911 disp = 0;
1912 }
1913 break;
1914 case 1:
0af10c86 1915 disp = (int8_t)cpu_ldub_code(env, s->pc++);
2c0262af
FB
1916 break;
1917 default:
1918 case 2:
0af10c86 1919 disp = (int32_t)cpu_ldl_code(env, s->pc);
2c0262af
FB
1920 s->pc += 4;
1921 break;
1922 }
3b46e624 1923
7865eec4
RH
1924 /* For correct popl handling with esp. */
1925 if (base == R_ESP && s->popl_esp_hack) {
1926 disp += s->popl_esp_hack;
1927 }
1928
1929 /* Compute the address, with a minimum number of TCG ops. */
1930 TCGV_UNUSED(sum);
1931 if (index >= 0) {
1932 if (scale == 0) {
1933 sum = cpu_regs[index];
1934 } else {
1935 tcg_gen_shli_tl(cpu_A0, cpu_regs[index], scale);
1936 sum = cpu_A0;
14ce26e7 1937 }
7865eec4
RH
1938 if (base >= 0) {
1939 tcg_gen_add_tl(cpu_A0, sum, cpu_regs[base]);
1940 sum = cpu_A0;
14ce26e7 1941 }
7865eec4
RH
1942 } else if (base >= 0) {
1943 sum = cpu_regs[base];
2c0262af 1944 }
7865eec4
RH
1945 if (TCGV_IS_UNUSED(sum)) {
1946 tcg_gen_movi_tl(cpu_A0, disp);
1947 } else {
1948 tcg_gen_addi_tl(cpu_A0, sum, disp);
2c0262af 1949 }
7865eec4 1950
2c0262af
FB
1951 if (must_add_seg) {
1952 if (override < 0) {
7865eec4 1953 if (base == R_EBP || base == R_ESP) {
2c0262af 1954 override = R_SS;
7865eec4 1955 } else {
2c0262af 1956 override = R_DS;
7865eec4 1957 }
2c0262af 1958 }
7865eec4
RH
1959
1960 tcg_gen_ld_tl(cpu_tmp0, cpu_env,
1961 offsetof(CPUX86State, segs[override].base));
1962 if (CODE64(s)) {
1d71ddb1 1963 if (s->aflag == MO_32) {
7865eec4
RH
1964 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
1965 }
1966 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
4eeb3939 1967 return;
14ce26e7 1968 }
7865eec4
RH
1969
1970 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
1971 }
1972
1d71ddb1 1973 if (s->aflag == MO_32) {
7865eec4 1974 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
2c0262af 1975 }
1d71ddb1
RH
1976 break;
1977
1978 case MO_16:
2c0262af
FB
1979 switch (mod) {
1980 case 0:
1981 if (rm == 6) {
0af10c86 1982 disp = cpu_lduw_code(env, s->pc);
2c0262af 1983 s->pc += 2;
3250cff8 1984 tcg_gen_movi_tl(cpu_A0, disp);
2c0262af
FB
1985 rm = 0; /* avoid SS override */
1986 goto no_rm;
1987 } else {
1988 disp = 0;
1989 }
1990 break;
1991 case 1:
0af10c86 1992 disp = (int8_t)cpu_ldub_code(env, s->pc++);
2c0262af
FB
1993 break;
1994 default:
1995 case 2:
7effd625 1996 disp = (int16_t)cpu_lduw_code(env, s->pc);
2c0262af
FB
1997 s->pc += 2;
1998 break;
1999 }
7effd625
RH
2000
2001 sum = cpu_A0;
2002 switch (rm) {
2c0262af 2003 case 0:
7effd625 2004 tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBX], cpu_regs[R_ESI]);
2c0262af
FB
2005 break;
2006 case 1:
7effd625 2007 tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBX], cpu_regs[R_EDI]);
2c0262af
FB
2008 break;
2009 case 2:
7effd625 2010 tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBP], cpu_regs[R_ESI]);
2c0262af
FB
2011 break;
2012 case 3:
7effd625 2013 tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBP], cpu_regs[R_EDI]);
2c0262af
FB
2014 break;
2015 case 4:
7effd625 2016 sum = cpu_regs[R_ESI];
2c0262af
FB
2017 break;
2018 case 5:
7effd625 2019 sum = cpu_regs[R_EDI];
2c0262af
FB
2020 break;
2021 case 6:
7effd625 2022 sum = cpu_regs[R_EBP];
2c0262af
FB
2023 break;
2024 default:
2025 case 7:
7effd625 2026 sum = cpu_regs[R_EBX];
2c0262af
FB
2027 break;
2028 }
7effd625 2029 tcg_gen_addi_tl(cpu_A0, sum, disp);
a7e5c7de 2030 tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
2c0262af
FB
2031 no_rm:
2032 if (must_add_seg) {
2033 if (override < 0) {
7effd625 2034 if (rm == 2 || rm == 3 || rm == 6) {
2c0262af 2035 override = R_SS;
7effd625 2036 } else {
2c0262af 2037 override = R_DS;
7effd625 2038 }
2c0262af 2039 }
7162ab21 2040 gen_op_addl_A0_seg(s, override);
2c0262af 2041 }
1d71ddb1
RH
2042 break;
2043
2044 default:
2045 tcg_abort();
2c0262af 2046 }
2c0262af
FB
2047}
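/* Illustrative summary (not part of the original source): for 32/64-bit
 * addressing the code above computes
 *     A0 = base + (index << scale) + disp
 * skipping any absent term; e.g. "lea eax, [ebx + esi*4 + 0x10]" becomes
 * shli(A0, ESI, 2); add(A0, A0, EBX); addi(A0, A0, 0x10).  The segment
 * base is added afterwards when must_add_seg is set, and the result is
 * zero-extended to 16 or 32 bits as the address size requires.
 */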
2048
0af10c86 2049static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
e17a36ce
FB
2050{
2051 int mod, rm, base, code;
2052
2053 mod = (modrm >> 6) & 3;
2054 if (mod == 3)
2055 return;
2056 rm = modrm & 7;
2057
1d71ddb1
RH
2058 switch (s->aflag) {
2059 case MO_64:
2060 case MO_32:
e17a36ce 2061 base = rm;
3b46e624 2062
e17a36ce 2063 if (base == 4) {
0af10c86 2064 code = cpu_ldub_code(env, s->pc++);
e17a36ce
FB
2065 base = (code & 7);
2066 }
3b46e624 2067
e17a36ce
FB
2068 switch (mod) {
2069 case 0:
2070 if (base == 5) {
2071 s->pc += 4;
2072 }
2073 break;
2074 case 1:
2075 s->pc++;
2076 break;
2077 default:
2078 case 2:
2079 s->pc += 4;
2080 break;
2081 }
1d71ddb1
RH
2082 break;
2083
2084 case MO_16:
e17a36ce
FB
2085 switch (mod) {
2086 case 0:
2087 if (rm == 6) {
2088 s->pc += 2;
2089 }
2090 break;
2091 case 1:
2092 s->pc++;
2093 break;
2094 default:
2095 case 2:
2096 s->pc += 2;
2097 break;
2098 }
1d71ddb1
RH
2099 break;
2100
2101 default:
2102 tcg_abort();
e17a36ce
FB
2103 }
2104}
2105
664e0f19
FB
2106/* used for LEA and MOV AX, mem */
2107static void gen_add_A0_ds_seg(DisasContext *s)
2108{
2109 int override, must_add_seg;
2110 must_add_seg = s->addseg;
2111 override = R_DS;
2112 if (s->override >= 0) {
2113 override = s->override;
2114 must_add_seg = 1;
664e0f19
FB
2115 }
2116 if (must_add_seg) {
8f091a59
FB
2117#ifdef TARGET_X86_64
2118 if (CODE64(s)) {
57fec1fe 2119 gen_op_addq_A0_seg(override);
5fafdf24 2120 } else
8f091a59
FB
2121#endif
2122 {
7162ab21 2123 gen_op_addl_A0_seg(s, override);
8f091a59 2124 }
664e0f19
FB
2125 }
2126}
2127
222a3336 2128/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2c0262af 2129 OR_TMP0 */
0af10c86 2130static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
d67dc9e6 2131 TCGMemOp ot, int reg, int is_store)
2c0262af 2132{
4eeb3939 2133 int mod, rm;
2c0262af
FB
2134
2135 mod = (modrm >> 6) & 3;
14ce26e7 2136 rm = (modrm & 7) | REX_B(s);
2c0262af
FB
2137 if (mod == 3) {
2138 if (is_store) {
2139 if (reg != OR_TMP0)
c56baccf 2140 gen_op_mov_v_reg(ot, cpu_T[0], reg);
480a762d 2141 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
2c0262af 2142 } else {
c56baccf 2143 gen_op_mov_v_reg(ot, cpu_T[0], rm);
2c0262af 2144 if (reg != OR_TMP0)
480a762d 2145 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
2c0262af
FB
2146 }
2147 } else {
4eeb3939 2148 gen_lea_modrm(env, s, modrm);
2c0262af
FB
2149 if (is_store) {
2150 if (reg != OR_TMP0)
c56baccf 2151 gen_op_mov_v_reg(ot, cpu_T[0], reg);
fd8ca9f6 2152 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
2c0262af 2153 } else {
909be183 2154 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
2c0262af 2155 if (reg != OR_TMP0)
480a762d 2156 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
2c0262af
FB
2157 }
2158 }
2159}
2160
d67dc9e6 2161static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, TCGMemOp ot)
2c0262af
FB
2162{
2163 uint32_t ret;
2164
d67dc9e6 2165 switch (ot) {
4ba9938c 2166 case MO_8:
0af10c86 2167 ret = cpu_ldub_code(env, s->pc);
2c0262af
FB
2168 s->pc++;
2169 break;
4ba9938c 2170 case MO_16:
0af10c86 2171 ret = cpu_lduw_code(env, s->pc);
2c0262af
FB
2172 s->pc += 2;
2173 break;
4ba9938c 2174 case MO_32:
d67dc9e6
RH
2175#ifdef TARGET_X86_64
2176 case MO_64:
2177#endif
0af10c86 2178 ret = cpu_ldl_code(env, s->pc);
2c0262af
FB
2179 s->pc += 4;
2180 break;
d67dc9e6
RH
2181 default:
2182 tcg_abort();
2c0262af
FB
2183 }
2184 return ret;
2185}
2186
d67dc9e6 2187static inline int insn_const_size(TCGMemOp ot)
14ce26e7 2188{
4ba9938c 2189 if (ot <= MO_32) {
14ce26e7 2190 return 1 << ot;
4ba9938c 2191 } else {
14ce26e7 2192 return 4;
4ba9938c 2193 }
14ce26e7
FB
2194}
2195
6e256c93
FB
2196static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
2197{
2198 TranslationBlock *tb;
2199 target_ulong pc;
2200
2201 pc = s->cs_base + eip;
2202 tb = s->tb;
2203 /* NOTE: we handle the case where the TB spans two pages here */
2204 if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
2205 (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) {
2206 /* jump to same page: we can use a direct jump */
57fec1fe 2207 tcg_gen_goto_tb(tb_num);
6e256c93 2208 gen_jmp_im(eip);
8cfd0495 2209 tcg_gen_exit_tb((uintptr_t)tb + tb_num);
6e256c93
FB
2210 } else {
2211 /* jump to another page: currently not optimized */
2212 gen_jmp_im(eip);
2213 gen_eob(s);
2214 }
2215}
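/* Added note: the two page-mask comparisons check the jump target
 * against the first page of the TB and against the page of the
 * instruction just translated (s->pc - 1), which is why a TB that
 * spans two pages can still be chained with a direct jump.
 */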
2216
5fafdf24 2217static inline void gen_jcc(DisasContext *s, int b,
14ce26e7 2218 target_ulong val, target_ulong next_eip)
2c0262af 2219{
42a268c2 2220 TCGLabel *l1, *l2;
3b46e624 2221
2c0262af 2222 if (s->jmp_opt) {
14ce26e7 2223 l1 = gen_new_label();
b27fc131 2224 gen_jcc1(s, b, l1);
dc259201 2225
6e256c93 2226 gen_goto_tb(s, 0, next_eip);
14ce26e7
FB
2227
2228 gen_set_label(l1);
6e256c93 2229 gen_goto_tb(s, 1, val);
5779406a 2230 s->is_jmp = DISAS_TB_JUMP;
2c0262af 2231 } else {
14ce26e7
FB
2232 l1 = gen_new_label();
2233 l2 = gen_new_label();
b27fc131 2234 gen_jcc1(s, b, l1);
8e1c85e3 2235
14ce26e7 2236 gen_jmp_im(next_eip);
8e1c85e3
FB
2237 tcg_gen_br(l2);
2238
14ce26e7
FB
2239 gen_set_label(l1);
2240 gen_jmp_im(val);
2241 gen_set_label(l2);
2c0262af
FB
2242 gen_eob(s);
2243 }
2244}
2245
d67dc9e6 2246static void gen_cmovcc1(CPUX86State *env, DisasContext *s, TCGMemOp ot, int b,
f32d3781
PB
2247 int modrm, int reg)
2248{
57eb0cc8 2249 CCPrepare cc;
f32d3781 2250
57eb0cc8 2251 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
f32d3781 2252
57eb0cc8
RH
2253 cc = gen_prepare_cc(s, b, cpu_T[1]);
2254 if (cc.mask != -1) {
2255 TCGv t0 = tcg_temp_new();
2256 tcg_gen_andi_tl(t0, cc.reg, cc.mask);
2257 cc.reg = t0;
2258 }
2259 if (!cc.use_reg2) {
2260 cc.reg2 = tcg_const_tl(cc.imm);
f32d3781
PB
2261 }
2262
57eb0cc8
RH
2263 tcg_gen_movcond_tl(cc.cond, cpu_T[0], cc.reg, cc.reg2,
2264 cpu_T[0], cpu_regs[reg]);
480a762d 2265 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
57eb0cc8
RH
2266
2267 if (cc.mask != -1) {
2268 tcg_temp_free(cc.reg);
2269 }
2270 if (!cc.use_reg2) {
2271 tcg_temp_free(cc.reg2);
2272 }
f32d3781
PB
2273}
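/* Added note: CMOVcc is implemented with a movcond -- the operand is
 * loaded unconditionally above and the condition only selects between
 * the loaded value and the current register contents, matching the x86
 * rule that a CMOV memory operand is always read even when the
 * condition is false.
 */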
2274
3bd7da9e
FB
2275static inline void gen_op_movl_T0_seg(int seg_reg)
2276{
2277 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
2278 offsetof(CPUX86State,segs[seg_reg].selector));
2279}
2280
2281static inline void gen_op_movl_seg_T0_vm(int seg_reg)
2282{
2283 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
2284 tcg_gen_st32_tl(cpu_T[0], cpu_env,
2285 offsetof(CPUX86State,segs[seg_reg].selector));
2286 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
2287 tcg_gen_st_tl(cpu_T[0], cpu_env,
2288 offsetof(CPUX86State,segs[seg_reg].base));
2289}
2290
2c0262af
FB
2291/* move T0 to seg_reg and compute if the CPU state may change. Never
2292 call this function with seg_reg == R_CS */
100ec099 2293static void gen_movl_seg_T0(DisasContext *s, int seg_reg)
2c0262af 2294{
3415a4dd 2295 if (s->pe && !s->vm86) {
b6abf97d 2296 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2999a0b2 2297 gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32);
dc196a57
FB
2298 /* abort translation because the addseg value may change or
2299 because ss32 may change. For R_SS, translation must always
2300 stop as a special handling must be done to disable hardware
2301 interrupts for the next instruction */
2302 if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
5779406a 2303 s->is_jmp = DISAS_TB_JUMP;
3415a4dd 2304 } else {
3bd7da9e 2305 gen_op_movl_seg_T0_vm(seg_reg);
dc196a57 2306 if (seg_reg == R_SS)
5779406a 2307 s->is_jmp = DISAS_TB_JUMP;
3415a4dd 2308 }
2c0262af
FB
2309}
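/* Added note: in protected (non-vm86) mode the selector is loaded via
 * gen_helper_load_seg, which performs the descriptor checks and may
 * raise an exception; translation then stops because addseg/ss32 may
 * have changed.  In real or vm86 mode the base is simply
 * selector << 4, as done by gen_op_movl_seg_T0_vm above.
 */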
2310
0573fbfc
TS
2311static inline int svm_is_rep(int prefixes)
2312{
2313 return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
2314}
2315
872929aa 2316static inline void
0573fbfc 2317gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
b8b6a50b 2318 uint32_t type, uint64_t param)
0573fbfc 2319{
872929aa
FB
2320 /* no SVM activated; fast case */
2321 if (likely(!(s->flags & HF_SVMI_MASK)))
2322 return;
773cdfcc 2323 gen_update_cc_op(s);
872929aa 2324 gen_jmp_im(pc_start - s->cs_base);
052e80d5 2325 gen_helper_svm_check_intercept_param(cpu_env, tcg_const_i32(type),
a7812ae4 2326 tcg_const_i64(param));
0573fbfc
TS
2327}
2328
872929aa 2329static inline void
0573fbfc
TS
2330gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
2331{
872929aa 2332 gen_svm_check_intercept_param(s, pc_start, type, 0);
0573fbfc
TS
2333}
2334
4f31916f
FB
2335static inline void gen_stack_update(DisasContext *s, int addend)
2336{
14ce26e7
FB
2337#ifdef TARGET_X86_64
2338 if (CODE64(s)) {
d3f4bbe3 2339 gen_op_add_reg_im(MO_64, R_ESP, addend);
14ce26e7
FB
2340 } else
2341#endif
4f31916f 2342 if (s->ss32) {
d3f4bbe3 2343 gen_op_add_reg_im(MO_32, R_ESP, addend);
4f31916f 2344 } else {
d3f4bbe3 2345 gen_op_add_reg_im(MO_16, R_ESP, addend);
4f31916f
FB
2346 }
2347}
2348
432baffe
RH
2349/* Generate a push. It depends on ss32, addseg and dflag. */
2350static void gen_push_v(DisasContext *s, TCGv val)
2c0262af 2351{
432baffe
RH
2352 TCGMemOp a_ot, d_ot = mo_pushpop(s, s->dflag);
2353 int size = 1 << d_ot;
2354 TCGv new_esp = cpu_A0;
2355
2356 tcg_gen_subi_tl(cpu_A0, cpu_regs[R_ESP], size);
2c0262af 2357
14ce26e7 2358 if (CODE64(s)) {
432baffe
RH
2359 a_ot = MO_64;
2360 } else if (s->ss32) {
2361 a_ot = MO_32;
2362 if (s->addseg) {
2363 new_esp = cpu_tmp4;
2364 tcg_gen_mov_tl(new_esp, cpu_A0);
7162ab21 2365 gen_op_addl_A0_seg(s, R_SS);
432baffe
RH
2366 } else {
2367 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
2c0262af 2368 }
432baffe
RH
2369 } else {
2370 a_ot = MO_16;
2371 new_esp = cpu_tmp4;
2372 tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
2373 tcg_gen_mov_tl(new_esp, cpu_A0);
2374 gen_op_addl_A0_seg(s, R_SS);
2c0262af 2375 }
432baffe
RH
2376
2377 gen_op_st_v(s, d_ot, val, cpu_A0);
2378 gen_op_mov_reg_v(a_ot, R_ESP, new_esp);
2c0262af
FB
2379}
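/* Example (illustrative): a 32-bit "push eax" with ss32 set and no
 * addseg takes the d_ot == a_ot == MO_32 path above: A0 = ESP - 4, the
 * value is stored at that address, and only then is ESP updated from
 * A0, so a fault on the store leaves ESP unchanged.
 */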
2380
4f31916f 2381/* two step pop is necessary for precise exceptions */
8e31d234 2382static TCGMemOp gen_pop_T0(DisasContext *s)
2c0262af 2383{
8e31d234
RH
2384 TCGMemOp d_ot = mo_pushpop(s, s->dflag);
2385 TCGv addr = cpu_A0;
2386
14ce26e7 2387 if (CODE64(s)) {
8e31d234
RH
2388 addr = cpu_regs[R_ESP];
2389 } else if (!s->ss32) {
2390 tcg_gen_ext16u_tl(cpu_A0, cpu_regs[R_ESP]);
2391 gen_op_addl_A0_seg(s, R_SS);
2392 } else if (s->addseg) {
2393 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_ESP]);
2394 gen_op_addl_A0_seg(s, R_SS);
2395 } else {
2396 tcg_gen_ext32u_tl(cpu_A0, cpu_regs[R_ESP]);
2c0262af 2397 }
8e31d234
RH
2398
2399 gen_op_ld_v(s, d_ot, cpu_T[0], addr);
2400 return d_ot;
2c0262af
FB
2401}
2402
8e31d234 2403static void gen_pop_update(DisasContext *s, TCGMemOp ot)
2c0262af 2404{
8e31d234 2405 gen_stack_update(s, 1 << ot);
2c0262af
FB
2406}
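/* Added note: the pop is deliberately split in two steps.  gen_pop_T0
 * only loads the value (ESP is left untouched), and gen_pop_update adds
 * 1 << ot to ESP afterwards, so a fault during the load still sees the
 * original ESP -- the "precise exceptions" mentioned above.
 */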
2407
2408static void gen_stack_A0(DisasContext *s)
2409{
57fec1fe 2410 gen_op_movl_A0_reg(R_ESP);
2c0262af 2411 if (!s->ss32)
a7e5c7de 2412 tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
bbf662ee 2413 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2c0262af 2414 if (s->addseg)
7162ab21 2415 gen_op_addl_A0_seg(s, R_SS);
2c0262af
FB
2416}
2417
2418/* NOTE: wrap-around in 16-bit mode is not fully handled */
2419static void gen_pusha(DisasContext *s)
2420{
2421 int i;
57fec1fe 2422 gen_op_movl_A0_reg(R_ESP);
ab4e4aec 2423 gen_op_addl_A0_im(-8 << s->dflag);
2c0262af 2424 if (!s->ss32)
a7e5c7de 2425 tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
bbf662ee 2426 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2c0262af 2427 if (s->addseg)
7162ab21 2428 gen_op_addl_A0_seg(s, R_SS);
2c0262af 2429 for(i = 0;i < 8; i++) {
c56baccf 2430 gen_op_mov_v_reg(MO_32, cpu_T[0], 7 - i);
ab4e4aec
RH
2431 gen_op_st_v(s, s->dflag, cpu_T[0], cpu_A0);
2432 gen_op_addl_A0_im(1 << s->dflag);
2c0262af 2433 }
68773f84 2434 gen_op_mov_reg_v(MO_16 + s->ss32, R_ESP, cpu_T[1]);
2c0262af
FB
2435}
2436
2437/* NOTE: wrap-around in 16-bit mode is not fully handled */
2438static void gen_popa(DisasContext *s)
2439{
2440 int i;
57fec1fe 2441 gen_op_movl_A0_reg(R_ESP);
2c0262af 2442 if (!s->ss32)
a7e5c7de 2443 tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
bbf662ee 2444 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
ab4e4aec 2445 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 8 << s->dflag);
2c0262af 2446 if (s->addseg)
7162ab21 2447 gen_op_addl_A0_seg(s, R_SS);
2c0262af
FB
2448 for(i = 0;i < 8; i++) {
2449 /* ESP is not reloaded */
2450 if (i != 3) {
ab4e4aec 2451 gen_op_ld_v(s, s->dflag, cpu_T[0], cpu_A0);
480a762d 2452 gen_op_mov_reg_v(s->dflag, 7 - i, cpu_T[0]);
2c0262af 2453 }
ab4e4aec 2454 gen_op_addl_A0_im(1 << s->dflag);
2c0262af 2455 }
68773f84 2456 gen_op_mov_reg_v(MO_16 + s->ss32, R_ESP, cpu_T[1]);
2c0262af
FB
2457}
2458
2c0262af
FB
2459static void gen_enter(DisasContext *s, int esp_addend, int level)
2460{
ab4e4aec
RH
2461 TCGMemOp ot = mo_pushpop(s, s->dflag);
2462 int opsize = 1 << ot;
2c0262af 2463
2c0262af 2464 level &= 0x1f;
8f091a59
FB
2465#ifdef TARGET_X86_64
2466 if (CODE64(s)) {
57fec1fe 2467 gen_op_movl_A0_reg(R_ESP);
8f091a59 2468 gen_op_addq_A0_im(-opsize);
bbf662ee 2469 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
8f091a59
FB
2470
2471 /* push bp */
c56baccf 2472 gen_op_mov_v_reg(MO_32, cpu_T[0], R_EBP);
fd8ca9f6 2473 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
8f091a59 2474 if (level) {
b5b38f61 2475 /* XXX: must save state */
2999a0b2 2476 gen_helper_enter64_level(cpu_env, tcg_const_i32(level),
4ba9938c 2477 tcg_const_i32((ot == MO_64)),
a7812ae4 2478 cpu_T[1]);
8f091a59 2479 }
68773f84 2480 gen_op_mov_reg_v(ot, R_EBP, cpu_T[1]);
bbf662ee 2481 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
68773f84 2482 gen_op_mov_reg_v(MO_64, R_ESP, cpu_T[1]);
5fafdf24 2483 } else
8f091a59
FB
2484#endif
2485 {
57fec1fe 2486 gen_op_movl_A0_reg(R_ESP);
8f091a59
FB
2487 gen_op_addl_A0_im(-opsize);
2488 if (!s->ss32)
a7e5c7de 2489 tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
bbf662ee 2490 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
8f091a59 2491 if (s->addseg)
7162ab21 2492 gen_op_addl_A0_seg(s, R_SS);
8f091a59 2493 /* push bp */
c56baccf 2494 gen_op_mov_v_reg(MO_32, cpu_T[0], R_EBP);
fd8ca9f6 2495 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
8f091a59 2496 if (level) {
b5b38f61 2497 /* XXX: must save state */
2999a0b2 2498 gen_helper_enter_level(cpu_env, tcg_const_i32(level),
ab4e4aec 2499 tcg_const_i32(s->dflag - 1),
a7812ae4 2500 cpu_T[1]);
8f091a59 2501 }
68773f84 2502 gen_op_mov_reg_v(ot, R_EBP, cpu_T[1]);
bbf662ee 2503 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
68773f84 2504 gen_op_mov_reg_v(MO_16 + s->ss32, R_ESP, cpu_T[1]);
2c0262af 2505 }
2c0262af
FB
2506}
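/* Added note: both paths above implement ENTER as: push the old EBP,
 * optionally let the enter_level helper copy the 'level' display
 * entries, set EBP to the new frame pointer, and finally lower ESP
 * below the new EBP by esp_addend plus opsize * level.
 */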
2507
14ce26e7 2508static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
2c0262af 2509{
773cdfcc 2510 gen_update_cc_op(s);
14ce26e7 2511 gen_jmp_im(cur_eip);
77b2bc2c 2512 gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
5779406a 2513 s->is_jmp = DISAS_TB_JUMP;
2c0262af
FB
2514}
2515
2516/* an interrupt is different from an exception because of the
7f75ffd3 2517 privilege checks */
5fafdf24 2518static void gen_interrupt(DisasContext *s, int intno,
14ce26e7 2519 target_ulong cur_eip, target_ulong next_eip)
2c0262af 2520{
773cdfcc 2521 gen_update_cc_op(s);
14ce26e7 2522 gen_jmp_im(cur_eip);
77b2bc2c 2523 gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno),
a7812ae4 2524 tcg_const_i32(next_eip - cur_eip));
5779406a 2525 s->is_jmp = DISAS_TB_JUMP;
2c0262af
FB
2526}
2527
14ce26e7 2528static void gen_debug(DisasContext *s, target_ulong cur_eip)
2c0262af 2529{
773cdfcc 2530 gen_update_cc_op(s);
14ce26e7 2531 gen_jmp_im(cur_eip);
4a7443be 2532 gen_helper_debug(cpu_env);
5779406a 2533 s->is_jmp = DISAS_TB_JUMP;
2c0262af
FB
2534}
2535
2536/* generate a generic end of block. Trace exception is also generated
2537 if needed */
2538static void gen_eob(DisasContext *s)
2539{
773cdfcc 2540 gen_update_cc_op(s);
a2cc3b24 2541 if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
f0967a1a 2542 gen_helper_reset_inhibit_irq(cpu_env);
a2cc3b24 2543 }
a2397807 2544 if (s->tb->flags & HF_RF_MASK) {
f0967a1a 2545 gen_helper_reset_rf(cpu_env);
a2397807 2546 }
34865134 2547 if (s->singlestep_enabled) {
4a7443be 2548 gen_helper_debug(cpu_env);
34865134 2549 } else if (s->tf) {
4a7443be 2550 gen_helper_single_step(cpu_env);
2c0262af 2551 } else {
57fec1fe 2552 tcg_gen_exit_tb(0);
2c0262af 2553 }
5779406a 2554 s->is_jmp = DISAS_TB_JUMP;
2c0262af
FB
2555}
2556
2557/* generate a jump to eip. No segment change must happen before, as a
2558 direct jump to the next block may occur */
14ce26e7 2559static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
2c0262af 2560{
a3251186
RH
2561 gen_update_cc_op(s);
2562 set_cc_op(s, CC_OP_DYNAMIC);
2c0262af 2563 if (s->jmp_opt) {
6e256c93 2564 gen_goto_tb(s, tb_num, eip);
5779406a 2565 s->is_jmp = DISAS_TB_JUMP;
2c0262af 2566 } else {
14ce26e7 2567 gen_jmp_im(eip);
2c0262af
FB
2568 gen_eob(s);
2569 }
2570}
2571
14ce26e7
FB
2572static void gen_jmp(DisasContext *s, target_ulong eip)
2573{
2574 gen_jmp_tb(s, eip, 0);
2575}
2576
323d1876 2577static inline void gen_ldq_env_A0(DisasContext *s, int offset)
8686c490 2578{
3c5f4116 2579 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
b6abf97d 2580 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
8686c490 2581}
664e0f19 2582
323d1876 2583static inline void gen_stq_env_A0(DisasContext *s, int offset)
8686c490 2584{
b6abf97d 2585 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
3523e4bd 2586 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
8686c490 2587}
664e0f19 2588
323d1876 2589static inline void gen_ldo_env_A0(DisasContext *s, int offset)
8686c490 2590{
5c42a7cd 2591 int mem_index = s->mem_index;
3c5f4116 2592 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
b6abf97d 2593 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
8686c490 2594 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
3c5f4116 2595 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
b6abf97d 2596 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
8686c490 2597}
14ce26e7 2598
323d1876 2599static inline void gen_sto_env_A0(DisasContext *s, int offset)
8686c490 2600{
5c42a7cd 2601 int mem_index = s->mem_index;
b6abf97d 2602 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
3523e4bd 2603 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
8686c490 2604 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
b6abf97d 2605 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
3523e4bd 2606 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
8686c490 2607}
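/* Added note: the 128-bit XMM load/store helpers above are split into
 * two little-endian 64-bit memory accesses (XMM_Q(0) at A0, XMM_Q(1) at
 * A0 + 8); gen_ldq_env_A0/gen_stq_env_A0 do the same for a single
 * 64-bit half.
 */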
14ce26e7 2608
5af45186
FB
2609static inline void gen_op_movo(int d_offset, int s_offset)
2610{
bee81887
PB
2611 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(XMMReg, XMM_Q(0)));
2612 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(XMMReg, XMM_Q(0)));
2613 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(XMMReg, XMM_Q(1)));
2614 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(XMMReg, XMM_Q(1)));
5af45186
FB
2615}
2616
2617static inline void gen_op_movq(int d_offset, int s_offset)
2618{
b6abf97d
FB
2619 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2620 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
5af45186
FB
2621}
2622
2623static inline void gen_op_movl(int d_offset, int s_offset)
2624{
b6abf97d
FB
2625 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
2626 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
5af45186
FB
2627}
2628
2629static inline void gen_op_movq_env_0(int d_offset)
2630{
b6abf97d
FB
2631 tcg_gen_movi_i64(cpu_tmp1_i64, 0);
2632 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
5af45186 2633}
664e0f19 2634
d3eb5eae
BS
2635typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
2636typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
2637typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val);
2638typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val);
2639typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
2640typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2641 TCGv_i32 val);
c4baa050 2642typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
d3eb5eae
BS
2643typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2644 TCGv val);
c4baa050 2645
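/* Added note (naming convention inferred from the typedefs above): the
 * suffix encodes the argument list -- 'e' = TCGv_ptr to env, 'p' =
 * TCGv_ptr to an MMX/XMM register, 'i'/'l' = TCGv_i32/TCGv_i64 value,
 * 't' = target-long TCGv -- and the leading '0', 'i' or 'l' gives the
 * result type (void, i32 or i64).
 */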
5af45186
FB
2646#define SSE_SPECIAL ((void *)1)
2647#define SSE_DUMMY ((void *)2)
664e0f19 2648
a7812ae4
PB
2649#define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2650#define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2651 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
5af45186 2652
d3eb5eae 2653static const SSEFunc_0_epp sse_op_table1[256][4] = {
a35f3ec7
AJ
2654 /* 3DNow! extensions */
2655 [0x0e] = { SSE_DUMMY }, /* femms */
2656 [0x0f] = { SSE_DUMMY }, /* pf... */
664e0f19
FB
2657 /* pure SSE operations */
2658 [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2659 [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
465e9838 2660 [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
664e0f19 2661 [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
a7812ae4
PB
2662 [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
2663 [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
664e0f19
FB
2664 [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
2665 [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */
2666
2667 [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2668 [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2669 [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
d9f4bb27 2670 [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
664e0f19
FB
2671 [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
2672 [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
a7812ae4
PB
2673 [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
2674 [0x2f] = { gen_helper_comiss, gen_helper_comisd },
664e0f19
FB
2675 [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
2676 [0x51] = SSE_FOP(sqrt),
a7812ae4
PB
2677 [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
2678 [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
2679 [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
2680 [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
2681 [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
2682 [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
664e0f19
FB
2683 [0x58] = SSE_FOP(add),
2684 [0x59] = SSE_FOP(mul),
a7812ae4
PB
2685 [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
2686 gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
2687 [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
664e0f19
FB
2688 [0x5c] = SSE_FOP(sub),
2689 [0x5d] = SSE_FOP(min),
2690 [0x5e] = SSE_FOP(div),
2691 [0x5f] = SSE_FOP(max),
2692
2693 [0xc2] = SSE_FOP(cmpeq),
d3eb5eae
BS
2694 [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
2695 (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */
664e0f19 2696
7073fbad
RH
2697 /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX. */
2698 [0x38] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2699 [0x3a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
4242b1bd 2700
664e0f19
FB
2701 /* MMX ops and their SSE extensions */
2702 [0x60] = MMX_OP2(punpcklbw),
2703 [0x61] = MMX_OP2(punpcklwd),
2704 [0x62] = MMX_OP2(punpckldq),
2705 [0x63] = MMX_OP2(packsswb),
2706 [0x64] = MMX_OP2(pcmpgtb),
2707 [0x65] = MMX_OP2(pcmpgtw),
2708 [0x66] = MMX_OP2(pcmpgtl),
2709 [0x67] = MMX_OP2(packuswb),
2710 [0x68] = MMX_OP2(punpckhbw),
2711 [0x69] = MMX_OP2(punpckhwd),
2712 [0x6a] = MMX_OP2(punpckhdq),
2713 [0x6b] = MMX_OP2(packssdw),
a7812ae4
PB
2714 [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
2715 [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
664e0f19
FB
2716 [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
2717 [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
d3eb5eae
BS
2718 [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
2719 (SSEFunc_0_epp)gen_helper_pshufd_xmm,
2720 (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
2721 (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
664e0f19
FB
2722 [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
2723 [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
2724 [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
2725 [0x74] = MMX_OP2(pcmpeqb),
2726 [0x75] = MMX_OP2(pcmpeqw),
2727 [0x76] = MMX_OP2(pcmpeql),
a35f3ec7 2728 [0x77] = { SSE_DUMMY }, /* emms */
d9f4bb27
AP
2729 [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
2730 [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
a7812ae4
PB
2731 [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
2732 [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
664e0f19
FB
2733 [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, movq */
2734 [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2735 [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
2736 [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
a7812ae4 2737 [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
664e0f19
FB
2738 [0xd1] = MMX_OP2(psrlw),
2739 [0xd2] = MMX_OP2(psrld),
2740 [0xd3] = MMX_OP2(psrlq),
2741 [0xd4] = MMX_OP2(paddq),
2742 [0xd5] = MMX_OP2(pmullw),
2743 [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2744 [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
2745 [0xd8] = MMX_OP2(psubusb),
2746 [0xd9] = MMX_OP2(psubusw),
2747 [0xda] = MMX_OP2(pminub),
2748 [0xdb] = MMX_OP2(pand),
2749 [0xdc] = MMX_OP2(paddusb),
2750 [0xdd] = MMX_OP2(paddusw),
2751 [0xde] = MMX_OP2(pmaxub),
2752 [0xdf] = MMX_OP2(pandn),
2753 [0xe0] = MMX_OP2(pavgb),
2754 [0xe1] = MMX_OP2(psraw),
2755 [0xe2] = MMX_OP2(psrad),
2756 [0xe3] = MMX_OP2(pavgw),
2757 [0xe4] = MMX_OP2(pmulhuw),
2758 [0xe5] = MMX_OP2(pmulhw),
a7812ae4 2759 [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
664e0f19
FB
2760 [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
2761 [0xe8] = MMX_OP2(psubsb),
2762 [0xe9] = MMX_OP2(psubsw),
2763 [0xea] = MMX_OP2(pminsw),
2764 [0xeb] = MMX_OP2(por),
2765 [0xec] = MMX_OP2(paddsb),
2766 [0xed] = MMX_OP2(paddsw),
2767 [0xee] = MMX_OP2(pmaxsw),
2768 [0xef] = MMX_OP2(pxor),
465e9838 2769 [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
664e0f19
FB
2770 [0xf1] = MMX_OP2(psllw),
2771 [0xf2] = MMX_OP2(pslld),
2772 [0xf3] = MMX_OP2(psllq),
2773 [0xf4] = MMX_OP2(pmuludq),
2774 [0xf5] = MMX_OP2(pmaddwd),
2775 [0xf6] = MMX_OP2(psadbw),
d3eb5eae
BS
2776 [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
2777 (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
664e0f19
FB
2778 [0xf8] = MMX_OP2(psubb),
2779 [0xf9] = MMX_OP2(psubw),
2780 [0xfa] = MMX_OP2(psubl),
2781 [0xfb] = MMX_OP2(psubq),
2782 [0xfc] = MMX_OP2(paddb),
2783 [0xfd] = MMX_OP2(paddw),
2784 [0xfe] = MMX_OP2(paddl),
2785};
2786
d3eb5eae 2787static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
664e0f19
FB
2788 [0 + 2] = MMX_OP2(psrlw),
2789 [0 + 4] = MMX_OP2(psraw),
2790 [0 + 6] = MMX_OP2(psllw),
2791 [8 + 2] = MMX_OP2(psrld),
2792 [8 + 4] = MMX_OP2(psrad),
2793 [8 + 6] = MMX_OP2(pslld),
2794 [16 + 2] = MMX_OP2(psrlq),
a7812ae4 2795 [16 + 3] = { NULL, gen_helper_psrldq_xmm },
664e0f19 2796 [16 + 6] = MMX_OP2(psllq),
a7812ae4 2797 [16 + 7] = { NULL, gen_helper_pslldq_xmm },
664e0f19
FB
2798};
2799
d3eb5eae 2800static const SSEFunc_0_epi sse_op_table3ai[] = {
a7812ae4 2801 gen_helper_cvtsi2ss,
11f8cdbc 2802 gen_helper_cvtsi2sd
c4baa050 2803};
a7812ae4 2804
11f8cdbc 2805#ifdef TARGET_X86_64
d3eb5eae 2806static const SSEFunc_0_epl sse_op_table3aq[] = {
11f8cdbc
SW
2807 gen_helper_cvtsq2ss,
2808 gen_helper_cvtsq2sd
2809};
2810#endif
2811
d3eb5eae 2812static const SSEFunc_i_ep sse_op_table3bi[] = {
a7812ae4 2813 gen_helper_cvttss2si,
a7812ae4 2814 gen_helper_cvtss2si,
bedc2ac1 2815 gen_helper_cvttsd2si,
11f8cdbc 2816 gen_helper_cvtsd2si
664e0f19 2817};
3b46e624 2818
11f8cdbc 2819#ifdef TARGET_X86_64
d3eb5eae 2820static const SSEFunc_l_ep sse_op_table3bq[] = {
11f8cdbc 2821 gen_helper_cvttss2sq,
11f8cdbc 2822 gen_helper_cvtss2sq,
bedc2ac1 2823 gen_helper_cvttsd2sq,
11f8cdbc
SW
2824 gen_helper_cvtsd2sq
2825};
2826#endif
2827
d3eb5eae 2828static const SSEFunc_0_epp sse_op_table4[8][4] = {
664e0f19
FB
2829 SSE_FOP(cmpeq),
2830 SSE_FOP(cmplt),
2831 SSE_FOP(cmple),
2832 SSE_FOP(cmpunord),
2833 SSE_FOP(cmpneq),
2834 SSE_FOP(cmpnlt),
2835 SSE_FOP(cmpnle),
2836 SSE_FOP(cmpord),
2837};
3b46e624 2838
d3eb5eae 2839static const SSEFunc_0_epp sse_op_table5[256] = {
a7812ae4
PB
2840 [0x0c] = gen_helper_pi2fw,
2841 [0x0d] = gen_helper_pi2fd,
2842 [0x1c] = gen_helper_pf2iw,
2843 [0x1d] = gen_helper_pf2id,
2844 [0x8a] = gen_helper_pfnacc,
2845 [0x8e] = gen_helper_pfpnacc,
2846 [0x90] = gen_helper_pfcmpge,
2847 [0x94] = gen_helper_pfmin,
2848 [0x96] = gen_helper_pfrcp,
2849 [0x97] = gen_helper_pfrsqrt,
2850 [0x9a] = gen_helper_pfsub,
2851 [0x9e] = gen_helper_pfadd,
2852 [0xa0] = gen_helper_pfcmpgt,
2853 [0xa4] = gen_helper_pfmax,
2854 [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
2855 [0xa7] = gen_helper_movq, /* pfrsqit1 */
2856 [0xaa] = gen_helper_pfsubr,
2857 [0xae] = gen_helper_pfacc,
2858 [0xb0] = gen_helper_pfcmpeq,
2859 [0xb4] = gen_helper_pfmul,
2860 [0xb6] = gen_helper_movq, /* pfrcpit2 */
2861 [0xb7] = gen_helper_pmulhrw_mmx,
2862 [0xbb] = gen_helper_pswapd,
2863 [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
a35f3ec7
AJ
2864};
2865
d3eb5eae
BS
2866struct SSEOpHelper_epp {
2867 SSEFunc_0_epp op[2];
c4baa050
BS
2868 uint32_t ext_mask;
2869};
2870
d3eb5eae
BS
2871struct SSEOpHelper_eppi {
2872 SSEFunc_0_eppi op[2];
c4baa050 2873 uint32_t ext_mask;
222a3336 2874};
c4baa050 2875
222a3336 2876#define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
a7812ae4
PB
2877#define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
2878#define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
222a3336 2879#define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
e71827bc
AJ
2880#define PCLMULQDQ_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, \
2881 CPUID_EXT_PCLMULQDQ }
d640045a 2882#define AESNI_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_AES }
c4baa050 2883
d3eb5eae 2884static const struct SSEOpHelper_epp sse_op_table6[256] = {
222a3336
AZ
2885 [0x00] = SSSE3_OP(pshufb),
2886 [0x01] = SSSE3_OP(phaddw),
2887 [0x02] = SSSE3_OP(phaddd),
2888 [0x03] = SSSE3_OP(phaddsw),
2889 [0x04] = SSSE3_OP(pmaddubsw),
2890 [0x05] = SSSE3_OP(phsubw),
2891 [0x06] = SSSE3_OP(phsubd),
2892 [0x07] = SSSE3_OP(phsubsw),
2893 [0x08] = SSSE3_OP(psignb),
2894 [0x09] = SSSE3_OP(psignw),
2895 [0x0a] = SSSE3_OP(psignd),
2896 [0x0b] = SSSE3_OP(pmulhrsw),
2897 [0x10] = SSE41_OP(pblendvb),
2898 [0x14] = SSE41_OP(blendvps),
2899 [0x15] = SSE41_OP(blendvpd),
2900 [0x17] = SSE41_OP(ptest),
2901 [0x1c] = SSSE3_OP(pabsb),
2902 [0x1d] = SSSE3_OP(pabsw),
2903 [0x1e] = SSSE3_OP(pabsd),
2904 [0x20] = SSE41_OP(pmovsxbw),
2905 [0x21] = SSE41_OP(pmovsxbd),
2906 [0x22] = SSE41_OP(pmovsxbq),
2907 [0x23] = SSE41_OP(pmovsxwd),
2908 [0x24] = SSE41_OP(pmovsxwq),
2909 [0x25] = SSE41_OP(pmovsxdq),
2910 [0x28] = SSE41_OP(pmuldq),
2911 [0x29] = SSE41_OP(pcmpeqq),
2912 [0x2a] = SSE41_SPECIAL, /* movntdqa */
2913 [0x2b] = SSE41_OP(packusdw),
2914 [0x30] = SSE41_OP(pmovzxbw),
2915 [0x31] = SSE41_OP(pmovzxbd),
2916 [0x32] = SSE41_OP(pmovzxbq),
2917 [0x33] = SSE41_OP(pmovzxwd),
2918 [0x34] = SSE41_OP(pmovzxwq),
2919 [0x35] = SSE41_OP(pmovzxdq),
2920 [0x37] = SSE42_OP(pcmpgtq),
2921 [0x38] = SSE41_OP(pminsb),
2922 [0x39] = SSE41_OP(pminsd),
2923 [0x3a] = SSE41_OP(pminuw),
2924 [0x3b] = SSE41_OP(pminud),
2925 [0x3c] = SSE41_OP(pmaxsb),
2926 [0x3d] = SSE41_OP(pmaxsd),
2927 [0x3e] = SSE41_OP(pmaxuw),
2928 [0x3f] = SSE41_OP(pmaxud),
2929 [0x40] = SSE41_OP(pmulld),
2930 [0x41] = SSE41_OP(phminposuw),
d640045a
AJ
2931 [0xdb] = AESNI_OP(aesimc),
2932 [0xdc] = AESNI_OP(aesenc),
2933 [0xdd] = AESNI_OP(aesenclast),
2934 [0xde] = AESNI_OP(aesdec),
2935 [0xdf] = AESNI_OP(aesdeclast),
4242b1bd
AZ
2936};
2937
d3eb5eae 2938static const struct SSEOpHelper_eppi sse_op_table7[256] = {
222a3336
AZ
2939 [0x08] = SSE41_OP(roundps),
2940 [0x09] = SSE41_OP(roundpd),
2941 [0x0a] = SSE41_OP(roundss),
2942 [0x0b] = SSE41_OP(roundsd),
2943 [0x0c] = SSE41_OP(blendps),
2944 [0x0d] = SSE41_OP(blendpd),
2945 [0x0e] = SSE41_OP(pblendw),
2946 [0x0f] = SSSE3_OP(palignr),
2947 [0x14] = SSE41_SPECIAL, /* pextrb */
2948 [0x15] = SSE41_SPECIAL, /* pextrw */
2949 [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
2950 [0x17] = SSE41_SPECIAL, /* extractps */
2951 [0x20] = SSE41_SPECIAL, /* pinsrb */
2952 [0x21] = SSE41_SPECIAL, /* insertps */
2953 [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
2954 [0x40] = SSE41_OP(dpps),
2955 [0x41] = SSE41_OP(dppd),
2956 [0x42] = SSE41_OP(mpsadbw),
e71827bc 2957 [0x44] = PCLMULQDQ_OP(pclmulqdq),
222a3336
AZ
2958 [0x60] = SSE42_OP(pcmpestrm),
2959 [0x61] = SSE42_OP(pcmpestri),
2960 [0x62] = SSE42_OP(pcmpistrm),
2961 [0x63] = SSE42_OP(pcmpistri),
d640045a 2962 [0xdf] = AESNI_OP(aeskeygenassist),
4242b1bd
AZ
2963};
2964
0af10c86
BS
2965static void gen_sse(CPUX86State *env, DisasContext *s, int b,
2966 target_ulong pc_start, int rex_r)
664e0f19 2967{
d67dc9e6 2968 int b1, op1_offset, op2_offset, is_xmm, val;
4eeb3939 2969 int modrm, mod, rm, reg;
d3eb5eae
BS
2970 SSEFunc_0_epp sse_fn_epp;
2971 SSEFunc_0_eppi sse_fn_eppi;
c4baa050 2972 SSEFunc_0_ppi sse_fn_ppi;
d3eb5eae 2973 SSEFunc_0_eppt sse_fn_eppt;
d67dc9e6 2974 TCGMemOp ot;
664e0f19
FB
2975
2976 b &= 0xff;
5fafdf24 2977 if (s->prefix & PREFIX_DATA)
664e0f19 2978 b1 = 1;
5fafdf24 2979 else if (s->prefix & PREFIX_REPZ)
664e0f19 2980 b1 = 2;
5fafdf24 2981 else if (s->prefix & PREFIX_REPNZ)
664e0f19
FB
2982 b1 = 3;
2983 else
2984 b1 = 0;
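    /* Added note: b1 selects the column of sse_op_table1 according to
     * the mandatory prefix: 0 = no prefix, 1 = 0x66, 2 = 0xF3,
     * 3 = 0xF2.  E.g. for opcode 0x10 the four columns are movups,
     * movupd, movss and movsd, matching the table comment above. */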
d3eb5eae
BS
2985 sse_fn_epp = sse_op_table1[b][b1];
2986 if (!sse_fn_epp) {
664e0f19 2987 goto illegal_op;
c4baa050 2988 }
a35f3ec7 2989 if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
664e0f19
FB
2990 is_xmm = 1;
2991 } else {
2992 if (b1 == 0) {
2993 /* MMX case */
2994 is_xmm = 0;
2995 } else {
2996 is_xmm = 1;
2997 }
2998 }
2999 /* simple MMX/SSE operation */
3000 if (s->flags & HF_TS_MASK) {
3001 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
3002 return;
3003 }
3004 if (s->flags & HF_EM_MASK) {
3005 illegal_op:
3006 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
3007 return;
3008 }
3009 if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
4242b1bd
AZ
3010 if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))
3011 goto illegal_op;
e771edab
AJ
3012 if (b == 0x0e) {
3013 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
3014 goto illegal_op;
3015 /* femms */
d3eb5eae 3016 gen_helper_emms(cpu_env);
e771edab
AJ
3017 return;
3018 }
3019 if (b == 0x77) {
3020 /* emms */
d3eb5eae 3021 gen_helper_emms(cpu_env);
664e0f19
FB
3022 return;
3023 }
3024 /* prepare MMX state (XXX: optimize by storing fptt and fptags in
3025 the static cpu state) */
3026 if (!is_xmm) {
d3eb5eae 3027 gen_helper_enter_mmx(cpu_env);
664e0f19
FB
3028 }
3029
0af10c86 3030 modrm = cpu_ldub_code(env, s->pc++);
664e0f19
FB
3031 reg = ((modrm >> 3) & 7);
3032 if (is_xmm)
3033 reg |= rex_r;
3034 mod = (modrm >> 6) & 3;
d3eb5eae 3035 if (sse_fn_epp == SSE_SPECIAL) {
664e0f19
FB
3036 b |= (b1 << 8);
3037 switch(b) {
3038 case 0x0e7: /* movntq */
5fafdf24 3039 if (mod == 3)
664e0f19 3040 goto illegal_op;
4eeb3939 3041 gen_lea_modrm(env, s, modrm);
323d1876 3042 gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
664e0f19
FB
3043 break;
3044 case 0x1e7: /* movntdq */
3045 case 0x02b: /* movntps */
3046 case 0x12b: /* movntpd */
2e21e749
T
3047 if (mod == 3)
3048 goto illegal_op;
4eeb3939 3049 gen_lea_modrm(env, s, modrm);
323d1876 3050 gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
2e21e749 3051 break;
465e9838
FB
3052 case 0x3f0: /* lddqu */
3053 if (mod == 3)
664e0f19 3054 goto illegal_op;
4eeb3939 3055 gen_lea_modrm(env, s, modrm);
323d1876 3056 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
664e0f19 3057 break;
d9f4bb27
AP
3058 case 0x22b: /* movntss */
3059 case 0x32b: /* movntsd */
3060 if (mod == 3)
3061 goto illegal_op;
4eeb3939 3062 gen_lea_modrm(env, s, modrm);
d9f4bb27 3063 if (b1 & 1) {
07958082
PB
3064 gen_stq_env_A0(s, offsetof(CPUX86State,
3065 xmm_regs[reg].XMM_Q(0)));
d9f4bb27
AP
3066 } else {
3067 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3068 xmm_regs[reg].XMM_L(0)));
fd8ca9f6 3069 gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0);
d9f4bb27
AP
3070 }
3071 break;
664e0f19 3072 case 0x6e: /* movd mm, ea */
dabd98dd 3073#ifdef TARGET_X86_64
ab4e4aec 3074 if (s->dflag == MO_64) {
4ba9938c 3075 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
5af45186 3076 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
5fafdf24 3077 } else
dabd98dd
FB
3078#endif
3079 {
4ba9938c 3080 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
5af45186
FB
3081 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3082 offsetof(CPUX86State,fpregs[reg].mmx));
a7812ae4
PB
3083 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3084 gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
dabd98dd 3085 }
664e0f19
FB
3086 break;
3087 case 0x16e: /* movd xmm, ea */
dabd98dd 3088#ifdef TARGET_X86_64
ab4e4aec 3089 if (s->dflag == MO_64) {
4ba9938c 3090 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
5af45186
FB
3091 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3092 offsetof(CPUX86State,xmm_regs[reg]));
a7812ae4 3093 gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
5fafdf24 3094 } else
dabd98dd
FB
3095#endif
3096 {
4ba9938c 3097 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
5af45186
FB
3098 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3099 offsetof(CPUX86State,xmm_regs[reg]));
b6abf97d 3100 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
a7812ae4 3101 gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
dabd98dd 3102 }
664e0f19
FB
3103 break;
3104 case 0x6f: /* movq mm, ea */
3105 if (mod != 3) {
4eeb3939 3106 gen_lea_modrm(env, s, modrm);
323d1876 3107 gen_ldq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
664e0f19
FB
3108 } else {
3109 rm = (modrm & 7);
b6abf97d 3110 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
5af45186 3111 offsetof(CPUX86State,fpregs[rm].mmx));
b6abf97d 3112 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
5af45186 3113 offsetof(CPUX86State,fpregs[reg].mmx));
664e0f19
FB
3114 }
3115 break;
3116 case 0x010: /* movups */
3117 case 0x110: /* movupd */
3118 case 0x028: /* movaps */
3119 case 0x128: /* movapd */
3120 case 0x16f: /* movdqa xmm, ea */
3121 case 0x26f: /* movdqu xmm, ea */
3122 if (mod != 3) {
4eeb3939 3123 gen_lea_modrm(env, s, modrm);
323d1876 3124 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
664e0f19
FB
3125 } else {
3126 rm = (modrm & 7) | REX_B(s);
3127 gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
3128 offsetof(CPUX86State,xmm_regs[rm]));
3129 }
3130 break;
3131 case 0x210: /* movss xmm, ea */
3132 if (mod != 3) {
4eeb3939 3133 gen_lea_modrm(env, s, modrm);
909be183 3134 gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0);
651ba608 3135 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
97212c88 3136 tcg_gen_movi_tl(cpu_T[0], 0);
651ba608
FB
3137 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3138 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3139 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
664e0f19
FB
3140 } else {
3141 rm = (modrm & 7) | REX_B(s);
3142 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3143 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3144 }
3145 break;
3146 case 0x310: /* movsd xmm, ea */
3147 if (mod != 3) {
4eeb3939 3148 gen_lea_modrm(env, s, modrm);
323d1876
RH
3149 gen_ldq_env_A0(s, offsetof(CPUX86State,
3150 xmm_regs[reg].XMM_Q(0)));
97212c88 3151 tcg_gen_movi_tl(cpu_T[0], 0);
651ba608
FB
3152 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3153 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
664e0f19
FB
3154 } else {
3155 rm = (modrm & 7) | REX_B(s);
3156 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3157 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3158 }
3159 break;
3160 case 0x012: /* movlps */
3161 case 0x112: /* movlpd */
3162 if (mod != 3) {
4eeb3939 3163 gen_lea_modrm(env, s, modrm);
323d1876
RH
3164 gen_ldq_env_A0(s, offsetof(CPUX86State,
3165 xmm_regs[reg].XMM_Q(0)));
664e0f19
FB
3166 } else {
3167 /* movhlps */
3168 rm = (modrm & 7) | REX_B(s);
3169 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3170 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3171 }
3172 break;
465e9838
FB
3173 case 0x212: /* movsldup */
3174 if (mod != 3) {
4eeb3939 3175 gen_lea_modrm(env, s, modrm);
323d1876 3176 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
465e9838
FB
3177 } else {
3178 rm = (modrm & 7) | REX_B(s);
3179 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3180 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3181 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3182 offsetof(CPUX86State,xmm_regs[rm].XMM_L(2)));
3183 }
3184 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3185 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3186 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3187 offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3188 break;
3189 case 0x312: /* movddup */
3190 if (mod != 3) {
4eeb3939 3191 gen_lea_modrm(env, s, modrm);
323d1876
RH
3192 gen_ldq_env_A0(s, offsetof(CPUX86State,
3193 xmm_regs[reg].XMM_Q(0)));
465e9838
FB
3194 } else {
3195 rm = (modrm & 7) | REX_B(s);
3196 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3197 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3198 }
3199 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
ba6526df 3200 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
465e9838 3201 break;
664e0f19
FB
3202 case 0x016: /* movhps */
3203 case 0x116: /* movhpd */
3204 if (mod != 3) {
4eeb3939 3205 gen_lea_modrm(env, s, modrm);
323d1876
RH
3206 gen_ldq_env_A0(s, offsetof(CPUX86State,
3207 xmm_regs[reg].XMM_Q(1)));
664e0f19
FB
3208 } else {
3209 /* movlhps */
3210 rm = (modrm & 7) | REX_B(s);
3211 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3212 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3213 }
3214 break;
3215 case 0x216: /* movshdup */
3216 if (mod != 3) {
4eeb3939 3217 gen_lea_modrm(env, s, modrm);
323d1876 3218 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
664e0f19
FB
3219 } else {
3220 rm = (modrm & 7) | REX_B(s);
3221 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3222 offsetof(CPUX86State,xmm_regs[rm].XMM_L(1)));
3223 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3224 offsetof(CPUX86State,xmm_regs[rm].XMM_L(3)));
3225 }
3226 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3227 offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3228 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3229 offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3230 break;
d9f4bb27
AP
3231 case 0x178:
3232 case 0x378:
3233 {
3234 int bit_index, field_length;
3235
3236 if (b1 == 1 && reg != 0)
3237 goto illegal_op;
0af10c86
BS
3238 field_length = cpu_ldub_code(env, s->pc++) & 0x3F;
3239 bit_index = cpu_ldub_code(env, s->pc++) & 0x3F;
d9f4bb27
AP
3240 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3241 offsetof(CPUX86State,xmm_regs[reg]));
3242 if (b1 == 1)
d3eb5eae
BS
3243 gen_helper_extrq_i(cpu_env, cpu_ptr0,
3244 tcg_const_i32(bit_index),
3245 tcg_const_i32(field_length));
d9f4bb27 3246 else
d3eb5eae
BS
3247 gen_helper_insertq_i(cpu_env, cpu_ptr0,
3248 tcg_const_i32(bit_index),
3249 tcg_const_i32(field_length));
d9f4bb27
AP
3250 }
3251 break;
664e0f19 3252 case 0x7e: /* movd ea, mm */
dabd98dd 3253#ifdef TARGET_X86_64
ab4e4aec 3254 if (s->dflag == MO_64) {
5af45186
FB
3255 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3256 offsetof(CPUX86State,fpregs[reg].mmx));
4ba9938c 3257 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
5fafdf24 3258 } else
dabd98dd
FB
3259#endif
3260 {
5af45186
FB
3261 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3262 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
4ba9938c 3263 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
dabd98dd 3264 }
664e0f19
FB
3265 break;
3266 case 0x17e: /* movd ea, xmm */
dabd98dd 3267#ifdef TARGET_X86_64
ab4e4aec 3268 if (s->dflag == MO_64) {
5af45186
FB
3269 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3270 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
4ba9938c 3271 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
5fafdf24 3272 } else
dabd98dd
FB
3273#endif
3274 {
5af45186
FB
3275 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3276 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
4ba9938c 3277 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
dabd98dd 3278 }
664e0f19
FB
3279 break;
3280 case 0x27e: /* movq xmm, ea */
3281 if (mod != 3) {
4eeb3939 3282 gen_lea_modrm(env, s, modrm);
323d1876
RH
3283 gen_ldq_env_A0(s, offsetof(CPUX86State,
3284 xmm_regs[reg].XMM_Q(0)));
664e0f19
FB
3285 } else {
3286 rm = (modrm & 7) | REX_B(s);
3287 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3288 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3289 }
3290 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3291 break;
3292 case 0x7f: /* movq ea, mm */
3293 if (mod != 3) {
4eeb3939 3294 gen_lea_modrm(env, s, modrm);
323d1876 3295 gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
664e0f19
FB
3296 } else {
3297 rm = (modrm & 7);
3298 gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
3299 offsetof(CPUX86State,fpregs[reg].mmx));
3300 }
3301 break;
3302 case 0x011: /* movups */
3303 case 0x111: /* movupd */
3304 case 0x029: /* movaps */
3305 case 0x129: /* movapd */
3306 case 0x17f: /* movdqa ea, xmm */
3307 case 0x27f: /* movdqu ea, xmm */
3308 if (mod != 3) {
4eeb3939 3309 gen_lea_modrm(env, s, modrm);
323d1876 3310 gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
664e0f19
FB
3311 } else {
3312 rm = (modrm & 7) | REX_B(s);
3313 gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
3314 offsetof(CPUX86State,xmm_regs[reg]));
3315 }
3316 break;
3317 case 0x211: /* movss ea, xmm */
3318 if (mod != 3) {
4eeb3939 3319 gen_lea_modrm(env, s, modrm);
651ba608 3320 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
fd8ca9f6 3321 gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0);
664e0f19
FB
3322 } else {
3323 rm = (modrm & 7) | REX_B(s);
3324 gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)),
3325 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3326 }
3327 break;
3328 case 0x311: /* movsd ea, xmm */
3329 if (mod != 3) {
4eeb3939 3330 gen_lea_modrm(env, s, modrm);
323d1876
RH
3331 gen_stq_env_A0(s, offsetof(CPUX86State,
3332 xmm_regs[reg].XMM_Q(0)));
664e0f19
FB
3333 } else {
3334 rm = (modrm & 7) | REX_B(s);
3335 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3336 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3337 }
3338 break;
3339 case 0x013: /* movlps */
3340 case 0x113: /* movlpd */
3341 if (mod != 3) {
4eeb3939 3342 gen_lea_modrm(env, s, modrm);
323d1876
RH
3343 gen_stq_env_A0(s, offsetof(CPUX86State,
3344 xmm_regs[reg].XMM_Q(0)));
664e0f19
FB
3345 } else {
3346 goto illegal_op;
3347 }
3348 break;
3349 case 0x017: /* movhps */
3350 case 0x117: /* movhpd */
3351 if (mod != 3) {
4eeb3939 3352 gen_lea_modrm(env, s, modrm);
323d1876
RH
3353 gen_stq_env_A0(s, offsetof(CPUX86State,
3354 xmm_regs[reg].XMM_Q(1)));
664e0f19
FB
3355 } else {
3356 goto illegal_op;
3357 }
3358 break;
3359 case 0x71: /* shift mm, im */
3360 case 0x72:
3361 case 0x73:
3362 case 0x171: /* shift xmm, im */
3363 case 0x172:
3364 case 0x173:
c045af25
AK
3365 if (b1 >= 2) {
3366 goto illegal_op;
3367 }
0af10c86 3368 val = cpu_ldub_code(env, s->pc++);
664e0f19 3369 if (is_xmm) {
1b90d56e 3370 tcg_gen_movi_tl(cpu_T[0], val);
651ba608 3371 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
97212c88 3372 tcg_gen_movi_tl(cpu_T[0], 0);
651ba608 3373 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1)));
664e0f19
FB
3374 op1_offset = offsetof(CPUX86State,xmm_t0);
3375 } else {
1b90d56e 3376 tcg_gen_movi_tl(cpu_T[0], val);
651ba608 3377 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
97212c88 3378 tcg_gen_movi_tl(cpu_T[0], 0);
651ba608 3379 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
3380 op1_offset = offsetof(CPUX86State,mmx_t0);
3381 }
3382 sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 +
3383 (((modrm >> 3)) & 7)][b1];
3384 if (!sse_fn_epp) {
664e0f19 3385 goto illegal_op;
c4baa050 3386 }
3387 if (is_xmm) {
3388 rm = (modrm & 7) | REX_B(s);
3389 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3390 } else {
3391 rm = (modrm & 7);
3392 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3393 }
3394 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3395 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
d3eb5eae 3396 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
3397 break;
3398 case 0x050: /* movmskps */
664e0f19 3399 rm = (modrm & 7) | REX_B(s);
3400 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3401 offsetof(CPUX86State,xmm_regs[rm]));
d3eb5eae 3402 gen_helper_movmskps(cpu_tmp2_i32, cpu_env, cpu_ptr0);
a7fbcbe5 3403 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
3404 break;
3405 case 0x150: /* movmskpd */
664e0f19 3406 rm = (modrm & 7) | REX_B(s);
3407 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3408 offsetof(CPUX86State,xmm_regs[rm]));
d3eb5eae 3409 gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, cpu_ptr0);
a7fbcbe5 3410 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
3411 break;
3412 case 0x02a: /* cvtpi2ps */
3413 case 0x12a: /* cvtpi2pd */
d3eb5eae 3414 gen_helper_enter_mmx(cpu_env);
664e0f19 3415 if (mod != 3) {
4eeb3939 3416 gen_lea_modrm(env, s, modrm);
664e0f19 3417 op2_offset = offsetof(CPUX86State,mmx_t0);
323d1876 3418 gen_ldq_env_A0(s, op2_offset);
3419 } else {
3420 rm = (modrm & 7);
3421 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3422 }
3423 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3424 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3425 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3426 switch(b >> 8) {
3427 case 0x0:
d3eb5eae 3428 gen_helper_cvtpi2ps(cpu_env, cpu_ptr0, cpu_ptr1);
3429 break;
3430 default:
3431 case 0x1:
d3eb5eae 3432 gen_helper_cvtpi2pd(cpu_env, cpu_ptr0, cpu_ptr1);
3433 break;
3434 }
3435 break;
3436 case 0x22a: /* cvtsi2ss */
3437 case 0x32a: /* cvtsi2sd */
ab4e4aec 3438 ot = mo_64_32(s->dflag);
0af10c86 3439 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
664e0f19 3440 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
5af45186 3441 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4ba9938c 3442 if (ot == MO_32) {
d3eb5eae 3443 SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1];
28e10711 3444 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
d3eb5eae 3445 sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32);
28e10711 3446 } else {
11f8cdbc 3447#ifdef TARGET_X86_64
3448 SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1];
3449 sse_fn_epl(cpu_env, cpu_ptr0, cpu_T[0]);
3450#else
3451 goto illegal_op;
3452#endif
28e10711 3453 }
3454 break;
3455 case 0x02c: /* cvttps2pi */
3456 case 0x12c: /* cvttpd2pi */
3457 case 0x02d: /* cvtps2pi */
3458 case 0x12d: /* cvtpd2pi */
d3eb5eae 3459 gen_helper_enter_mmx(cpu_env);
664e0f19 3460 if (mod != 3) {
4eeb3939 3461 gen_lea_modrm(env, s, modrm);
664e0f19 3462 op2_offset = offsetof(CPUX86State,xmm_t0);
323d1876 3463 gen_ldo_env_A0(s, op2_offset);
3464 } else {
3465 rm = (modrm & 7) | REX_B(s);
3466 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3467 }
3468 op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
3469 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3470 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3471 switch(b) {
3472 case 0x02c:
d3eb5eae 3473 gen_helper_cvttps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3474 break;
3475 case 0x12c:
d3eb5eae 3476 gen_helper_cvttpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3477 break;
3478 case 0x02d:
d3eb5eae 3479 gen_helper_cvtps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3480 break;
3481 case 0x12d:
d3eb5eae 3482 gen_helper_cvtpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3483 break;
3484 }
3485 break;
3486 case 0x22c: /* cvttss2si */
3487 case 0x32c: /* cvttsd2si */
3488 case 0x22d: /* cvtss2si */
3489 case 0x32d: /* cvtsd2si */
ab4e4aec 3490 ot = mo_64_32(s->dflag);
31313213 3491 if (mod != 3) {
4eeb3939 3492 gen_lea_modrm(env, s, modrm);
31313213 3493 if ((b >> 8) & 1) {
323d1876 3494 gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.XMM_Q(0)));
31313213 3495 } else {
909be183 3496 gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0);
651ba608 3497 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3498 }
3499 op2_offset = offsetof(CPUX86State,xmm_t0);
3500 } else {
3501 rm = (modrm & 7) | REX_B(s);
3502 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3503 }
5af45186 3504 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
4ba9938c 3505 if (ot == MO_32) {
d3eb5eae 3506 SSEFunc_i_ep sse_fn_i_ep =
bedc2ac1 3507 sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
d3eb5eae 3508 sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);
b6abf97d 3509 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5af45186 3510 } else {
11f8cdbc 3511#ifdef TARGET_X86_64
d3eb5eae 3512 SSEFunc_l_ep sse_fn_l_ep =
bedc2ac1 3513 sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
d3eb5eae 3514 sse_fn_l_ep(cpu_T[0], cpu_env, cpu_ptr0);
3515#else
3516 goto illegal_op;
3517#endif
5af45186 3518 }
480a762d 3519 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3520 break;
3521 case 0xc4: /* pinsrw */
5fafdf24 3522 case 0x1c4:
d1e42c5c 3523 s->rip_offset = 1;
4ba9938c 3524 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
0af10c86 3525 val = cpu_ldub_code(env, s->pc++);
3526 if (b1) {
3527 val &= 7;
3528 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3529 offsetof(CPUX86State,xmm_regs[reg].XMM_W(val)));
3530 } else {
3531 val &= 3;
3532 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3533 offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
3534 }
3535 break;
3536 case 0xc5: /* pextrw */
5fafdf24 3537 case 0x1c5:
3538 if (mod != 3)
3539 goto illegal_op;
ab4e4aec 3540 ot = mo_64_32(s->dflag);
0af10c86 3541 val = cpu_ldub_code(env, s->pc++);
3542 if (b1) {
3543 val &= 7;
3544 rm = (modrm & 7) | REX_B(s);
3545 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3546 offsetof(CPUX86State,xmm_regs[rm].XMM_W(val)));
3547 } else {
3548 val &= 3;
3549 rm = (modrm & 7);
3550 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3551 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
3552 }
3553 reg = ((modrm >> 3) & 7) | rex_r;
480a762d 3554 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3555 break;
3556 case 0x1d6: /* movq ea, xmm */
3557 if (mod != 3) {
4eeb3939 3558 gen_lea_modrm(env, s, modrm);
3559 gen_stq_env_A0(s, offsetof(CPUX86State,
3560 xmm_regs[reg].XMM_Q(0)));
3561 } else {
3562 rm = (modrm & 7) | REX_B(s);
3563 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3564 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3565 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3566 }
3567 break;
3568 case 0x2d6: /* movq2dq */
d3eb5eae 3569 gen_helper_enter_mmx(cpu_env);
3570 rm = (modrm & 7);
3571 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3572 offsetof(CPUX86State,fpregs[rm].mmx));
3573 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3574 break;
3575 case 0x3d6: /* movdq2q */
d3eb5eae 3576 gen_helper_enter_mmx(cpu_env);
3577 rm = (modrm & 7) | REX_B(s);
3578 gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
3579 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3580 break;
3581 case 0xd7: /* pmovmskb */
3582 case 0x1d7:
3583 if (mod != 3)
3584 goto illegal_op;
3585 if (b1) {
3586 rm = (modrm & 7) | REX_B(s);
5af45186 3587 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
d3eb5eae 3588 gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3589 } else {
3590 rm = (modrm & 7);
5af45186 3591 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
d3eb5eae 3592 gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3593 }
3594 reg = ((modrm >> 3) & 7) | rex_r;
a7fbcbe5 3595 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
664e0f19 3596 break;
111994ee 3597
4242b1bd 3598 case 0x138:
000cacf6 3599 case 0x038:
4242b1bd 3600 b = modrm;
3601 if ((b & 0xf0) == 0xf0) {
3602 goto do_0f_38_fx;
3603 }
0af10c86 3604 modrm = cpu_ldub_code(env, s->pc++);
3605 rm = modrm & 7;
3606 reg = ((modrm >> 3) & 7) | rex_r;
3607 mod = (modrm >> 6) & 3;
3608 if (b1 >= 2) {
3609 goto illegal_op;
3610 }
4242b1bd 3611
3612 sse_fn_epp = sse_op_table6[b].op[b1];
3613 if (!sse_fn_epp) {
4242b1bd 3614 goto illegal_op;
c4baa050 3615 }
3616 if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
3617 goto illegal_op;
3618
3619 if (b1) {
3620 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3621 if (mod == 3) {
3622 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3623 } else {
3624 op2_offset = offsetof(CPUX86State,xmm_t0);
4eeb3939 3625 gen_lea_modrm(env, s, modrm);
3626 switch (b) {
3627 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3628 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3629 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
323d1876 3630 gen_ldq_env_A0(s, op2_offset +
3631 offsetof(XMMReg, XMM_Q(0)));
3632 break;
3633 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3634 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3635 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
3636 s->mem_index, MO_LEUL);
3637 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
3638 offsetof(XMMReg, XMM_L(0)));
3639 break;
3640 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3641 tcg_gen_qemu_ld_tl(cpu_tmp0, cpu_A0,
3642 s->mem_index, MO_LEUW);
3643 tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
3644 offsetof(XMMReg, XMM_W(0)));
3645 break;
 3646                 case 0x2a:            /* movntdqa */
323d1876 3647 gen_ldo_env_A0(s, op1_offset);
3648 return;
3649 default:
323d1876 3650 gen_ldo_env_A0(s, op2_offset);
222a3336 3651 }
3652 }
3653 } else {
3654 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3655 if (mod == 3) {
3656 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3657 } else {
3658 op2_offset = offsetof(CPUX86State,mmx_t0);
4eeb3939 3659 gen_lea_modrm(env, s, modrm);
323d1876 3660 gen_ldq_env_A0(s, op2_offset);
3661 }
3662 }
d3eb5eae 3663 if (sse_fn_epp == SSE_SPECIAL) {
222a3336 3664 goto illegal_op;
c4baa050 3665 }
222a3336 3666
3667 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3668 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
d3eb5eae 3669 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
222a3336 3670
3671 if (b == 0x17) {
3672 set_cc_op(s, CC_OP_EFLAGS);
3673 }
4242b1bd 3674 break;
3675
3676 case 0x238:
3677 case 0x338:
3678 do_0f_38_fx:
3679 /* Various integer extensions at 0f 38 f[0-f]. */
3680 b = modrm | (b1 << 8);
0af10c86 3681 modrm = cpu_ldub_code(env, s->pc++);
3682 reg = ((modrm >> 3) & 7) | rex_r;
3683
3684 switch (b) {
3685 case 0x3f0: /* crc32 Gd,Eb */
3686 case 0x3f1: /* crc32 Gd,Ey */
3687 do_crc32:
3688 if (!(s->cpuid_ext_features & CPUID_EXT_SSE42)) {
3689 goto illegal_op;
3690 }
3691 if ((b & 0xff) == 0xf0) {
4ba9938c 3692 ot = MO_8;
ab4e4aec 3693 } else if (s->dflag != MO_64) {
4ba9938c 3694 ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
111994ee 3695 } else {
4ba9938c 3696 ot = MO_64;
111994ee 3697 }
4242b1bd 3698
24b9c00f 3699 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[reg]);
3700 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3701 gen_helper_crc32(cpu_T[0], cpu_tmp2_i32,
3702 cpu_T[0], tcg_const_i32(8 << ot));
222a3336 3703
ab4e4aec 3704 ot = mo_64_32(s->dflag);
480a762d 3705 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
111994ee 3706 break;
222a3336 3707
3708 case 0x1f0: /* crc32 or movbe */
3709 case 0x1f1:
3710 /* For these insns, the f3 prefix is supposed to have priority
3711 over the 66 prefix, but that's not what we implement above
3712 setting b1. */
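            /* In practice: 0f 38 f0/f1 with an f2 prefix is CRC32 (even if
               66 is also present); everything else here is MOVBE, with 66
               then only selecting the 16-bit operand size. */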
3713 if (s->prefix & PREFIX_REPNZ) {
3714 goto do_crc32;
3715 }
3716 /* FALLTHRU */
3717 case 0x0f0: /* movbe Gy,My */
3718 case 0x0f1: /* movbe My,Gy */
3719 if (!(s->cpuid_ext_features & CPUID_EXT_MOVBE)) {
3720 goto illegal_op;
3721 }
ab4e4aec 3722 if (s->dflag != MO_64) {
4ba9938c 3723 ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
111994ee 3724 } else {
4ba9938c 3725 ot = MO_64;
3726 }
3727
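            /* MOVBE is always a load or a store, so the byte swap is folded
               into the memory access itself via MO_BE rather than emitting
               a separate byte-swap op. */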
3655a19f 3728 gen_lea_modrm(env, s, modrm);
111994ee 3729 if ((b & 1) == 0) {
3730 tcg_gen_qemu_ld_tl(cpu_T[0], cpu_A0,
3731 s->mem_index, ot | MO_BE);
480a762d 3732 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
111994ee 3733 } else {
3734 tcg_gen_qemu_st_tl(cpu_regs[reg], cpu_A0,
3735 s->mem_index, ot | MO_BE);
3736 }
3737 break;
3738
3739 case 0x0f2: /* andn Gy, By, Ey */
3740 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
3741 || !(s->prefix & PREFIX_VEX)
3742 || s->vex_l != 0) {
3743 goto illegal_op;
3744 }
ab4e4aec 3745 ot = mo_64_32(s->dflag);
3746 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3747 tcg_gen_andc_tl(cpu_T[0], cpu_regs[s->vex_v], cpu_T[0]);
480a762d 3748 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3749 gen_op_update1_cc();
3750 set_cc_op(s, CC_OP_LOGICB + ot);
3751 break;
3752
3753 case 0x0f7: /* bextr Gy, Ey, By */
3754 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
3755 || !(s->prefix & PREFIX_VEX)
3756 || s->vex_l != 0) {
3757 goto illegal_op;
3758 }
ab4e4aec 3759 ot = mo_64_32(s->dflag);
3760 {
3761 TCGv bound, zero;
3762
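                /* Control word layout: bits 7:0 of By are START, bits 15:8
                   are LEN.  E.g. By = 0x0408 extracts 4 bits starting at
                   bit 8, i.e. (Ey >> 8) & 0xf. */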
3763 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3764 /* Extract START, and shift the operand.
3765 Shifts larger than operand size get zeros. */
3766 tcg_gen_ext8u_tl(cpu_A0, cpu_regs[s->vex_v]);
3767 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_A0);
3768
4ba9938c 3769 bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
3770 zero = tcg_const_tl(0);
3771 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_T[0], cpu_A0, bound,
3772 cpu_T[0], zero);
3773 tcg_temp_free(zero);
3774
3775 /* Extract the LEN into a mask. Lengths larger than
3776 operand size get all ones. */
3777 tcg_gen_shri_tl(cpu_A0, cpu_regs[s->vex_v], 8);
3778 tcg_gen_ext8u_tl(cpu_A0, cpu_A0);
3779 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_A0, cpu_A0, bound,
3780 cpu_A0, bound);
3781 tcg_temp_free(bound);
3782 tcg_gen_movi_tl(cpu_T[1], 1);
3783 tcg_gen_shl_tl(cpu_T[1], cpu_T[1], cpu_A0);
3784 tcg_gen_subi_tl(cpu_T[1], cpu_T[1], 1);
3785 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
3786
480a762d 3787 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3788 gen_op_update1_cc();
3789 set_cc_op(s, CC_OP_LOGICB + ot);
3790 }
3791 break;
3792
3793 case 0x0f5: /* bzhi Gy, Ey, By */
3794 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3795 || !(s->prefix & PREFIX_VEX)
3796 || s->vex_l != 0) {
3797 goto illegal_op;
3798 }
ab4e4aec 3799 ot = mo_64_32(s->dflag);
3800 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3801 tcg_gen_ext8u_tl(cpu_T[1], cpu_regs[s->vex_v]);
3802 {
4ba9938c 3803 TCGv bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
3804 /* Note that since we're using BMILG (in order to get O
3805 cleared) we need to store the inverse into C. */
3806 tcg_gen_setcond_tl(TCG_COND_LT, cpu_cc_src,
3807 cpu_T[1], bound);
3808 tcg_gen_movcond_tl(TCG_COND_GT, cpu_T[1], cpu_T[1],
3809 bound, bound, cpu_T[1]);
3810 tcg_temp_free(bound);
3811 }
3812 tcg_gen_movi_tl(cpu_A0, -1);
3813 tcg_gen_shl_tl(cpu_A0, cpu_A0, cpu_T[1]);
3814 tcg_gen_andc_tl(cpu_T[0], cpu_T[0], cpu_A0);
480a762d 3815 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3816 gen_op_update1_cc();
3817 set_cc_op(s, CC_OP_BMILGB + ot);
3818 break;
3819
3820 case 0x3f6: /* mulx By, Gy, rdx, Ey */
3821 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3822 || !(s->prefix & PREFIX_VEX)
3823 || s->vex_l != 0) {
3824 goto illegal_op;
3825 }
ab4e4aec 3826 ot = mo_64_32(s->dflag);
3827 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
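            /* MULX multiplies Ey by rdx/edx: the low half of the product
               goes to the vvvv register and the high half to the modrm reg
               destination, without touching any flags (hence no set_cc_op
               here). */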
3828 switch (ot) {
5f1f4b17 3829 default:
3830 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3831 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EDX]);
3832 tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
3833 cpu_tmp2_i32, cpu_tmp3_i32);
3834 tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], cpu_tmp2_i32);
3835 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp3_i32);
3836 break;
3837#ifdef TARGET_X86_64
4ba9938c 3838 case MO_64:
3839 tcg_gen_mulu2_i64(cpu_regs[s->vex_v], cpu_regs[reg],
3840 cpu_T[0], cpu_regs[R_EDX]);
3841 break;
3842#endif
3843 }
3844 break;
3845
3846 case 0x3f5: /* pdep Gy, By, Ey */
3847 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3848 || !(s->prefix & PREFIX_VEX)
3849 || s->vex_l != 0) {
3850 goto illegal_op;
3851 }
ab4e4aec 3852 ot = mo_64_32(s->dflag);
3853 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3854 /* Note that by zero-extending the mask operand, we
3855 automatically handle zero-extending the result. */
ab4e4aec 3856 if (ot == MO_64) {
3857 tcg_gen_mov_tl(cpu_T[1], cpu_regs[s->vex_v]);
3858 } else {
3859 tcg_gen_ext32u_tl(cpu_T[1], cpu_regs[s->vex_v]);
3860 }
3861 gen_helper_pdep(cpu_regs[reg], cpu_T[0], cpu_T[1]);
3862 break;
3863
3864 case 0x2f5: /* pext Gy, By, Ey */
3865 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3866 || !(s->prefix & PREFIX_VEX)
3867 || s->vex_l != 0) {
3868 goto illegal_op;
3869 }
ab4e4aec 3870 ot = mo_64_32(s->dflag);
3871 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3872 /* Note that by zero-extending the mask operand, we
3873 automatically handle zero-extending the result. */
ab4e4aec 3874 if (ot == MO_64) {
3875 tcg_gen_mov_tl(cpu_T[1], cpu_regs[s->vex_v]);
3876 } else {
3877 tcg_gen_ext32u_tl(cpu_T[1], cpu_regs[s->vex_v]);
3878 }
3879 gen_helper_pext(cpu_regs[reg], cpu_T[0], cpu_T[1]);
3880 break;
3881
3882 case 0x1f6: /* adcx Gy, Ey */
3883 case 0x2f6: /* adox Gy, Ey */
3884 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_ADX)) {
3885 goto illegal_op;
3886 } else {
76f13133 3887 TCGv carry_in, carry_out, zero;
3888 int end_op;
3889
ab4e4aec 3890 ot = mo_64_32(s->dflag);
3891 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3892
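                /* The CC_OP value tracks which carry is currently live:
                   CC_OP_ADCX keeps CF in cc_dst, CC_OP_ADOX keeps OF in
                   cc_src2, and CC_OP_ADCOX keeps both, so back-to-back
                   adcx/adox chains can pass the carry along without
                   recomputing EFLAGS. */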
3893 /* Re-use the carry-out from a previous round. */
3894 TCGV_UNUSED(carry_in);
3895 carry_out = (b == 0x1f6 ? cpu_cc_dst : cpu_cc_src2);
3896 switch (s->cc_op) {
3897 case CC_OP_ADCX:
3898 if (b == 0x1f6) {
3899 carry_in = cpu_cc_dst;
3900 end_op = CC_OP_ADCX;
3901 } else {
3902 end_op = CC_OP_ADCOX;
3903 }
3904 break;
3905 case CC_OP_ADOX:
3906 if (b == 0x1f6) {
3907 end_op = CC_OP_ADCOX;
3908 } else {
3909 carry_in = cpu_cc_src2;
3910 end_op = CC_OP_ADOX;
3911 }
3912 break;
3913 case CC_OP_ADCOX:
3914 end_op = CC_OP_ADCOX;
3915 carry_in = carry_out;
3916 break;
3917 default:
c53de1a2 3918 end_op = (b == 0x1f6 ? CC_OP_ADCX : CC_OP_ADOX);
3919 break;
3920 }
3921 /* If we can't reuse carry-out, get it out of EFLAGS. */
3922 if (TCGV_IS_UNUSED(carry_in)) {
3923 if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) {
3924 gen_compute_eflags(s);
3925 }
3926 carry_in = cpu_tmp0;
3927 tcg_gen_shri_tl(carry_in, cpu_cc_src,
3928 ctz32(b == 0x1f6 ? CC_C : CC_O));
3929 tcg_gen_andi_tl(carry_in, carry_in, 1);
3930 }
3931
3932 switch (ot) {
3933#ifdef TARGET_X86_64
4ba9938c 3934 case MO_32:
3935 /* If we know TL is 64-bit, and we want a 32-bit
3936 result, just do everything in 64-bit arithmetic. */
3937 tcg_gen_ext32u_i64(cpu_regs[reg], cpu_regs[reg]);
3938 tcg_gen_ext32u_i64(cpu_T[0], cpu_T[0]);
3939 tcg_gen_add_i64(cpu_T[0], cpu_T[0], cpu_regs[reg]);
3940 tcg_gen_add_i64(cpu_T[0], cpu_T[0], carry_in);
3941 tcg_gen_ext32u_i64(cpu_regs[reg], cpu_T[0]);
3942 tcg_gen_shri_i64(carry_out, cpu_T[0], 32);
3943 break;
3944#endif
3945 default:
3946 /* Otherwise compute the carry-out in two steps. */
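                /* Each add2 yields a (sum, carry) pair: first
                   T0 + carry_in, then reg + that sum, with the second add2
                   also folding in the first carry, so carry_out ends up
                   holding the carry of reg + T0 + carry_in. */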
3947 zero = tcg_const_tl(0);
3948 tcg_gen_add2_tl(cpu_T[0], carry_out,
3949 cpu_T[0], zero,
3950 carry_in, zero);
3951 tcg_gen_add2_tl(cpu_regs[reg], carry_out,
3952 cpu_regs[reg], carry_out,
3953 cpu_T[0], zero);
3954 tcg_temp_free(zero);
3955 break;
3956 }
3957 set_cc_op(s, end_op);
3958 }
3959 break;
3960
3961 case 0x1f7: /* shlx Gy, Ey, By */
3962 case 0x2f7: /* sarx Gy, Ey, By */
3963 case 0x3f7: /* shrx Gy, Ey, By */
3964 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3965 || !(s->prefix & PREFIX_VEX)
3966 || s->vex_l != 0) {
3967 goto illegal_op;
3968 }
ab4e4aec 3969 ot = mo_64_32(s->dflag);
4a554890 3970 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4ba9938c 3971 if (ot == MO_64) {
3972 tcg_gen_andi_tl(cpu_T[1], cpu_regs[s->vex_v], 63);
3973 } else {
3974 tcg_gen_andi_tl(cpu_T[1], cpu_regs[s->vex_v], 31);
3975 }
3976 if (b == 0x1f7) {
3977 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
3978 } else if (b == 0x2f7) {
4ba9938c 3979 if (ot != MO_64) {
3980 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
3981 }
3982 tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
3983 } else {
4ba9938c 3984 if (ot != MO_64) {
3985 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
3986 }
3987 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
3988 }
480a762d 3989 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3990 break;
3991
3992 case 0x0f3:
3993 case 0x1f3:
3994 case 0x2f3:
3995 case 0x3f3: /* Group 17 */
3996 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
3997 || !(s->prefix & PREFIX_VEX)
3998 || s->vex_l != 0) {
3999 goto illegal_op;
4000 }
ab4e4aec 4001 ot = mo_64_32(s->dflag);
4002 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4003
4004 switch (reg & 7) {
4005 case 1: /* blsr By,Ey */
4006 tcg_gen_neg_tl(cpu_T[1], cpu_T[0]);
4007 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
480a762d 4008 gen_op_mov_reg_v(ot, s->vex_v, cpu_T[0]);
4009 gen_op_update2_cc();
4010 set_cc_op(s, CC_OP_BMILGB + ot);
4011 break;
4012
4013 case 2: /* blsmsk By,Ey */
4014 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4015 tcg_gen_subi_tl(cpu_T[0], cpu_T[0], 1);
4016 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_cc_src);
4017 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4018 set_cc_op(s, CC_OP_BMILGB + ot);
4019 break;
4020
4021 case 3: /* blsi By, Ey */
4022 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4023 tcg_gen_subi_tl(cpu_T[0], cpu_T[0], 1);
4024 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_cc_src);
4025 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4026 set_cc_op(s, CC_OP_BMILGB + ot);
4027 break;
4028
4029 default:
4030 goto illegal_op;
4031 }
4032 break;
4033
4034 default:
4035 goto illegal_op;
4036 }
222a3336 4037 break;
111994ee 4038
4039 case 0x03a:
4040 case 0x13a:
4242b1bd 4041 b = modrm;
0af10c86 4042 modrm = cpu_ldub_code(env, s->pc++);
4043 rm = modrm & 7;
4044 reg = ((modrm >> 3) & 7) | rex_r;
4045 mod = (modrm >> 6) & 3;
4046 if (b1 >= 2) {
4047 goto illegal_op;
4048 }
4242b1bd 4049
4050 sse_fn_eppi = sse_op_table7[b].op[b1];
4051 if (!sse_fn_eppi) {
4242b1bd 4052 goto illegal_op;
c4baa050 4053 }
4054 if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
4055 goto illegal_op;
4056
d3eb5eae 4057 if (sse_fn_eppi == SSE_SPECIAL) {
ab4e4aec 4058 ot = mo_64_32(s->dflag);
4059 rm = (modrm & 7) | REX_B(s);
4060 if (mod != 3)
4eeb3939 4061 gen_lea_modrm(env, s, modrm);
222a3336 4062 reg = ((modrm >> 3) & 7) | rex_r;
0af10c86 4063 val = cpu_ldub_code(env, s->pc++);
4064 switch (b) {
4065 case 0x14: /* pextrb */
4066 tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4067 xmm_regs[reg].XMM_B(val & 15)));
3523e4bd 4068 if (mod == 3) {
480a762d 4069 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
4070 } else {
4071 tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
4072 s->mem_index, MO_UB);
4073 }
4074 break;
4075 case 0x15: /* pextrw */
4076 tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4077 xmm_regs[reg].XMM_W(val & 7)));
3523e4bd 4078 if (mod == 3) {
480a762d 4079 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
4080 } else {
4081 tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
4082 s->mem_index, MO_LEUW);
4083 }
4084 break;
4085 case 0x16:
4ba9938c 4086 if (ot == MO_32) { /* pextrd */
4087 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4088 offsetof(CPUX86State,
4089 xmm_regs[reg].XMM_L(val & 3)));
3523e4bd 4090 if (mod == 3) {
a7fbcbe5 4091 tcg_gen_extu_i32_tl(cpu_regs[rm], cpu_tmp2_i32);
3523e4bd 4092 } else {
4093 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
4094 s->mem_index, MO_LEUL);
3523e4bd 4095 }
222a3336 4096 } else { /* pextrq */
a7812ae4 4097#ifdef TARGET_X86_64
4098 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
4099 offsetof(CPUX86State,
4100 xmm_regs[reg].XMM_Q(val & 1)));
3523e4bd 4101 if (mod == 3) {
a7fbcbe5 4102 tcg_gen_mov_i64(cpu_regs[rm], cpu_tmp1_i64);
4103 } else {
4104 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
4105 s->mem_index, MO_LEQ);
4106 }
4107#else
4108 goto illegal_op;
4109#endif
4110 }
4111 break;
4112 case 0x17: /* extractps */
4113 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4114 xmm_regs[reg].XMM_L(val & 3)));
3523e4bd 4115 if (mod == 3) {
480a762d 4116 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
4117 } else {
4118 tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
4119 s->mem_index, MO_LEUL);
4120 }
4121 break;
4122 case 0x20: /* pinsrb */
3c5f4116 4123 if (mod == 3) {
c56baccf 4124 gen_op_mov_v_reg(MO_32, cpu_T[0], rm);
4125 } else {
4126 tcg_gen_qemu_ld_tl(cpu_T[0], cpu_A0,
4127 s->mem_index, MO_UB);
4128 }
34c6addd 4129 tcg_gen_st8_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4130 xmm_regs[reg].XMM_B(val & 15)));
4131 break;
4132 case 0x21: /* insertps */
a7812ae4 4133 if (mod == 3) {
4134 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4135 offsetof(CPUX86State,xmm_regs[rm]
4136 .XMM_L((val >> 6) & 3)));
a7812ae4 4137 } else {
4138 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
4139 s->mem_index, MO_LEUL);
a7812ae4 4140 }
4141 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4142 offsetof(CPUX86State,xmm_regs[reg]
4143 .XMM_L((val >> 4) & 3)));
4144 if ((val >> 0) & 1)
4145 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4146 cpu_env, offsetof(CPUX86State,
4147 xmm_regs[reg].XMM_L(0)));
4148 if ((val >> 1) & 1)
4149 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4150 cpu_env, offsetof(CPUX86State,
4151 xmm_regs[reg].XMM_L(1)));
4152 if ((val >> 2) & 1)
4153 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4154 cpu_env, offsetof(CPUX86State,
4155 xmm_regs[reg].XMM_L(2)));
4156 if ((val >> 3) & 1)
4157 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4158 cpu_env, offsetof(CPUX86State,
4159 xmm_regs[reg].XMM_L(3)));
4160 break;
4161 case 0x22:
4ba9938c 4162 if (ot == MO_32) { /* pinsrd */
3c5f4116 4163 if (mod == 3) {
80b02013 4164 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[rm]);
3c5f4116 4165 } else {
4166 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
4167 s->mem_index, MO_LEUL);
3c5f4116 4168 }
4169 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4170 offsetof(CPUX86State,
4171 xmm_regs[reg].XMM_L(val & 3)));
4172 } else { /* pinsrq */
a7812ae4 4173#ifdef TARGET_X86_64
3c5f4116 4174 if (mod == 3) {
222a3336 4175 gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
4176 } else {
4177 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
4178 s->mem_index, MO_LEQ);
4179 }
4180 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
4181 offsetof(CPUX86State,
4182 xmm_regs[reg].XMM_Q(val & 1)));
4183#else
4184 goto illegal_op;
4185#endif
4186 }
4187 break;
4188 }
4189 return;
4190 }
4191
4192 if (b1) {
4193 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4194 if (mod == 3) {
4195 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
4196 } else {
4197 op2_offset = offsetof(CPUX86State,xmm_t0);
4eeb3939 4198 gen_lea_modrm(env, s, modrm);
323d1876 4199 gen_ldo_env_A0(s, op2_offset);
4200 }
4201 } else {
4202 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4203 if (mod == 3) {
4204 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4205 } else {
4206 op2_offset = offsetof(CPUX86State,mmx_t0);
4eeb3939 4207 gen_lea_modrm(env, s, modrm);
323d1876 4208 gen_ldq_env_A0(s, op2_offset);
4209 }
4210 }
0af10c86 4211 val = cpu_ldub_code(env, s->pc++);
4242b1bd 4212
222a3336 4213 if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
3ca51d07 4214 set_cc_op(s, CC_OP_EFLAGS);
222a3336 4215
ab4e4aec 4216 if (s->dflag == MO_64) {
4217 /* The helper must use entire 64-bit gp registers */
4218 val |= 1 << 8;
ab4e4aec 4219 }
4220 }
4221
4222 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4223 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
d3eb5eae 4224 sse_fn_eppi(cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4242b1bd 4225 break;
4226
4227 case 0x33a:
4228 /* Various integer extensions at 0f 3a f[0-f]. */
4229 b = modrm | (b1 << 8);
4230 modrm = cpu_ldub_code(env, s->pc++);
4231 reg = ((modrm >> 3) & 7) | rex_r;
4232
4233 switch (b) {
4234 case 0x3f0: /* rorx Gy,Ey, Ib */
4235 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4236 || !(s->prefix & PREFIX_VEX)
4237 || s->vex_l != 0) {
4238 goto illegal_op;
4239 }
ab4e4aec 4240 ot = mo_64_32(s->dflag);
4241 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4242 b = cpu_ldub_code(env, s->pc++);
4ba9938c 4243 if (ot == MO_64) {
4244 tcg_gen_rotri_tl(cpu_T[0], cpu_T[0], b & 63);
4245 } else {
4246 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4247 tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, b & 31);
4248 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
4249 }
480a762d 4250 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
4251 break;
4252
4253 default:
4254 goto illegal_op;
4255 }
4256 break;
4257
4258 default:
4259 goto illegal_op;
4260 }
4261 } else {
4262 /* generic MMX or SSE operation */
d1e42c5c 4263 switch(b) {
4264 case 0x70: /* pshufx insn */
4265 case 0xc6: /* pshufx insn */
4266 case 0xc2: /* compare insns */
4267 s->rip_offset = 1;
4268 break;
4269 default:
4270 break;
4271 }
4272 if (is_xmm) {
4273 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4274 if (mod != 3) {
4275 int sz = 4;
4276
4eeb3939 4277 gen_lea_modrm(env, s, modrm);
664e0f19 4278 op2_offset = offsetof(CPUX86State,xmm_t0);
4279
4280 switch (b) {
4281 case 0x50 ... 0x5a:
4282 case 0x5c ... 0x5f:
4283 case 0xc2:
4284 /* Most sse scalar operations. */
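                    /* With f3 (b1 == 2) or f2 (b1 == 3) these are the
                       scalar ss/sd forms, so only 4 or 8 bytes are loaded,
                       presumably to avoid touching memory beyond the
                       operand that a full 16-byte packed load would. */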
664e0f19 4285 if (b1 == 2) {
4286 sz = 2;
4287 } else if (b1 == 3) {
4288 sz = 3;
4289 }
4290 break;
4291
4292 case 0x2e: /* ucomis[sd] */
4293 case 0x2f: /* comis[sd] */
4294 if (b1 == 0) {
4295 sz = 2;
664e0f19 4296 } else {
cb48da7f 4297 sz = 3;
664e0f19 4298 }
4299 break;
4300 }
4301
4302 switch (sz) {
4303 case 2:
4304 /* 32 bit access */
4305 gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0);
4306 tcg_gen_st32_tl(cpu_T[0], cpu_env,
4307 offsetof(CPUX86State,xmm_t0.XMM_L(0)));
4308 break;
4309 case 3:
4310 /* 64 bit access */
4311 gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.XMM_D(0)));
4312 break;
4313 default:
4314 /* 128 bit access */
323d1876 4315 gen_ldo_env_A0(s, op2_offset);
cb48da7f 4316 break;
4317 }
4318 } else {
4319 rm = (modrm & 7) | REX_B(s);
4320 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
4321 }
4322 } else {
4323 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4324 if (mod != 3) {
4eeb3939 4325 gen_lea_modrm(env, s, modrm);
664e0f19 4326 op2_offset = offsetof(CPUX86State,mmx_t0);
323d1876 4327 gen_ldq_env_A0(s, op2_offset);
4328 } else {
4329 rm = (modrm & 7);
4330 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4331 }
4332 }
4333 switch(b) {
a35f3ec7 4334 case 0x0f: /* 3DNow! data insns */
4335 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
4336 goto illegal_op;
0af10c86 4337 val = cpu_ldub_code(env, s->pc++);
4338 sse_fn_epp = sse_op_table5[val];
4339 if (!sse_fn_epp) {
a35f3ec7 4340 goto illegal_op;
c4baa050 4341 }
4342 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4343 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
d3eb5eae 4344 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
a35f3ec7 4345 break;
4346 case 0x70: /* pshufx insn */
4347 case 0xc6: /* pshufx insn */
0af10c86 4348 val = cpu_ldub_code(env, s->pc++);
4349 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4350 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
c4baa050 4351 /* XXX: introduce a new table? */
d3eb5eae 4352 sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp;
c4baa050 4353 sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4354 break;
4355 case 0xc2:
4356 /* compare insns */
0af10c86 4357 val = cpu_ldub_code(env, s->pc++);
4358 if (val >= 8)
4359 goto illegal_op;
d3eb5eae 4360 sse_fn_epp = sse_op_table4[val][b1];
c4baa050 4361
4362 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4363 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
d3eb5eae 4364 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
664e0f19 4365 break;
4366 case 0xf7:
4367 /* maskmov : we must prepare A0 */
4368 if (mod != 3)
4369 goto illegal_op;
4370 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EDI]);
4371 gen_extu(s->aflag, cpu_A0);
4372 gen_add_A0_ds_seg(s);
4373
4374 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4375 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
c4baa050 4376 /* XXX: introduce a new table? */
4377 sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp;
4378 sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0);
b8b6a50b 4379 break;
664e0f19 4380 default:
4381 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4382 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
d3eb5eae 4383 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4384 break;
4385 }
4386 if (b == 0x2e || b == 0x2f) {
3ca51d07 4387 set_cc_op(s, CC_OP_EFLAGS);
4388 }
4389 }
4390}
4391
4392/* convert one instruction. s->is_jmp is set if the translation must
4393 be stopped. Return the next pc value */
4394static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
4395 target_ulong pc_start)
2c0262af 4396{
ab4e4aec 4397 int b, prefixes;
d67dc9e6 4398 int shift;
ab4e4aec 4399 TCGMemOp ot, aflag, dflag;
4eeb3939 4400 int modrm, reg, rm, mod, op, opreg, val;
4401 target_ulong next_eip, tval;
4402 int rex_w, rex_r;
2c0262af 4403
fdefe51c 4404 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
70cff25e 4405 tcg_gen_debug_insn_start(pc_start);
fdefe51c 4406 }
4407 s->pc = pc_start;
4408 prefixes = 0;
2c0262af 4409 s->override = -1;
4410 rex_w = -1;
4411 rex_r = 0;
4412#ifdef TARGET_X86_64
4413 s->rex_x = 0;
4414 s->rex_b = 0;
5fafdf24 4415 x86_64_hregs = 0;
4416#endif
4417 s->rip_offset = 0; /* for relative ip address */
4418 s->vex_l = 0;
4419 s->vex_v = 0;
2c0262af 4420 next_byte:
0af10c86 4421 b = cpu_ldub_code(env, s->pc);
2c0262af 4422 s->pc++;
4423 /* Collect prefixes. */
4424 switch (b) {
4425 case 0xf3:
4426 prefixes |= PREFIX_REPZ;
4427 goto next_byte;
4428 case 0xf2:
4429 prefixes |= PREFIX_REPNZ;
4430 goto next_byte;
4431 case 0xf0:
4432 prefixes |= PREFIX_LOCK;
4433 goto next_byte;
4434 case 0x2e:
4435 s->override = R_CS;
4436 goto next_byte;
4437 case 0x36:
4438 s->override = R_SS;
4439 goto next_byte;
4440 case 0x3e:
4441 s->override = R_DS;
4442 goto next_byte;
4443 case 0x26:
4444 s->override = R_ES;
4445 goto next_byte;
4446 case 0x64:
4447 s->override = R_FS;
4448 goto next_byte;
4449 case 0x65:
4450 s->override = R_GS;
4451 goto next_byte;
4452 case 0x66:
4453 prefixes |= PREFIX_DATA;
4454 goto next_byte;
4455 case 0x67:
4456 prefixes |= PREFIX_ADR;
4457 goto next_byte;
14ce26e7 4458#ifdef TARGET_X86_64
4459 case 0x40 ... 0x4f:
4460 if (CODE64(s)) {
4461 /* REX prefix */
4462 rex_w = (b >> 3) & 1;
4463 rex_r = (b & 0x4) << 1;
4464 s->rex_x = (b & 0x2) << 2;
4465 REX_B(s) = (b & 0x1) << 3;
4466 x86_64_hregs = 1; /* select uniform byte register addressing */
4467 goto next_byte;
4468 }
4469 break;
4470#endif
4471 case 0xc5: /* 2-byte VEX */
4472 case 0xc4: /* 3-byte VEX */
4473 /* VEX prefixes cannot be used except in 32-bit mode.
4474 Otherwise the instruction is LES or LDS. */
4475 if (s->code32 && !s->vm86) {
4476 static const int pp_prefix[4] = {
4477 0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ
4478 };
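            /* VEX layout (R/X/B/vvvv are stored complemented):
               2-byte c5: R.vvvv.L.pp
               3-byte c4: R.X.B.m-mmmm followed by W.vvvv.L.pp
               so bit 7 of the byte read here always yields REX.R. */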
4479 int vex3, vex2 = cpu_ldub_code(env, s->pc);
4480
4481 if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
4482 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
4483 otherwise the instruction is LES or LDS. */
4484 break;
4485 }
4486 s->pc++;
4487
085d8134 4488 /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
4489 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ
4490 | PREFIX_LOCK | PREFIX_DATA)) {
4491 goto illegal_op;
4492 }
4493#ifdef TARGET_X86_64
4494 if (x86_64_hregs) {
4495 goto illegal_op;
4496 }
4497#endif
4498 rex_r = (~vex2 >> 4) & 8;
4499 if (b == 0xc5) {
4500 vex3 = vex2;
4501 b = cpu_ldub_code(env, s->pc++);
4502 } else {
4503#ifdef TARGET_X86_64
4504 s->rex_x = (~vex2 >> 3) & 8;
4505 s->rex_b = (~vex2 >> 2) & 8;
4506#endif
4507 vex3 = cpu_ldub_code(env, s->pc++);
4508 rex_w = (vex3 >> 7) & 1;
4509 switch (vex2 & 0x1f) {
4510 case 0x01: /* Implied 0f leading opcode bytes. */
4511 b = cpu_ldub_code(env, s->pc++) | 0x100;
4512 break;
4513 case 0x02: /* Implied 0f 38 leading opcode bytes. */
4514 b = 0x138;
4515 break;
4516 case 0x03: /* Implied 0f 3a leading opcode bytes. */
4517 b = 0x13a;
4518 break;
4519 default: /* Reserved for future use. */
4520 goto illegal_op;
4521 }
4522 }
4523 s->vex_v = (~vex3 >> 3) & 0xf;
4524 s->vex_l = (vex3 >> 2) & 1;
4525 prefixes |= pp_prefix[vex3 & 3] | PREFIX_VEX;
4526 }
4527 break;
4528 }
4529
4530 /* Post-process prefixes. */
4a6fd938 4531 if (CODE64(s)) {
4532 /* In 64-bit mode, the default data size is 32-bit. Select 64-bit
4533 data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
4534 over 0x66 if both are present. */
ab4e4aec 4535 dflag = (rex_w > 0 ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32);
dec3fc96 4536 /* In 64-bit mode, 0x67 selects 32-bit addressing. */
1d71ddb1 4537 aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64);
4538 } else {
4539 /* In 16/32-bit mode, 0x66 selects the opposite data size. */
4540 if (s->code32 ^ ((prefixes & PREFIX_DATA) != 0)) {
4541 dflag = MO_32;
4542 } else {
4543 dflag = MO_16;
14ce26e7 4544 }
dec3fc96 4545 /* In 16/32-bit mode, 0x67 selects the opposite addressing. */
4546 if (s->code32 ^ ((prefixes & PREFIX_ADR) != 0)) {
4547 aflag = MO_32;
4548 } else {
4549 aflag = MO_16;
14ce26e7 4550 }
4551 }
4552
4553 s->prefix = prefixes;
4554 s->aflag = aflag;
4555 s->dflag = dflag;
4556
4557 /* lock generation */
4558 if (prefixes & PREFIX_LOCK)
a7812ae4 4559 gen_helper_lock();
4560
4561 /* now check op code */
4562 reswitch:
4563 switch(b) {
4564 case 0x0f:
4565 /**************************/
4566 /* extended op code */
0af10c86 4567 b = cpu_ldub_code(env, s->pc++) | 0x100;
2c0262af 4568 goto reswitch;
3b46e624 4569
4570 /**************************/
4571 /* arith & logic */
4572 case 0x00 ... 0x05:
4573 case 0x08 ... 0x0d:
4574 case 0x10 ... 0x15:
4575 case 0x18 ... 0x1d:
4576 case 0x20 ... 0x25:
4577 case 0x28 ... 0x2d:
4578 case 0x30 ... 0x35:
4579 case 0x38 ... 0x3d:
4580 {
4581 int op, f, val;
4582 op = (b >> 3) & 7;
4583 f = (b >> 1) & 3;
4584
ab4e4aec 4585 ot = mo_b_d(b, dflag);
3b46e624 4586
4587 switch(f) {
4588 case 0: /* OP Ev, Gv */
0af10c86 4589 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 4590 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af 4591 mod = (modrm >> 6) & 3;
14ce26e7 4592 rm = (modrm & 7) | REX_B(s);
2c0262af 4593 if (mod != 3) {
4eeb3939 4594 gen_lea_modrm(env, s, modrm);
4595 opreg = OR_TMP0;
4596 } else if (op == OP_XORL && rm == reg) {
4597 xor_zero:
4598 /* xor reg, reg optimisation */
436ff2d2 4599 set_cc_op(s, CC_OP_CLR);
97212c88 4600 tcg_gen_movi_tl(cpu_T[0], 0);
480a762d 4601 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
4602 break;
4603 } else {
4604 opreg = rm;
4605 }
c56baccf 4606 gen_op_mov_v_reg(ot, cpu_T[1], reg);
4607 gen_op(s, op, ot, opreg);
4608 break;
4609 case 1: /* OP Gv, Ev */
0af10c86 4610 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 4611 mod = (modrm >> 6) & 3;
4612 reg = ((modrm >> 3) & 7) | rex_r;
4613 rm = (modrm & 7) | REX_B(s);
2c0262af 4614 if (mod != 3) {
4eeb3939 4615 gen_lea_modrm(env, s, modrm);
0f712e10 4616 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
4617 } else if (op == OP_XORL && rm == reg) {
4618 goto xor_zero;
4619 } else {
c56baccf 4620 gen_op_mov_v_reg(ot, cpu_T[1], rm);
4621 }
4622 gen_op(s, op, ot, reg);
4623 break;
4624 case 2: /* OP A, Iv */
0af10c86 4625 val = insn_get(env, s, ot);
0ae657b1 4626 tcg_gen_movi_tl(cpu_T[1], val);
4627 gen_op(s, op, ot, OR_EAX);
4628 break;
4629 }
4630 }
4631 break;
4632
4633 case 0x82:
4634 if (CODE64(s))
4635 goto illegal_op;
4636 case 0x80: /* GRP1 */
4637 case 0x81:
4638 case 0x83:
4639 {
4640 int val;
4641
ab4e4aec 4642 ot = mo_b_d(b, dflag);
3b46e624 4643
0af10c86 4644 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 4645 mod = (modrm >> 6) & 3;
14ce26e7 4646 rm = (modrm & 7) | REX_B(s);
2c0262af 4647 op = (modrm >> 3) & 7;
3b46e624 4648
2c0262af 4649 if (mod != 3) {
4650 if (b == 0x83)
4651 s->rip_offset = 1;
4652 else
4653 s->rip_offset = insn_const_size(ot);
4eeb3939 4654 gen_lea_modrm(env, s, modrm);
4655 opreg = OR_TMP0;
4656 } else {
14ce26e7 4657 opreg = rm;
4658 }
4659
4660 switch(b) {
4661 default:
4662 case 0x80:
4663 case 0x81:
d64477af 4664 case 0x82:
0af10c86 4665 val = insn_get(env, s, ot);
4666 break;
4667 case 0x83:
4ba9938c 4668 val = (int8_t)insn_get(env, s, MO_8);
4669 break;
4670 }
0ae657b1 4671 tcg_gen_movi_tl(cpu_T[1], val);
4672 gen_op(s, op, ot, opreg);
4673 }
4674 break;
4675
4676 /**************************/
4677 /* inc, dec, and other misc arith */
4678 case 0x40 ... 0x47: /* inc Gv */
ab4e4aec 4679 ot = dflag;
4680 gen_inc(s, ot, OR_EAX + (b & 7), 1);
4681 break;
4682 case 0x48 ... 0x4f: /* dec Gv */
ab4e4aec 4683 ot = dflag;
4684 gen_inc(s, ot, OR_EAX + (b & 7), -1);
4685 break;
4686 case 0xf6: /* GRP3 */
4687 case 0xf7:
ab4e4aec 4688 ot = mo_b_d(b, dflag);
2c0262af 4689
0af10c86 4690 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 4691 mod = (modrm >> 6) & 3;
14ce26e7 4692 rm = (modrm & 7) | REX_B(s);
4693 op = (modrm >> 3) & 7;
4694 if (mod != 3) {
4695 if (op == 0)
4696 s->rip_offset = insn_const_size(ot);
4eeb3939 4697 gen_lea_modrm(env, s, modrm);
909be183 4698 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
2c0262af 4699 } else {
c56baccf 4700 gen_op_mov_v_reg(ot, cpu_T[0], rm);
4701 }
4702
4703 switch(op) {
4704 case 0: /* test */
0af10c86 4705 val = insn_get(env, s, ot);
0ae657b1 4706 tcg_gen_movi_tl(cpu_T[1], val);
2c0262af 4707 gen_op_testl_T0_T1_cc();
3ca51d07 4708 set_cc_op(s, CC_OP_LOGICB + ot);
4709 break;
4710 case 2: /* not */
b6abf97d 4711 tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
2c0262af 4712 if (mod != 3) {
fd8ca9f6 4713 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
2c0262af 4714 } else {
480a762d 4715 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
4716 }
4717 break;
4718 case 3: /* neg */
b6abf97d 4719 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
2c0262af 4720 if (mod != 3) {
fd8ca9f6 4721 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
2c0262af 4722 } else {
480a762d 4723 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
4724 }
4725 gen_op_update_neg_cc();
3ca51d07 4726 set_cc_op(s, CC_OP_SUBB + ot);
4727 break;
4728 case 4: /* mul */
4729 switch(ot) {
4ba9938c 4730 case MO_8:
c56baccf 4731 gen_op_mov_v_reg(MO_8, cpu_T[1], R_EAX);
4732 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
4733 tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
4734 /* XXX: use 32 bit mul which could be faster */
4735 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
480a762d 4736 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
4737 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4738 tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
3ca51d07 4739 set_cc_op(s, CC_OP_MULB);
2c0262af 4740 break;
4ba9938c 4741 case MO_16:
c56baccf 4742 gen_op_mov_v_reg(MO_16, cpu_T[1], R_EAX);
4743 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4744 tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
4745 /* XXX: use 32 bit mul which could be faster */
4746 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
480a762d 4747 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
4748 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4749 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
480a762d 4750 gen_op_mov_reg_v(MO_16, R_EDX, cpu_T[0]);
0211e5af 4751 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
3ca51d07 4752 set_cc_op(s, CC_OP_MULW);
4753 break;
4754 default:
4ba9938c 4755 case MO_32:
4756 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4757 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
4758 tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
4759 cpu_tmp2_i32, cpu_tmp3_i32);
4760 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
4761 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
4762 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4763 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
3ca51d07 4764 set_cc_op(s, CC_OP_MULL);
2c0262af 4765 break;
14ce26e7 4766#ifdef TARGET_X86_64
4ba9938c 4767 case MO_64:
4768 tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
4769 cpu_T[0], cpu_regs[R_EAX]);
4770 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4771 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
3ca51d07 4772 set_cc_op(s, CC_OP_MULQ);
4773 break;
4774#endif
2c0262af 4775 }
4776 break;
4777 case 5: /* imul */
4778 switch(ot) {
4ba9938c 4779 case MO_8:
c56baccf 4780 gen_op_mov_v_reg(MO_8, cpu_T[1], R_EAX);
4781 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4782 tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
4783 /* XXX: use 32 bit mul which could be faster */
4784 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
480a762d 4785 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
4786 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4787 tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
4788 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
3ca51d07 4789 set_cc_op(s, CC_OP_MULB);
2c0262af 4790 break;
4ba9938c 4791 case MO_16:
c56baccf 4792 gen_op_mov_v_reg(MO_16, cpu_T[1], R_EAX);
4793 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4794 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
4795 /* XXX: use 32 bit mul which could be faster */
4796 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
480a762d 4797 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
4798 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4799 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
4800 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4801 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
480a762d 4802 gen_op_mov_reg_v(MO_16, R_EDX, cpu_T[0]);
3ca51d07 4803 set_cc_op(s, CC_OP_MULW);
4804 break;
4805 default:
4ba9938c 4806 case MO_32:
4807 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4808 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
4809 tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
4810 cpu_tmp2_i32, cpu_tmp3_i32);
4811 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
4812 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
4813 tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
4814 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4815 tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
4816 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
3ca51d07 4817 set_cc_op(s, CC_OP_MULL);
2c0262af 4818 break;
14ce26e7 4819#ifdef TARGET_X86_64
4ba9938c 4820 case MO_64:
4821 tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
4822 cpu_T[0], cpu_regs[R_EAX]);
4823 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4824 tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
4825 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
3ca51d07 4826 set_cc_op(s, CC_OP_MULQ);
4827 break;
4828#endif
2c0262af 4829 }
4830 break;
4831 case 6: /* div */
4832 switch(ot) {
4ba9938c 4833 case MO_8:
7923057b 4834 gen_helper_divb_AL(cpu_env, cpu_T[0]);
2c0262af 4835 break;
4ba9938c 4836 case MO_16:
7923057b 4837 gen_helper_divw_AX(cpu_env, cpu_T[0]);
4838 break;
4839 default:
4ba9938c 4840 case MO_32:
7923057b 4841 gen_helper_divl_EAX(cpu_env, cpu_T[0]);
4842 break;
4843#ifdef TARGET_X86_64
4ba9938c 4844 case MO_64:
7923057b 4845 gen_helper_divq_EAX(cpu_env, cpu_T[0]);
2c0262af 4846 break;
14ce26e7 4847#endif
4848 }
4849 break;
4850 case 7: /* idiv */
4851 switch(ot) {
4ba9938c 4852 case MO_8:
7923057b 4853 gen_helper_idivb_AL(cpu_env, cpu_T[0]);
2c0262af 4854 break;
4ba9938c 4855 case MO_16:
7923057b 4856 gen_helper_idivw_AX(cpu_env, cpu_T[0]);
4857 break;
4858 default:
4ba9938c 4859 case MO_32:
7923057b 4860 gen_helper_idivl_EAX(cpu_env, cpu_T[0]);
4861 break;
4862#ifdef TARGET_X86_64
4ba9938c 4863 case MO_64:
7923057b 4864 gen_helper_idivq_EAX(cpu_env, cpu_T[0]);
2c0262af 4865 break;
14ce26e7 4866#endif
4867 }
4868 break;
4869 default:
4870 goto illegal_op;
4871 }
4872 break;
4873
4874 case 0xfe: /* GRP4 */
4875 case 0xff: /* GRP5 */
ab4e4aec 4876 ot = mo_b_d(b, dflag);
2c0262af 4877
0af10c86 4878 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 4879 mod = (modrm >> 6) & 3;
14ce26e7 4880 rm = (modrm & 7) | REX_B(s);
4881 op = (modrm >> 3) & 7;
4882 if (op >= 2 && b == 0xfe) {
4883 goto illegal_op;
4884 }
14ce26e7 4885 if (CODE64(s)) {
aba9d61e 4886 if (op == 2 || op == 4) {
14ce26e7 4887 /* operand size for jumps is 64 bit */
4ba9938c 4888 ot = MO_64;
aba9d61e 4889 } else if (op == 3 || op == 5) {
ab4e4aec 4890 ot = dflag != MO_16 ? MO_32 + (rex_w == 1) : MO_16;
4891 } else if (op == 6) {
4892 /* default push size is 64 bit */
ab4e4aec 4893 ot = mo_pushpop(s, dflag);
4894 }
4895 }
2c0262af 4896 if (mod != 3) {
4eeb3939 4897 gen_lea_modrm(env, s, modrm);
2c0262af 4898 if (op >= 2 && op != 3 && op != 5)
909be183 4899 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
2c0262af 4900 } else {
c56baccf 4901 gen_op_mov_v_reg(ot, cpu_T[0], rm);
4902 }
4903
4904 switch(op) {
4905 case 0: /* inc Ev */
4906 if (mod != 3)
4907 opreg = OR_TMP0;
4908 else
4909 opreg = rm;
4910 gen_inc(s, ot, opreg, 1);
4911 break;
4912 case 1: /* dec Ev */
4913 if (mod != 3)
4914 opreg = OR_TMP0;
4915 else
4916 opreg = rm;
4917 gen_inc(s, ot, opreg, -1);
4918 break;
4919 case 2: /* call Ev */
4f31916f 4920 /* XXX: optimize if memory (no 'and' is necessary) */
ab4e4aec 4921 if (dflag == MO_16) {
4922 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4923 }
2c0262af 4924 next_eip = s->pc - s->cs_base;
cc0bce88 4925 tcg_gen_movi_tl(cpu_T[1], next_eip);
432baffe 4926 gen_push_v(s, cpu_T[1]);
74bdfbda 4927 gen_op_jmp_v(cpu_T[0]);
4928 gen_eob(s);
4929 break;
61382a50 4930 case 3: /* lcall Ev */
0f712e10 4931 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
830a19a4 4932 gen_add_A0_im(s, 1 << ot);
cc1a80df 4933 gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
4934 do_lcall:
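            /* In protected mode the far call is performed entirely by the
               lcall_protected helper; it receives the operand size
               (dflag - 1) and next_eip so that the segment checks can
               raise their exceptions with the correct return address. */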
4935 if (s->pe && !s->vm86) {
b6abf97d 4936 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2999a0b2 4937 gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
ab4e4aec 4938 tcg_const_i32(dflag - 1),
100ec099 4939 tcg_const_tl(s->pc - s->cs_base));
2c0262af 4940 } else {
b6abf97d 4941 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2999a0b2 4942 gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T[1],
ab4e4aec 4943 tcg_const_i32(dflag - 1),
a7812ae4 4944 tcg_const_i32(s->pc - s->cs_base));
4945 }
4946 gen_eob(s);
4947 break;
4948 case 4: /* jmp Ev */
ab4e4aec 4949 if (dflag == MO_16) {
4950 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4951 }
74bdfbda 4952 gen_op_jmp_v(cpu_T[0]);
4953 gen_eob(s);
4954 break;
4955 case 5: /* ljmp Ev */
0f712e10 4956 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
830a19a4 4957 gen_add_A0_im(s, 1 << ot);
cc1a80df 4958 gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
4959 do_ljmp:
4960 if (s->pe && !s->vm86) {
b6abf97d 4961 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2999a0b2 4962 gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
100ec099 4963 tcg_const_tl(s->pc - s->cs_base));
2c0262af 4964 } else {
3bd7da9e 4965 gen_op_movl_seg_T0_vm(R_CS);
78261634 4966 gen_op_jmp_v(cpu_T[1]);
4967 }
4968 gen_eob(s);
4969 break;
4970 case 6: /* push Ev */
432baffe 4971 gen_push_v(s, cpu_T[0]);
4972 break;
4973 default:
4974 goto illegal_op;
4975 }
4976 break;
4977
4978 case 0x84: /* test Ev, Gv */
5fafdf24 4979 case 0x85:
ab4e4aec 4980 ot = mo_b_d(b, dflag);
2c0262af 4981
0af10c86 4982 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 4983 reg = ((modrm >> 3) & 7) | rex_r;
3b46e624 4984
0af10c86 4985 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
c56baccf 4986 gen_op_mov_v_reg(ot, cpu_T[1], reg);
2c0262af 4987 gen_op_testl_T0_T1_cc();
3ca51d07 4988 set_cc_op(s, CC_OP_LOGICB + ot);
2c0262af 4989 break;
3b46e624 4990
4991 case 0xa8: /* test eAX, Iv */
4992 case 0xa9:
ab4e4aec 4993 ot = mo_b_d(b, dflag);
0af10c86 4994 val = insn_get(env, s, ot);
2c0262af 4995
c56baccf 4996 gen_op_mov_v_reg(ot, cpu_T[0], OR_EAX);
0ae657b1 4997 tcg_gen_movi_tl(cpu_T[1], val);
2c0262af 4998 gen_op_testl_T0_T1_cc();
3ca51d07 4999 set_cc_op(s, CC_OP_LOGICB + ot);
2c0262af 5000 break;
3b46e624 5001
2c0262af 5002 case 0x98: /* CWDE/CBW */
ab4e4aec 5003 switch (dflag) {
14ce26e7 5004#ifdef TARGET_X86_64
ab4e4aec 5005 case MO_64:
c56baccf 5006 gen_op_mov_v_reg(MO_32, cpu_T[0], R_EAX);
e108dd01 5007 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
480a762d 5008 gen_op_mov_reg_v(MO_64, R_EAX, cpu_T[0]);
ab4e4aec 5009 break;
14ce26e7 5010#endif
ab4e4aec 5011 case MO_32:
c56baccf 5012 gen_op_mov_v_reg(MO_16, cpu_T[0], R_EAX);
e108dd01 5013 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
480a762d 5014 gen_op_mov_reg_v(MO_32, R_EAX, cpu_T[0]);
5015 break;
5016 case MO_16:
c56baccf 5017 gen_op_mov_v_reg(MO_8, cpu_T[0], R_EAX);
e108dd01 5018 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
480a762d 5019 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
5020 break;
5021 default:
5022 tcg_abort();
e108dd01 5023 }
5024 break;
5025 case 0x99: /* CDQ/CWD */
ab4e4aec 5026 switch (dflag) {
14ce26e7 5027#ifdef TARGET_X86_64
ab4e4aec 5028 case MO_64:
c56baccf 5029 gen_op_mov_v_reg(MO_64, cpu_T[0], R_EAX);
e108dd01 5030 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
480a762d 5031 gen_op_mov_reg_v(MO_64, R_EDX, cpu_T[0]);
ab4e4aec 5032 break;
14ce26e7 5033#endif
ab4e4aec 5034 case MO_32:
c56baccf 5035 gen_op_mov_v_reg(MO_32, cpu_T[0], R_EAX);
5036 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
5037 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
480a762d 5038 gen_op_mov_reg_v(MO_32, R_EDX, cpu_T[0]);
5039 break;
5040 case MO_16:
c56baccf 5041 gen_op_mov_v_reg(MO_16, cpu_T[0], R_EAX);
5042 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5043 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
480a762d 5044 gen_op_mov_reg_v(MO_16, R_EDX, cpu_T[0]);
5045 break;
5046 default:
5047 tcg_abort();
e108dd01 5048 }
5049 break;
5050 case 0x1af: /* imul Gv, Ev */
5051 case 0x69: /* imul Gv, Ev, I */
5052 case 0x6b:
ab4e4aec 5053 ot = dflag;
0af10c86 5054 modrm = cpu_ldub_code(env, s->pc++);
5055 reg = ((modrm >> 3) & 7) | rex_r;
5056 if (b == 0x69)
5057 s->rip_offset = insn_const_size(ot);
5058 else if (b == 0x6b)
5059 s->rip_offset = 1;
0af10c86 5060 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
2c0262af 5061 if (b == 0x69) {
0af10c86 5062 val = insn_get(env, s, ot);
0ae657b1 5063 tcg_gen_movi_tl(cpu_T[1], val);
2c0262af 5064 } else if (b == 0x6b) {
4ba9938c 5065 val = (int8_t)insn_get(env, s, MO_8);
0ae657b1 5066 tcg_gen_movi_tl(cpu_T[1], val);
2c0262af 5067 } else {
c56baccf 5068 gen_op_mov_v_reg(ot, cpu_T[1], reg);
2c0262af 5069 }
a4bcea3d 5070 switch (ot) {
0211e5af 5071#ifdef TARGET_X86_64
4ba9938c 5072 case MO_64:
a4bcea3d
RH
5073 tcg_gen_muls2_i64(cpu_regs[reg], cpu_T[1], cpu_T[0], cpu_T[1]);
5074 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
5075 tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
5076 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T[1]);
5077 break;
0211e5af 5078#endif
4ba9938c 5079 case MO_32:
a4bcea3d
RH
5080 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5081 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
5082 tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
5083 cpu_tmp2_i32, cpu_tmp3_i32);
5084 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
5085 tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
5086 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
5087 tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
5088 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
5089 break;
5090 default:
0211e5af
FB
5091 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5092 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
5093 /* XXX: use 32 bit mul which could be faster */
5094 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5095 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
5096 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
5097 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
480a762d 5098 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
a4bcea3d 5099 break;
2c0262af 5100 }
3ca51d07 5101 set_cc_op(s, CC_OP_MULB + ot);
2c0262af
FB
5102 break;
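For IMUL the generated code sets CC_SRC to the difference between the high half of the product and a sign fill of the low half, so CF/OF read as "the truncated result lost information". The same check in standalone C for the 32-bit case (hypothetical helper name, illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: CF/OF after IMUL are set when the full signed product
   does not fit in the destination width, i.e. when the high half differs
   from a sign fill of the low half. */
static int imul32_overflows(int32_t a, int32_t b)
{
    int64_t full = (int64_t)a * b;
    return full != (int64_t)(int32_t)full;
}

int main(void)
{
    printf("%d\n", imul32_overflows(0x10000, 0x10000));  /* 1: 2^32 overflows  */
    printf("%d\n", imul32_overflows(-46340, 46340));     /* 0: fits in 32 bits */
    return 0;
}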
5103 case 0x1c0:
5104 case 0x1c1: /* xadd Ev, Gv */
ab4e4aec 5105 ot = mo_b_d(b, dflag);
0af10c86 5106 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5107 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af
FB
5108 mod = (modrm >> 6) & 3;
5109 if (mod == 3) {
14ce26e7 5110 rm = (modrm & 7) | REX_B(s);
c56baccf
RH
5111 gen_op_mov_v_reg(ot, cpu_T[0], reg);
5112 gen_op_mov_v_reg(ot, cpu_T[1], rm);
fd5185ec 5113 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
68773f84 5114 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
480a762d 5115 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
2c0262af 5116 } else {
4eeb3939 5117 gen_lea_modrm(env, s, modrm);
c56baccf 5118 gen_op_mov_v_reg(ot, cpu_T[0], reg);
0f712e10 5119 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
fd5185ec 5120 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
fd8ca9f6 5121 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
68773f84 5122 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
2c0262af
FB
5123 }
5124 gen_op_update2_cc();
3ca51d07 5125 set_cc_op(s, CC_OP_ADDB + ot);
2c0262af
FB
5126 break;
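XADD exchanges the two operands and stores their sum in the destination, exactly the T0/T1 shuffle above. A minimal single-threaded C sketch (no LOCK semantics, hypothetical helper name):

#include <stdio.h>

/* Illustrative only: the old destination value ends up in the source
   operand, the sum ends up in the destination. */
static void xadd32(unsigned *dst, unsigned *src)
{
    unsigned old = *dst;
    *dst = old + *src;
    *src = old;
}

int main(void)
{
    unsigned mem = 10, reg = 3;
    xadd32(&mem, &reg);
    printf("mem=%u reg=%u\n", mem, reg);   /* mem=13 reg=10 */
    return 0;
}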
5127 case 0x1b0:
5128 case 0x1b1: /* cmpxchg Ev, Gv */
cad3a37d 5129 {
42a268c2 5130 TCGLabel *label1, *label2;
1e4840bf 5131 TCGv t0, t1, t2, a0;
cad3a37d 5132
ab4e4aec 5133 ot = mo_b_d(b, dflag);
0af10c86 5134 modrm = cpu_ldub_code(env, s->pc++);
cad3a37d
FB
5135 reg = ((modrm >> 3) & 7) | rex_r;
5136 mod = (modrm >> 6) & 3;
a7812ae4
PB
5137 t0 = tcg_temp_local_new();
5138 t1 = tcg_temp_local_new();
5139 t2 = tcg_temp_local_new();
5140 a0 = tcg_temp_local_new();
1e4840bf 5141 gen_op_mov_v_reg(ot, t1, reg);
cad3a37d
FB
5142 if (mod == 3) {
5143 rm = (modrm & 7) | REX_B(s);
1e4840bf 5144 gen_op_mov_v_reg(ot, t0, rm);
cad3a37d 5145 } else {
4eeb3939 5146 gen_lea_modrm(env, s, modrm);
1e4840bf 5147 tcg_gen_mov_tl(a0, cpu_A0);
323d1876 5148 gen_op_ld_v(s, ot, t0, a0);
cad3a37d
FB
5149 rm = 0; /* avoid warning */
5150 }
5151 label1 = gen_new_label();
a3251186
RH
5152 tcg_gen_mov_tl(t2, cpu_regs[R_EAX]);
5153 gen_extu(ot, t0);
1e4840bf 5154 gen_extu(ot, t2);
a3251186 5155 tcg_gen_brcond_tl(TCG_COND_EQ, t2, t0, label1);
f7e80adf 5156 label2 = gen_new_label();
cad3a37d 5157 if (mod == 3) {
1e4840bf 5158 gen_op_mov_reg_v(ot, R_EAX, t0);
1130328e
FB
5159 tcg_gen_br(label2);
5160 gen_set_label(label1);
1e4840bf 5161 gen_op_mov_reg_v(ot, rm, t1);
cad3a37d 5162 } else {
f7e80adf
AG
5163 /* perform no-op store cycle like physical cpu; must be
5164 before changing accumulator to ensure idempotency if
5165 the store faults and the instruction is restarted */
323d1876 5166 gen_op_st_v(s, ot, t0, a0);
1e4840bf 5167 gen_op_mov_reg_v(ot, R_EAX, t0);
f7e80adf 5168 tcg_gen_br(label2);
1130328e 5169 gen_set_label(label1);
323d1876 5170 gen_op_st_v(s, ot, t1, a0);
cad3a37d 5171 }
f7e80adf 5172 gen_set_label(label2);
1e4840bf 5173 tcg_gen_mov_tl(cpu_cc_src, t0);
a3251186
RH
5174 tcg_gen_mov_tl(cpu_cc_srcT, t2);
5175 tcg_gen_sub_tl(cpu_cc_dst, t2, t0);
3ca51d07 5176 set_cc_op(s, CC_OP_SUBB + ot);
1e4840bf
FB
5177 tcg_temp_free(t0);
5178 tcg_temp_free(t1);
5179 tcg_temp_free(t2);
5180 tcg_temp_free(a0);
2c0262af 5181 }
2c0262af
FB
5182 break;
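CMPXCHG compares the accumulator with the destination, stores the source on a match and otherwise loads the destination back into the accumulator; the memory path above also replays a store of the unchanged value so a faulting store happens before EAX is modified. A non-atomic C sketch of the visible semantics (illustrative only, hypothetical helper name):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only, no LOCK semantics: returns the ZF result of the
   implicit comparison. */
static int cmpxchg32(uint32_t *dst, uint32_t *eax, uint32_t src)
{
    if (*dst == *eax) {
        *dst = src;
        return 1;          /* ZF = 1: exchange performed */
    }
    *eax = *dst;
    return 0;              /* ZF = 0: accumulator updated instead */
}

int main(void)
{
    uint32_t mem = 7, eax = 7;

    int zf = cmpxchg32(&mem, &eax, 42);
    printf("ZF=%d mem=%u\n", zf, (unsigned)mem);   /* ZF=1 mem=42 */

    zf = cmpxchg32(&mem, &eax, 99);
    printf("ZF=%d eax=%u\n", zf, (unsigned)eax);   /* ZF=0 eax=42 */
    return 0;
}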
5183 case 0x1c7: /* cmpxchg8b */
0af10c86 5184 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 5185 mod = (modrm >> 6) & 3;
71c3558e 5186 if ((mod == 3) || ((modrm & 0x38) != 0x8))
2c0262af 5187 goto illegal_op;
1b9d9ebb 5188#ifdef TARGET_X86_64
ab4e4aec 5189 if (dflag == MO_64) {
1b9d9ebb
FB
5190 if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
5191 goto illegal_op;
4eeb3939 5192 gen_lea_modrm(env, s, modrm);
92fc4b58 5193 gen_helper_cmpxchg16b(cpu_env, cpu_A0);
1b9d9ebb
FB
5194 } else
5195#endif
5196 {
5197 if (!(s->cpuid_features & CPUID_CX8))
5198 goto illegal_op;
4eeb3939 5199 gen_lea_modrm(env, s, modrm);
92fc4b58 5200 gen_helper_cmpxchg8b(cpu_env, cpu_A0);
1b9d9ebb 5201 }
3ca51d07 5202 set_cc_op(s, CC_OP_EFLAGS);
2c0262af 5203 break;
3b46e624 5204
2c0262af
FB
5205 /**************************/
5206 /* push/pop */
5207 case 0x50 ... 0x57: /* push */
c56baccf 5208 gen_op_mov_v_reg(MO_32, cpu_T[0], (b & 7) | REX_B(s));
432baffe 5209 gen_push_v(s, cpu_T[0]);
2c0262af
FB
5210 break;
5211 case 0x58 ... 0x5f: /* pop */
8e31d234 5212 ot = gen_pop_T0(s);
77729c24 5213 /* NOTE: order is important for pop %sp */
8e31d234 5214 gen_pop_update(s, ot);
480a762d 5215 gen_op_mov_reg_v(ot, (b & 7) | REX_B(s), cpu_T[0]);
2c0262af
FB
5216 break;
5217 case 0x60: /* pusha */
14ce26e7
FB
5218 if (CODE64(s))
5219 goto illegal_op;
2c0262af
FB
5220 gen_pusha(s);
5221 break;
5222 case 0x61: /* popa */
14ce26e7
FB
5223 if (CODE64(s))
5224 goto illegal_op;
2c0262af
FB
5225 gen_popa(s);
5226 break;
5227 case 0x68: /* push Iv */
5228 case 0x6a:
ab4e4aec 5229 ot = mo_pushpop(s, dflag);
2c0262af 5230 if (b == 0x68)
0af10c86 5231 val = insn_get(env, s, ot);
2c0262af 5232 else
4ba9938c 5233 val = (int8_t)insn_get(env, s, MO_8);
1b90d56e 5234 tcg_gen_movi_tl(cpu_T[0], val);
432baffe 5235 gen_push_v(s, cpu_T[0]);
2c0262af
FB
5236 break;
5237 case 0x8f: /* pop Ev */
0af10c86 5238 modrm = cpu_ldub_code(env, s->pc++);
77729c24 5239 mod = (modrm >> 6) & 3;
8e31d234 5240 ot = gen_pop_T0(s);
77729c24
FB
5241 if (mod == 3) {
5242 /* NOTE: order is important for pop %sp */
8e31d234 5243 gen_pop_update(s, ot);
14ce26e7 5244 rm = (modrm & 7) | REX_B(s);
480a762d 5245 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
77729c24
FB
5246 } else {
5247 /* NOTE: order is important too for MMU exceptions */
14ce26e7 5248 s->popl_esp_hack = 1 << ot;
0af10c86 5249 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
77729c24 5250 s->popl_esp_hack = 0;
8e31d234 5251 gen_pop_update(s, ot);
77729c24 5252 }
2c0262af
FB
5253 break;
5254 case 0xc8: /* enter */
5255 {
5256 int level;
0af10c86 5257 val = cpu_lduw_code(env, s->pc);
2c0262af 5258 s->pc += 2;
0af10c86 5259 level = cpu_ldub_code(env, s->pc++);
2c0262af
FB
5260 gen_enter(s, val, level);
5261 }
5262 break;
5263 case 0xc9: /* leave */
5264 /* XXX: exception not precise (ESP is updated before potential exception) */
14ce26e7 5265 if (CODE64(s)) {
c56baccf 5266 gen_op_mov_v_reg(MO_64, cpu_T[0], R_EBP);
480a762d 5267 gen_op_mov_reg_v(MO_64, R_ESP, cpu_T[0]);
14ce26e7 5268 } else if (s->ss32) {
c56baccf 5269 gen_op_mov_v_reg(MO_32, cpu_T[0], R_EBP);
480a762d 5270 gen_op_mov_reg_v(MO_32, R_ESP, cpu_T[0]);
2c0262af 5271 } else {
c56baccf 5272 gen_op_mov_v_reg(MO_16, cpu_T[0], R_EBP);
480a762d 5273 gen_op_mov_reg_v(MO_16, R_ESP, cpu_T[0]);
2c0262af 5274 }
8e31d234 5275 ot = gen_pop_T0(s);
480a762d 5276 gen_op_mov_reg_v(ot, R_EBP, cpu_T[0]);
8e31d234 5277 gen_pop_update(s, ot);
2c0262af
FB
5278 break;
5279 case 0x06: /* push es */
5280 case 0x0e: /* push cs */
5281 case 0x16: /* push ss */
5282 case 0x1e: /* push ds */
14ce26e7
FB
5283 if (CODE64(s))
5284 goto illegal_op;
2c0262af 5285 gen_op_movl_T0_seg(b >> 3);
432baffe 5286 gen_push_v(s, cpu_T[0]);
2c0262af
FB
5287 break;
5288 case 0x1a0: /* push fs */
5289 case 0x1a8: /* push gs */
5290 gen_op_movl_T0_seg((b >> 3) & 7);
432baffe 5291 gen_push_v(s, cpu_T[0]);
2c0262af
FB
5292 break;
5293 case 0x07: /* pop es */
5294 case 0x17: /* pop ss */
5295 case 0x1f: /* pop ds */
14ce26e7
FB
5296 if (CODE64(s))
5297 goto illegal_op;
2c0262af 5298 reg = b >> 3;
8e31d234 5299 ot = gen_pop_T0(s);
100ec099 5300 gen_movl_seg_T0(s, reg);
8e31d234 5301 gen_pop_update(s, ot);
2c0262af 5302 if (reg == R_SS) {
a2cc3b24
FB
5303 /* if reg == SS, inhibit interrupts/trace. */
5304 /* If several instructions disable interrupts, only the
5305 _first_ does it */
5306 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
f0967a1a 5307 gen_helper_set_inhibit_irq(cpu_env);
2c0262af
FB
5308 s->tf = 0;
5309 }
5310 if (s->is_jmp) {
14ce26e7 5311 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
5312 gen_eob(s);
5313 }
5314 break;
5315 case 0x1a1: /* pop fs */
5316 case 0x1a9: /* pop gs */
8e31d234 5317 ot = gen_pop_T0(s);
100ec099 5318 gen_movl_seg_T0(s, (b >> 3) & 7);
8e31d234 5319 gen_pop_update(s, ot);
2c0262af 5320 if (s->is_jmp) {
14ce26e7 5321 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
5322 gen_eob(s);
5323 }
5324 break;
5325
5326 /**************************/
5327 /* mov */
5328 case 0x88:
5329 case 0x89: /* mov Gv, Ev */
ab4e4aec 5330 ot = mo_b_d(b, dflag);
0af10c86 5331 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5332 reg = ((modrm >> 3) & 7) | rex_r;
3b46e624 5333
2c0262af 5334 /* generate a generic store */
0af10c86 5335 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
2c0262af
FB
5336 break;
5337 case 0xc6:
5338 case 0xc7: /* mov Ev, Iv */
ab4e4aec 5339 ot = mo_b_d(b, dflag);
0af10c86 5340 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 5341 mod = (modrm >> 6) & 3;
14ce26e7
FB
5342 if (mod != 3) {
5343 s->rip_offset = insn_const_size(ot);
4eeb3939 5344 gen_lea_modrm(env, s, modrm);
14ce26e7 5345 }
0af10c86 5346 val = insn_get(env, s, ot);
1b90d56e 5347 tcg_gen_movi_tl(cpu_T[0], val);
fd8ca9f6
RH
5348 if (mod != 3) {
5349 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
5350 } else {
480a762d 5351 gen_op_mov_reg_v(ot, (modrm & 7) | REX_B(s), cpu_T[0]);
fd8ca9f6 5352 }
2c0262af
FB
5353 break;
5354 case 0x8a:
5355 case 0x8b: /* mov Ev, Gv */
ab4e4aec 5356 ot = mo_b_d(b, dflag);
0af10c86 5357 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5358 reg = ((modrm >> 3) & 7) | rex_r;
3b46e624 5359
0af10c86 5360 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
480a762d 5361 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
2c0262af
FB
5362 break;
5363 case 0x8e: /* mov seg, Gv */
0af10c86 5364 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
5365 reg = (modrm >> 3) & 7;
5366 if (reg >= 6 || reg == R_CS)
5367 goto illegal_op;
4ba9938c 5368 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
100ec099 5369 gen_movl_seg_T0(s, reg);
2c0262af
FB
5370 if (reg == R_SS) {
5371 /* if reg == SS, inhibit interrupts/trace */
a2cc3b24
FB
5372 /* If several instructions disable interrupts, only the
5373 _first_ does it */
5374 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
f0967a1a 5375 gen_helper_set_inhibit_irq(cpu_env);
2c0262af
FB
5376 s->tf = 0;
5377 }
5378 if (s->is_jmp) {
14ce26e7 5379 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
5380 gen_eob(s);
5381 }
5382 break;
5383 case 0x8c: /* mov Gv, seg */
0af10c86 5384 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
5385 reg = (modrm >> 3) & 7;
5386 mod = (modrm >> 6) & 3;
5387 if (reg >= 6)
5388 goto illegal_op;
5389 gen_op_movl_T0_seg(reg);
ab4e4aec 5390 ot = mod == 3 ? dflag : MO_16;
0af10c86 5391 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
2c0262af
FB
5392 break;
5393
5394 case 0x1b6: /* movzbS Gv, Eb */
5395 case 0x1b7: /* movzwS Gv, Eb */
5396 case 0x1be: /* movsbS Gv, Eb */
5397 case 0x1bf: /* movswS Gv, Eb */
5398 {
c8fbc479
RH
5399 TCGMemOp d_ot;
5400 TCGMemOp s_ot;
5401
2c0262af 5402 /* d_ot is the size of destination */
ab4e4aec 5403 d_ot = dflag;
2c0262af 5404 /* ot is the size of source */
4ba9938c 5405 ot = (b & 1) + MO_8;
c8fbc479
RH
5406 /* s_ot is the sign+size of source */
5407 s_ot = b & 8 ? MO_SIGN | ot : ot;
5408
0af10c86 5409 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5410 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af 5411 mod = (modrm >> 6) & 3;
14ce26e7 5412 rm = (modrm & 7) | REX_B(s);
3b46e624 5413
2c0262af 5414 if (mod == 3) {
c56baccf 5415 gen_op_mov_v_reg(ot, cpu_T[0], rm);
c8fbc479
RH
5416 switch (s_ot) {
5417 case MO_UB:
e108dd01 5418 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
2c0262af 5419 break;
c8fbc479 5420 case MO_SB:
e108dd01 5421 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
2c0262af 5422 break;
c8fbc479 5423 case MO_UW:
e108dd01 5424 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
2c0262af
FB
5425 break;
5426 default:
c8fbc479 5427 case MO_SW:
e108dd01 5428 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
2c0262af
FB
5429 break;
5430 }
480a762d 5431 gen_op_mov_reg_v(d_ot, reg, cpu_T[0]);
2c0262af 5432 } else {
4eeb3939 5433 gen_lea_modrm(env, s, modrm);
c8fbc479 5434 gen_op_ld_v(s, s_ot, cpu_T[0], cpu_A0);
480a762d 5435 gen_op_mov_reg_v(d_ot, reg, cpu_T[0]);
2c0262af
FB
5436 }
5437 }
5438 break;
5439
5440 case 0x8d: /* lea */
ab4e4aec 5441 ot = dflag;
0af10c86 5442 modrm = cpu_ldub_code(env, s->pc++);
3a1d9b8b
FB
5443 mod = (modrm >> 6) & 3;
5444 if (mod == 3)
5445 goto illegal_op;
14ce26e7 5446 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af
FB
5447 /* we must ensure that no segment is added */
5448 s->override = -1;
5449 val = s->addseg;
5450 s->addseg = 0;
4eeb3939 5451 gen_lea_modrm(env, s, modrm);
2c0262af 5452 s->addseg = val;
33b7891b 5453 gen_op_mov_reg_v(ot, reg, cpu_A0);
2c0262af 5454 break;
3b46e624 5455
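LEA is handled above by running the normal modrm address computation with segment bases disabled (override = -1, addseg = 0) and writing the raw effective address to the register, so it is pure arithmetic. An equivalent computation in standalone C (illustrative only):

#include <stdio.h>

/* Illustrative only: lea eax, [ebx + esi*4 + 8] is just base + index*scale
   + displacement, with no memory access and no segment base added. */
int main(void)
{
    unsigned ebx = 0x1000, esi = 3;
    unsigned eax = ebx + esi * 4 + 8;

    printf("eax=%#x\n", eax);   /* 0x1014 */
    return 0;
}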
2c0262af
FB
5456 case 0xa0: /* mov EAX, Ov */
5457 case 0xa1:
5458 case 0xa2: /* mov Ov, EAX */
5459 case 0xa3:
2c0262af 5460 {
14ce26e7
FB
5461 target_ulong offset_addr;
5462
ab4e4aec 5463 ot = mo_b_d(b, dflag);
1d71ddb1 5464 switch (s->aflag) {
14ce26e7 5465#ifdef TARGET_X86_64
1d71ddb1 5466 case MO_64:
0af10c86 5467 offset_addr = cpu_ldq_code(env, s->pc);
14ce26e7 5468 s->pc += 8;
1d71ddb1 5469 break;
14ce26e7 5470#endif
1d71ddb1
RH
5471 default:
5472 offset_addr = insn_get(env, s, s->aflag);
5473 break;
14ce26e7 5474 }
3250cff8 5475 tcg_gen_movi_tl(cpu_A0, offset_addr);
664e0f19 5476 gen_add_A0_ds_seg(s);
14ce26e7 5477 if ((b & 2) == 0) {
909be183 5478 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
480a762d 5479 gen_op_mov_reg_v(ot, R_EAX, cpu_T[0]);
14ce26e7 5480 } else {
c56baccf 5481 gen_op_mov_v_reg(ot, cpu_T[0], R_EAX);
fd8ca9f6 5482 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
2c0262af
FB
5483 }
5484 }
2c0262af
FB
5485 break;
5486 case 0xd7: /* xlat */
1d71ddb1
RH
5487 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EBX]);
5488 tcg_gen_ext8u_tl(cpu_T[0], cpu_regs[R_EAX]);
5489 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5490 gen_extu(s->aflag, cpu_A0);
664e0f19 5491 gen_add_A0_ds_seg(s);
cc1a80df 5492 gen_op_ld_v(s, MO_8, cpu_T[0], cpu_A0);
480a762d 5493 gen_op_mov_reg_v(MO_8, R_EAX, cpu_T[0]);
2c0262af
FB
5494 break;
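XLAT replaces AL with the byte at DS:[rBX + AL]; the A0 arithmetic above builds that address before the MO_8 load. The same table lookup in standalone C (illustrative only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t table[256];
    for (int i = 0; i < 256; i++) {
        table[i] = (uint8_t)(255 - i);   /* a toy translation table */
    }

    uint8_t al = 0x20;
    al = table[al];                      /* XLAT: AL = [EBX + AL] */

    printf("al=%#x\n", (unsigned)al);    /* 0xdf */
    return 0;
}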
5495 case 0xb0 ... 0xb7: /* mov R, Ib */
4ba9938c 5496 val = insn_get(env, s, MO_8);
1b90d56e 5497 tcg_gen_movi_tl(cpu_T[0], val);
480a762d 5498 gen_op_mov_reg_v(MO_8, (b & 7) | REX_B(s), cpu_T[0]);
2c0262af
FB
5499 break;
5500 case 0xb8 ... 0xbf: /* mov R, Iv */
14ce26e7 5501#ifdef TARGET_X86_64
ab4e4aec 5502 if (dflag == MO_64) {
14ce26e7
FB
5503 uint64_t tmp;
5504 /* 64 bit case */
0af10c86 5505 tmp = cpu_ldq_code(env, s->pc);
14ce26e7
FB
5506 s->pc += 8;
5507 reg = (b & 7) | REX_B(s);
cc0bce88 5508 tcg_gen_movi_tl(cpu_T[0], tmp);
480a762d 5509 gen_op_mov_reg_v(MO_64, reg, cpu_T[0]);
5fafdf24 5510 } else
14ce26e7
FB
5511#endif
5512 {
ab4e4aec 5513 ot = dflag;
0af10c86 5514 val = insn_get(env, s, ot);
14ce26e7 5515 reg = (b & 7) | REX_B(s);
1b90d56e 5516 tcg_gen_movi_tl(cpu_T[0], val);
480a762d 5517 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
14ce26e7 5518 }
2c0262af
FB
5519 break;
5520
5521 case 0x91 ... 0x97: /* xchg R, EAX */
7418027e 5522 do_xchg_reg_eax:
ab4e4aec 5523 ot = dflag;
14ce26e7 5524 reg = (b & 7) | REX_B(s);
2c0262af
FB
5525 rm = R_EAX;
5526 goto do_xchg_reg;
5527 case 0x86:
5528 case 0x87: /* xchg Ev, Gv */
ab4e4aec 5529 ot = mo_b_d(b, dflag);
0af10c86 5530 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5531 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af
FB
5532 mod = (modrm >> 6) & 3;
5533 if (mod == 3) {
14ce26e7 5534 rm = (modrm & 7) | REX_B(s);
2c0262af 5535 do_xchg_reg:
c56baccf
RH
5536 gen_op_mov_v_reg(ot, cpu_T[0], reg);
5537 gen_op_mov_v_reg(ot, cpu_T[1], rm);
480a762d 5538 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
68773f84 5539 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
2c0262af 5540 } else {
4eeb3939 5541 gen_lea_modrm(env, s, modrm);
c56baccf 5542 gen_op_mov_v_reg(ot, cpu_T[0], reg);
2c0262af
FB
5543 /* for xchg, lock is implicit */
5544 if (!(prefixes & PREFIX_LOCK))
a7812ae4 5545 gen_helper_lock();
0f712e10 5546 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
fd8ca9f6 5547 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
2c0262af 5548 if (!(prefixes & PREFIX_LOCK))
a7812ae4 5549 gen_helper_unlock();
68773f84 5550 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
2c0262af
FB
5551 }
5552 break;
5553 case 0xc4: /* les Gv */
701ed211 5554 /* In CODE64 this is VEX3; see above. */
2c0262af
FB
5555 op = R_ES;
5556 goto do_lxx;
5557 case 0xc5: /* lds Gv */
701ed211 5558 /* In CODE64 this is VEX2; see above. */
2c0262af
FB
5559 op = R_DS;
5560 goto do_lxx;
5561 case 0x1b2: /* lss Gv */
5562 op = R_SS;
5563 goto do_lxx;
5564 case 0x1b4: /* lfs Gv */
5565 op = R_FS;
5566 goto do_lxx;
5567 case 0x1b5: /* lgs Gv */
5568 op = R_GS;
5569 do_lxx:
ab4e4aec 5570 ot = dflag != MO_16 ? MO_32 : MO_16;
0af10c86 5571 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 5572 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af
FB
5573 mod = (modrm >> 6) & 3;
5574 if (mod == 3)
5575 goto illegal_op;
4eeb3939 5576 gen_lea_modrm(env, s, modrm);
0f712e10 5577 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
830a19a4 5578 gen_add_A0_im(s, 1 << ot);
2c0262af 5579 /* load the segment first to handle exceptions properly */
cc1a80df 5580 gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
100ec099 5581 gen_movl_seg_T0(s, op);
2c0262af 5582 /* then put the data */
68773f84 5583 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
2c0262af 5584 if (s->is_jmp) {
14ce26e7 5585 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
5586 gen_eob(s);
5587 }
5588 break;
3b46e624 5589
2c0262af
FB
5590 /************************/
5591 /* shifts */
5592 case 0xc0:
5593 case 0xc1:
5594 /* shift Ev,Ib */
5595 shift = 2;
5596 grp2:
5597 {
ab4e4aec 5598 ot = mo_b_d(b, dflag);
0af10c86 5599 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 5600 mod = (modrm >> 6) & 3;
2c0262af 5601 op = (modrm >> 3) & 7;
3b46e624 5602
2c0262af 5603 if (mod != 3) {
14ce26e7
FB
5604 if (shift == 2) {
5605 s->rip_offset = 1;
5606 }
4eeb3939 5607 gen_lea_modrm(env, s, modrm);
2c0262af
FB
5608 opreg = OR_TMP0;
5609 } else {
14ce26e7 5610 opreg = (modrm & 7) | REX_B(s);
2c0262af
FB
5611 }
5612
5613 /* simpler op */
5614 if (shift == 0) {
5615 gen_shift(s, op, ot, opreg, OR_ECX);
5616 } else {
5617 if (shift == 2) {
0af10c86 5618 shift = cpu_ldub_code(env, s->pc++);
2c0262af
FB
5619 }
5620 gen_shifti(s, op, ot, opreg, shift);
5621 }
5622 }
5623 break;
5624 case 0xd0:
5625 case 0xd1:
5626 /* shift Ev,1 */
5627 shift = 1;
5628 goto grp2;
5629 case 0xd2:
5630 case 0xd3:
5631 /* shift Ev,cl */
5632 shift = 0;
5633 goto grp2;
5634
5635 case 0x1a4: /* shld imm */
5636 op = 0;
5637 shift = 1;
5638 goto do_shiftd;
5639 case 0x1a5: /* shld cl */
5640 op = 0;
5641 shift = 0;
5642 goto do_shiftd;
5643 case 0x1ac: /* shrd imm */
5644 op = 1;
5645 shift = 1;
5646 goto do_shiftd;
5647 case 0x1ad: /* shrd cl */
5648 op = 1;
5649 shift = 0;
5650 do_shiftd:
ab4e4aec 5651 ot = dflag;
0af10c86 5652 modrm = cpu_ldub_code(env, s->pc++);
2c0262af 5653 mod = (modrm >> 6) & 3;
14ce26e7
FB
5654 rm = (modrm & 7) | REX_B(s);
5655 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af 5656 if (mod != 3) {
4eeb3939 5657 gen_lea_modrm(env, s, modrm);
b6abf97d 5658 opreg = OR_TMP0;
2c0262af 5659 } else {
b6abf97d 5660 opreg = rm;
2c0262af 5661 }
c56baccf 5662 gen_op_mov_v_reg(ot, cpu_T[1], reg);
3b46e624 5663
2c0262af 5664 if (shift) {
3b9d3cf1
PB
5665 TCGv imm = tcg_const_tl(cpu_ldub_code(env, s->pc++));
5666 gen_shiftd_rm_T1(s, ot, opreg, op, imm);
5667 tcg_temp_free(imm);
2c0262af 5668 } else {
3b9d3cf1 5669 gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
2c0262af
FB
5670 }
5671 break;
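SHLD shifts the destination left and feeds the vacated low bits from the top of the source register; SHRD does the mirror image on the low end. A standalone C sketch for 32-bit operands with the count already reduced to 1..31 (hypothetical helper names; counts of 0 and 32 need separate handling):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: valid for count in 1..31. */
static uint32_t shld32(uint32_t dst, uint32_t src, unsigned count)
{
    return (dst << count) | (src >> (32 - count));
}

static uint32_t shrd32(uint32_t dst, uint32_t src, unsigned count)
{
    return (dst >> count) | (src << (32 - count));
}

int main(void)
{
    printf("%#010x\n", (unsigned)shld32(0x12345678, 0x9abcdef0, 8)); /* 0x3456789a */
    printf("%#010x\n", (unsigned)shrd32(0x12345678, 0x9abcdef0, 8)); /* 0xf0123456 */
    return 0;
}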
5672
5673 /************************/
5674 /* floats */
5fafdf24 5675 case 0xd8 ... 0xdf:
7eee2a50
FB
5676 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
5677 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5678 /* XXX: what to do if illegal op ? */
5679 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
5680 break;
5681 }
0af10c86 5682 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
5683 mod = (modrm >> 6) & 3;
5684 rm = modrm & 7;
5685 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
2c0262af
FB
5686 if (mod != 3) {
5687 /* memory op */
4eeb3939 5688 gen_lea_modrm(env, s, modrm);
2c0262af
FB
5689 switch(op) {
5690 case 0x00 ... 0x07: /* fxxxs */
5691 case 0x10 ... 0x17: /* fixxxl */
5692 case 0x20 ... 0x27: /* fxxxl */
5693 case 0x30 ... 0x37: /* fixxx */
5694 {
5695 int op1;
5696 op1 = op & 7;
5697
5698 switch(op >> 4) {
5699 case 0:
80b02013
RH
5700 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5701 s->mem_index, MO_LEUL);
d3eb5eae 5702 gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
5703 break;
5704 case 1:
80b02013
RH
5705 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5706 s->mem_index, MO_LEUL);
d3eb5eae 5707 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
5708 break;
5709 case 2:
3c5f4116
RH
5710 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
5711 s->mem_index, MO_LEQ);
d3eb5eae 5712 gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64);
2c0262af
FB
5713 break;
5714 case 3:
5715 default:
80b02013
RH
5716 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5717 s->mem_index, MO_LESW);
d3eb5eae 5718 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
5719 break;
5720 }
3b46e624 5721
a7812ae4 5722 gen_helper_fp_arith_ST0_FT0(op1);
2c0262af
FB
5723 if (op1 == 3) {
5724 /* fcomp needs pop */
d3eb5eae 5725 gen_helper_fpop(cpu_env);
2c0262af
FB
5726 }
5727 }
5728 break;
5729 case 0x08: /* flds */
5730 case 0x0a: /* fsts */
5731 case 0x0b: /* fstps */
465e9838
FB
5732 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5733 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5734 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
2c0262af
FB
5735 switch(op & 7) {
5736 case 0:
5737 switch(op >> 4) {
5738 case 0:
80b02013
RH
5739 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5740 s->mem_index, MO_LEUL);
d3eb5eae 5741 gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
5742 break;
5743 case 1:
80b02013
RH
5744 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5745 s->mem_index, MO_LEUL);
d3eb5eae 5746 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
5747 break;
5748 case 2:
3c5f4116
RH
5749 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
5750 s->mem_index, MO_LEQ);
d3eb5eae 5751 gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64);
2c0262af
FB
5752 break;
5753 case 3:
5754 default:
80b02013
RH
5755 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5756 s->mem_index, MO_LESW);
d3eb5eae 5757 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
2c0262af
FB
5758 break;
5759 }
5760 break;
465e9838 5761 case 1:
19e6c4b8 5762 /* XXX: the corresponding CPUID bit must be tested! */
465e9838
FB
5763 switch(op >> 4) {
5764 case 1:
d3eb5eae 5765 gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env);
d5601ad0
RH
5766 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5767 s->mem_index, MO_LEUL);
465e9838
FB
5768 break;
5769 case 2:
d3eb5eae 5770 gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env);
3523e4bd
RH
5771 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
5772 s->mem_index, MO_LEQ);
465e9838
FB
5773 break;
5774 case 3:
5775 default:
d3eb5eae 5776 gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env);
d5601ad0
RH
5777 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5778 s->mem_index, MO_LEUW);
19e6c4b8 5779 break;
465e9838 5780 }
d3eb5eae 5781 gen_helper_fpop(cpu_env);
465e9838 5782 break;
2c0262af
FB
5783 default:
5784 switch(op >> 4) {
5785 case 0:
d3eb5eae 5786 gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env);
d5601ad0
RH
5787 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5788 s->mem_index, MO_LEUL);
2c0262af
FB
5789 break;
5790 case 1:
d3eb5eae 5791 gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env);
d5601ad0
RH
5792 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5793 s->mem_index, MO_LEUL);
2c0262af
FB
5794 break;
5795 case 2:
d3eb5eae 5796 gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env);
3523e4bd
RH
5797 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
5798 s->mem_index, MO_LEQ);
2c0262af
FB
5799 break;
5800 case 3:
5801 default:
d3eb5eae 5802 gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env);
d5601ad0
RH
5803 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5804 s->mem_index, MO_LEUW);
2c0262af
FB
5805 break;
5806 }
5807 if ((op & 7) == 3)
d3eb5eae 5808 gen_helper_fpop(cpu_env);
2c0262af
FB
5809 break;
5810 }
5811 break;
5812 case 0x0c: /* fldenv mem */
ab4e4aec 5813 gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
2c0262af
FB
5814 break;
5815 case 0x0d: /* fldcw mem */
80b02013
RH
5816 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5817 s->mem_index, MO_LEUW);
d3eb5eae 5818 gen_helper_fldcw(cpu_env, cpu_tmp2_i32);
2c0262af
FB
5819 break;
5820 case 0x0e: /* fnstenv mem */
ab4e4aec 5821 gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
2c0262af
FB
5822 break;
5823 case 0x0f: /* fnstcw mem */
d3eb5eae 5824 gen_helper_fnstcw(cpu_tmp2_i32, cpu_env);
d5601ad0
RH
5825 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5826 s->mem_index, MO_LEUW);
2c0262af
FB
5827 break;
5828 case 0x1d: /* fldt mem */
d3eb5eae 5829 gen_helper_fldt_ST0(cpu_env, cpu_A0);
2c0262af
FB
5830 break;
5831 case 0x1f: /* fstpt mem */
d3eb5eae
BS
5832 gen_helper_fstt_ST0(cpu_env, cpu_A0);
5833 gen_helper_fpop(cpu_env);
2c0262af
FB
5834 break;
5835 case 0x2c: /* frstor mem */
ab4e4aec 5836 gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
2c0262af
FB
5837 break;
5838 case 0x2e: /* fnsave mem */
ab4e4aec 5839 gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
2c0262af
FB
5840 break;
5841 case 0x2f: /* fnstsw mem */
d3eb5eae 5842 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
d5601ad0
RH
5843 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5844 s->mem_index, MO_LEUW);
2c0262af
FB
5845 break;
5846 case 0x3c: /* fbld */
d3eb5eae 5847 gen_helper_fbld_ST0(cpu_env, cpu_A0);
2c0262af
FB
5848 break;
5849 case 0x3e: /* fbstp */
d3eb5eae
BS
5850 gen_helper_fbst_ST0(cpu_env, cpu_A0);
5851 gen_helper_fpop(cpu_env);
2c0262af
FB
5852 break;
5853 case 0x3d: /* fildll */
3c5f4116 5854 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
d3eb5eae 5855 gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64);
2c0262af
FB
5856 break;
5857 case 0x3f: /* fistpll */
d3eb5eae 5858 gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env);
3523e4bd 5859 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
d3eb5eae 5860 gen_helper_fpop(cpu_env);
2c0262af
FB
5861 break;
5862 default:
5863 goto illegal_op;
5864 }
5865 } else {
5866 /* register float ops */
5867 opreg = rm;
5868
5869 switch(op) {
5870 case 0x08: /* fld sti */
d3eb5eae
BS
5871 gen_helper_fpush(cpu_env);
5872 gen_helper_fmov_ST0_STN(cpu_env,
5873 tcg_const_i32((opreg + 1) & 7));
2c0262af
FB
5874 break;
5875 case 0x09: /* fxchg sti */
c169c906
FB
5876 case 0x29: /* fxchg4 sti, undocumented op */
5877 case 0x39: /* fxchg7 sti, undocumented op */
d3eb5eae 5878 gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
2c0262af
FB
5879 break;
5880 case 0x0a: /* grp d9/2 */
5881 switch(rm) {
5882 case 0: /* fnop */
023fe10d 5883 /* check exceptions (FreeBSD FPU probe) */
d3eb5eae 5884 gen_helper_fwait(cpu_env);
2c0262af
FB
5885 break;
5886 default:
5887 goto illegal_op;
5888 }
5889 break;
5890 case 0x0c: /* grp d9/4 */
5891 switch(rm) {
5892 case 0: /* fchs */
d3eb5eae 5893 gen_helper_fchs_ST0(cpu_env);
2c0262af
FB
5894 break;
5895 case 1: /* fabs */
d3eb5eae 5896 gen_helper_fabs_ST0(cpu_env);
2c0262af
FB
5897 break;
5898 case 4: /* ftst */
d3eb5eae
BS
5899 gen_helper_fldz_FT0(cpu_env);
5900 gen_helper_fcom_ST0_FT0(cpu_env);
2c0262af
FB
5901 break;
5902 case 5: /* fxam */
d3eb5eae 5903 gen_helper_fxam_ST0(cpu_env);
2c0262af
FB
5904 break;
5905 default:
5906 goto illegal_op;
5907 }
5908 break;
5909 case 0x0d: /* grp d9/5 */
5910 {
5911 switch(rm) {
5912 case 0:
d3eb5eae
BS
5913 gen_helper_fpush(cpu_env);
5914 gen_helper_fld1_ST0(cpu_env);
2c0262af
FB
5915 break;
5916 case 1:
d3eb5eae
BS
5917 gen_helper_fpush(cpu_env);
5918 gen_helper_fldl2t_ST0(cpu_env);
2c0262af
FB
5919 break;
5920 case 2:
d3eb5eae
BS
5921 gen_helper_fpush(cpu_env);
5922 gen_helper_fldl2e_ST0(cpu_env);
2c0262af
FB
5923 break;
5924 case 3:
d3eb5eae
BS
5925 gen_helper_fpush(cpu_env);
5926 gen_helper_fldpi_ST0(cpu_env);
2c0262af
FB
5927 break;
5928 case 4:
d3eb5eae
BS
5929 gen_helper_fpush(cpu_env);
5930 gen_helper_fldlg2_ST0(cpu_env);
2c0262af
FB
5931 break;
5932 case 5:
d3eb5eae
BS
5933 gen_helper_fpush(cpu_env);
5934 gen_helper_fldln2_ST0(cpu_env);
2c0262af
FB
5935 break;
5936 case 6:
d3eb5eae
BS
5937 gen_helper_fpush(cpu_env);
5938 gen_helper_fldz_ST0(cpu_env);
2c0262af
FB
5939 break;
5940 default:
5941 goto illegal_op;
5942 }
5943 }
5944 break;
5945 case 0x0e: /* grp d9/6 */
5946 switch(rm) {
5947 case 0: /* f2xm1 */
d3eb5eae 5948 gen_helper_f2xm1(cpu_env);
2c0262af
FB
5949 break;
5950 case 1: /* fyl2x */
d3eb5eae 5951 gen_helper_fyl2x(cpu_env);
2c0262af
FB
5952 break;
5953 case 2: /* fptan */
d3eb5eae 5954 gen_helper_fptan(cpu_env);
2c0262af
FB
5955 break;
5956 case 3: /* fpatan */
d3eb5eae 5957 gen_helper_fpatan(cpu_env);
2c0262af
FB
5958 break;
5959 case 4: /* fxtract */
d3eb5eae 5960 gen_helper_fxtract(cpu_env);
2c0262af
FB
5961 break;
5962 case 5: /* fprem1 */
d3eb5eae 5963 gen_helper_fprem1(cpu_env);
2c0262af
FB
5964 break;
5965 case 6: /* fdecstp */
d3eb5eae 5966 gen_helper_fdecstp(cpu_env);
2c0262af
FB
5967 break;
5968 default:
5969 case 7: /* fincstp */
d3eb5eae 5970 gen_helper_fincstp(cpu_env);
2c0262af
FB
5971 break;
5972 }
5973 break;
5974 case 0x0f: /* grp d9/7 */
5975 switch(rm) {
5976 case 0: /* fprem */
d3eb5eae 5977 gen_helper_fprem(cpu_env);
2c0262af
FB
5978 break;
5979 case 1: /* fyl2xp1 */
d3eb5eae 5980 gen_helper_fyl2xp1(cpu_env);
2c0262af
FB
5981 break;
5982 case 2: /* fsqrt */
d3eb5eae 5983 gen_helper_fsqrt(cpu_env);
2c0262af
FB
5984 break;
5985 case 3: /* fsincos */
d3eb5eae 5986 gen_helper_fsincos(cpu_env);
2c0262af
FB
5987 break;
5988 case 5: /* fscale */
d3eb5eae 5989 gen_helper_fscale(cpu_env);
2c0262af
FB
5990 break;
5991 case 4: /* frndint */
d3eb5eae 5992 gen_helper_frndint(cpu_env);
2c0262af
FB
5993 break;
5994 case 6: /* fsin */
d3eb5eae 5995 gen_helper_fsin(cpu_env);
2c0262af
FB
5996 break;
5997 default:
5998 case 7: /* fcos */
d3eb5eae 5999 gen_helper_fcos(cpu_env);
2c0262af
FB
6000 break;
6001 }
6002 break;
6003 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
6004 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
6005 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
6006 {
6007 int op1;
3b46e624 6008
2c0262af
FB
6009 op1 = op & 7;
6010 if (op >= 0x20) {
a7812ae4 6011 gen_helper_fp_arith_STN_ST0(op1, opreg);
2c0262af 6012 if (op >= 0x30)
d3eb5eae 6013 gen_helper_fpop(cpu_env);
2c0262af 6014 } else {
d3eb5eae 6015 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
a7812ae4 6016 gen_helper_fp_arith_ST0_FT0(op1);
2c0262af
FB
6017 }
6018 }
6019 break;
6020 case 0x02: /* fcom */
c169c906 6021 case 0x22: /* fcom2, undocumented op */
d3eb5eae
BS
6022 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6023 gen_helper_fcom_ST0_FT0(cpu_env);
2c0262af
FB
6024 break;
6025 case 0x03: /* fcomp */
c169c906
FB
6026 case 0x23: /* fcomp3, undocumented op */
6027 case 0x32: /* fcomp5, undocumented op */
d3eb5eae
BS
6028 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6029 gen_helper_fcom_ST0_FT0(cpu_env);
6030 gen_helper_fpop(cpu_env);
2c0262af
FB
6031 break;
6032 case 0x15: /* da/5 */
6033 switch(rm) {
6034 case 1: /* fucompp */
d3eb5eae
BS
6035 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
6036 gen_helper_fucom_ST0_FT0(cpu_env);
6037 gen_helper_fpop(cpu_env);
6038 gen_helper_fpop(cpu_env);
2c0262af
FB
6039 break;
6040 default:
6041 goto illegal_op;
6042 }
6043 break;
6044 case 0x1c:
6045 switch(rm) {
6046 case 0: /* feni (287 only, just do nop here) */
6047 break;
6048 case 1: /* fdisi (287 only, just do nop here) */
6049 break;
6050 case 2: /* fclex */
d3eb5eae 6051 gen_helper_fclex(cpu_env);
2c0262af
FB
6052 break;
6053 case 3: /* fninit */
d3eb5eae 6054 gen_helper_fninit(cpu_env);
2c0262af
FB
6055 break;
6056 case 4: /* fsetpm (287 only, just do nop here) */
6057 break;
6058 default:
6059 goto illegal_op;
6060 }
6061 break;
6062 case 0x1d: /* fucomi */
bff93281
PM
6063 if (!(s->cpuid_features & CPUID_CMOV)) {
6064 goto illegal_op;
6065 }
773cdfcc 6066 gen_update_cc_op(s);
d3eb5eae
BS
6067 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6068 gen_helper_fucomi_ST0_FT0(cpu_env);
3ca51d07 6069 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
6070 break;
6071 case 0x1e: /* fcomi */
bff93281
PM
6072 if (!(s->cpuid_features & CPUID_CMOV)) {
6073 goto illegal_op;
6074 }
773cdfcc 6075 gen_update_cc_op(s);
d3eb5eae
BS
6076 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6077 gen_helper_fcomi_ST0_FT0(cpu_env);
3ca51d07 6078 set_cc_op(s, CC_OP_EFLAGS);
2c0262af 6079 break;
658c8bda 6080 case 0x28: /* ffree sti */
d3eb5eae 6081 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
5fafdf24 6082 break;
2c0262af 6083 case 0x2a: /* fst sti */
d3eb5eae 6084 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
2c0262af
FB
6085 break;
6086 case 0x2b: /* fstp sti */
c169c906
FB
6087 case 0x0b: /* fstp1 sti, undocumented op */
6088 case 0x3a: /* fstp8 sti, undocumented op */
6089 case 0x3b: /* fstp9 sti, undocumented op */
d3eb5eae
BS
6090 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
6091 gen_helper_fpop(cpu_env);
2c0262af
FB
6092 break;
6093 case 0x2c: /* fucom st(i) */
d3eb5eae
BS
6094 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6095 gen_helper_fucom_ST0_FT0(cpu_env);
2c0262af
FB
6096 break;
6097 case 0x2d: /* fucomp st(i) */
d3eb5eae
BS
6098 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6099 gen_helper_fucom_ST0_FT0(cpu_env);
6100 gen_helper_fpop(cpu_env);
2c0262af
FB
6101 break;
6102 case 0x33: /* de/3 */
6103 switch(rm) {
6104 case 1: /* fcompp */
d3eb5eae
BS
6105 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
6106 gen_helper_fcom_ST0_FT0(cpu_env);
6107 gen_helper_fpop(cpu_env);
6108 gen_helper_fpop(cpu_env);
2c0262af
FB
6109 break;
6110 default:
6111 goto illegal_op;
6112 }
6113 break;
c169c906 6114 case 0x38: /* ffreep sti, undocumented op */
d3eb5eae
BS
6115 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
6116 gen_helper_fpop(cpu_env);
c169c906 6117 break;
2c0262af
FB
6118 case 0x3c: /* df/4 */
6119 switch(rm) {
6120 case 0:
d3eb5eae 6121 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
b6abf97d 6122 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
480a762d 6123 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
2c0262af
FB
6124 break;
6125 default:
6126 goto illegal_op;
6127 }
6128 break;
6129 case 0x3d: /* fucomip */
bff93281
PM
6130 if (!(s->cpuid_features & CPUID_CMOV)) {
6131 goto illegal_op;
6132 }
773cdfcc 6133 gen_update_cc_op(s);
d3eb5eae
BS
6134 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6135 gen_helper_fucomi_ST0_FT0(cpu_env);
6136 gen_helper_fpop(cpu_env);
3ca51d07 6137 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
6138 break;
6139 case 0x3e: /* fcomip */
bff93281
PM
6140 if (!(s->cpuid_features & CPUID_CMOV)) {
6141 goto illegal_op;
6142 }
773cdfcc 6143 gen_update_cc_op(s);
d3eb5eae
BS
6144 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6145 gen_helper_fcomi_ST0_FT0(cpu_env);
6146 gen_helper_fpop(cpu_env);
3ca51d07 6147 set_cc_op(s, CC_OP_EFLAGS);
2c0262af 6148 break;
a2cc3b24
FB
6149 case 0x10 ... 0x13: /* fcmovxx */
6150 case 0x18 ... 0x1b:
6151 {
42a268c2
RH
6152 int op1;
6153 TCGLabel *l1;
d70040bc 6154 static const uint8_t fcmov_cc[8] = {
a2cc3b24
FB
6155 (JCC_B << 1),
6156 (JCC_Z << 1),
6157 (JCC_BE << 1),
6158 (JCC_P << 1),
6159 };
bff93281
PM
6160
6161 if (!(s->cpuid_features & CPUID_CMOV)) {
6162 goto illegal_op;
6163 }
1e4840bf 6164 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
19e6c4b8 6165 l1 = gen_new_label();
dc259201 6166 gen_jcc1_noeob(s, op1, l1);
d3eb5eae 6167 gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
19e6c4b8 6168 gen_set_label(l1);
a2cc3b24
FB
6169 }
6170 break;
2c0262af
FB
6171 default:
6172 goto illegal_op;
6173 }
6174 }
6175 break;
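One detail worth keeping in mind about the 0x18/0x28/0x38 rows above: FIST/FISTP store using the current x87 rounding mode (round-to-nearest-even by default), while the SSE3 FISTTP variants always truncate toward zero. A small host-side comparison under the default rounding mode (illustrative only; link with -lm):

#include <math.h>
#include <stdio.h>

int main(void)
{
    double x = 2.7;

    printf("fist-like   : %ld\n", lrint(x));  /* 3: rounds to nearest  */
    printf("fisttp-like : %ld\n", (long)x);   /* 2: truncates toward 0 */
    return 0;
}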
6176 /************************/
6177 /* string ops */
6178
6179 case 0xa4: /* movsS */
6180 case 0xa5:
ab4e4aec 6181 ot = mo_b_d(b, dflag);
2c0262af
FB
6182 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6183 gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6184 } else {
6185 gen_movs(s, ot);
6186 }
6187 break;
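REP MOVS copies rCX elements from [rSI] to [rDI], stepping both pointers by plus or minus the element size according to DF; the gen_repz_movs path above additionally splits the loop across translation blocks. A byte-sized C sketch (illustrative only, hypothetical helper name):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: DF selects the direction, ECX the count. */
static void rep_movsb(uint8_t *edi, const uint8_t *esi, uint32_t ecx, int df)
{
    int step = df ? -1 : 1;
    while (ecx--) {
        *edi = *esi;
        edi += step;
        esi += step;
    }
}

int main(void)
{
    const uint8_t src[4] = { 'q', 'e', 'm', 'u' };
    uint8_t dst[5] = { 0 };

    rep_movsb(dst, src, 4, 0);
    printf("%s\n", (char *)dst);   /* qemu */
    return 0;
}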
3b46e624 6188
2c0262af
FB
6189 case 0xaa: /* stosS */
6190 case 0xab:
ab4e4aec 6191 ot = mo_b_d(b, dflag);
2c0262af
FB
6192 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6193 gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6194 } else {
6195 gen_stos(s, ot);
6196 }
6197 break;
6198 case 0xac: /* lodsS */
6199 case 0xad:
ab4e4aec 6200 ot = mo_b_d(b, dflag);
2c0262af
FB
6201 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6202 gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6203 } else {
6204 gen_lods(s, ot);
6205 }
6206 break;
6207 case 0xae: /* scasS */
6208 case 0xaf:
ab4e4aec 6209 ot = mo_b_d(b, dflag);
2c0262af
FB
6210 if (prefixes & PREFIX_REPNZ) {
6211 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6212 } else if (prefixes & PREFIX_REPZ) {
6213 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6214 } else {
6215 gen_scas(s, ot);
2c0262af
FB
6216 }
6217 break;
6218
6219 case 0xa6: /* cmpsS */
6220 case 0xa7:
ab4e4aec 6221 ot = mo_b_d(b, dflag);
2c0262af
FB
6222 if (prefixes & PREFIX_REPNZ) {
6223 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6224 } else if (prefixes & PREFIX_REPZ) {
6225 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6226 } else {
6227 gen_cmps(s, ot);
2c0262af
FB
6228 }
6229 break;
6230 case 0x6c: /* insS */
6231 case 0x6d:
ab4e4aec 6232 ot = mo_b_d32(b, dflag);
40b90233 6233 tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
b8b6a50b
FB
6234 gen_check_io(s, ot, pc_start - s->cs_base,
6235 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
f115e911
FB
6236 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6237 gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
2c0262af 6238 } else {
f115e911 6239 gen_ins(s, ot);
bd79255d 6240 if (s->tb->cflags & CF_USE_ICOUNT) {
2e70f6ef
PB
6241 gen_jmp(s, s->pc - s->cs_base);
6242 }
2c0262af
FB
6243 }
6244 break;
6245 case 0x6e: /* outsS */
6246 case 0x6f:
ab4e4aec 6247 ot = mo_b_d32(b, dflag);
40b90233 6248 tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
b8b6a50b
FB
6249 gen_check_io(s, ot, pc_start - s->cs_base,
6250 svm_is_rep(prefixes) | 4);
f115e911
FB
6251 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6252 gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
2c0262af 6253 } else {
f115e911 6254 gen_outs(s, ot);
bd79255d 6255 if (s->tb->cflags & CF_USE_ICOUNT) {
2e70f6ef
PB
6256 gen_jmp(s, s->pc - s->cs_base);
6257 }
2c0262af
FB
6258 }
6259 break;
6260
6261 /************************/
6262 /* port I/O */
0573fbfc 6263
2c0262af
FB
6264 case 0xe4:
6265 case 0xe5:
ab4e4aec 6266 ot = mo_b_d32(b, dflag);
0af10c86 6267 val = cpu_ldub_code(env, s->pc++);
6d093a4f 6268 tcg_gen_movi_tl(cpu_T[0], val);
b8b6a50b
FB
6269 gen_check_io(s, ot, pc_start - s->cs_base,
6270 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
bd79255d 6271 if (s->tb->cflags & CF_USE_ICOUNT) {
2e70f6ef 6272 gen_io_start();
bd79255d 6273 }
1b90d56e 6274 tcg_gen_movi_i32(cpu_tmp2_i32, val);
a7812ae4 6275 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
68773f84 6276 gen_op_mov_reg_v(ot, R_EAX, cpu_T[1]);
bd79255d 6277 if (s->tb->cflags & CF_USE_ICOUNT) {
2e70f6ef
PB
6278 gen_io_end();
6279 gen_jmp(s, s->pc - s->cs_base);
6280 }
2c0262af
FB
6281 break;
6282 case 0xe6:
6283 case 0xe7:
ab4e4aec 6284 ot = mo_b_d32(b, dflag);
0af10c86 6285 val = cpu_ldub_code(env, s->pc++);
6d093a4f 6286 tcg_gen_movi_tl(cpu_T[0], val);
b8b6a50b
FB
6287 gen_check_io(s, ot, pc_start - s->cs_base,
6288 svm_is_rep(prefixes));
c56baccf 6289 gen_op_mov_v_reg(ot, cpu_T[1], R_EAX);
b8b6a50b 6290
bd79255d 6291 if (s->tb->cflags & CF_USE_ICOUNT) {
2e70f6ef 6292 gen_io_start();
bd79255d 6293 }
1b90d56e 6294 tcg_gen_movi_i32(cpu_tmp2_i32, val);
b6abf97d 6295 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
a7812ae4 6296 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
bd79255d 6297 if (s->tb->cflags & CF_USE_ICOUNT) {
2e70f6ef
PB
6298 gen_io_end();
6299 gen_jmp(s, s->pc - s->cs_base);
6300 }
2c0262af
FB
6301 break;
6302 case 0xec:
6303 case 0xed:
ab4e4aec 6304 ot = mo_b_d32(b, dflag);
40b90233 6305 tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
b8b6a50b
FB
6306 gen_check_io(s, ot, pc_start - s->cs_base,
6307 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
bd79255d 6308 if (s->tb->cflags & CF_USE_ICOUNT) {
2e70f6ef 6309 gen_io_start();
bd79255d 6310 }
b6abf97d 6311 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
a7812ae4 6312 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
68773f84 6313 gen_op_mov_reg_v(ot, R_EAX, cpu_T[1]);
bd79255d 6314 if (s->tb->cflags & CF_USE_ICOUNT) {
2e70f6ef
PB
6315 gen_io_end();
6316 gen_jmp(s, s->pc - s->cs_base);
6317 }
2c0262af
FB
6318 break;
6319 case 0xee:
6320 case 0xef:
ab4e4aec 6321 ot = mo_b_d32(b, dflag);
40b90233 6322 tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
b8b6a50b
FB
6323 gen_check_io(s, ot, pc_start - s->cs_base,
6324 svm_is_rep(prefixes));
c56baccf 6325 gen_op_mov_v_reg(ot, cpu_T[1], R_EAX);
b8b6a50b 6326
bd79255d 6327 if (s->tb->cflags & CF_USE_ICOUNT) {
2e70f6ef 6328 gen_io_start();
bd79255d 6329 }
b6abf97d 6330 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
b6abf97d 6331 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
a7812ae4 6332 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
bd79255d 6333 if (s->tb->cflags & CF_USE_ICOUNT) {
2e70f6ef
PB
6334 gen_io_end();
6335 gen_jmp(s, s->pc - s->cs_base);
6336 }
2c0262af
FB
6337 break;
6338
6339 /************************/
6340 /* control */
6341 case 0xc2: /* ret im */
0af10c86 6342 val = cpu_ldsw_code(env, s->pc);
2c0262af 6343 s->pc += 2;
8e31d234
RH
6344 ot = gen_pop_T0(s);
6345 gen_stack_update(s, val + (1 << ot));
6346 /* Note that gen_pop_T0 uses a zero-extending load. */
74bdfbda 6347 gen_op_jmp_v(cpu_T[0]);
2c0262af
FB
6348 gen_eob(s);
6349 break;
6350 case 0xc3: /* ret */
8e31d234
RH
6351 ot = gen_pop_T0(s);
6352 gen_pop_update(s, ot);
6353 /* Note that gen_pop_T0 uses a zero-extending load. */
74bdfbda 6354 gen_op_jmp_v(cpu_T[0]);
2c0262af
FB
6355 gen_eob(s);
6356 break;
6357 case 0xca: /* lret im */
0af10c86 6358 val = cpu_ldsw_code(env, s->pc);
2c0262af
FB
6359 s->pc += 2;
6360 do_lret:
6361 if (s->pe && !s->vm86) {
773cdfcc 6362 gen_update_cc_op(s);
14ce26e7 6363 gen_jmp_im(pc_start - s->cs_base);
ab4e4aec 6364 gen_helper_lret_protected(cpu_env, tcg_const_i32(dflag - 1),
a7812ae4 6365 tcg_const_i32(val));
2c0262af
FB
6366 } else {
6367 gen_stack_A0(s);
6368 /* pop offset */
ab4e4aec 6369 gen_op_ld_v(s, dflag, cpu_T[0], cpu_A0);
2c0262af
FB
6370 /* NOTE: keeping EIP updated is not a problem in case of
6371 exception */
74bdfbda 6372 gen_op_jmp_v(cpu_T[0]);
2c0262af 6373 /* pop selector */
ab4e4aec
RH
6374 gen_op_addl_A0_im(1 << dflag);
6375 gen_op_ld_v(s, dflag, cpu_T[0], cpu_A0);
3bd7da9e 6376 gen_op_movl_seg_T0_vm(R_CS);
2c0262af 6377 /* add stack offset */
ab4e4aec 6378 gen_stack_update(s, val + (2 << dflag));
2c0262af
FB
6379 }
6380 gen_eob(s);
6381 break;
6382 case 0xcb: /* lret */
6383 val = 0;
6384 goto do_lret;
6385 case 0xcf: /* iret */
872929aa 6386 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
2c0262af
FB
6387 if (!s->pe) {
6388 /* real mode */
ab4e4aec 6389 gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
3ca51d07 6390 set_cc_op(s, CC_OP_EFLAGS);
f115e911
FB
6391 } else if (s->vm86) {
6392 if (s->iopl != 3) {
6393 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6394 } else {
ab4e4aec 6395 gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
3ca51d07 6396 set_cc_op(s, CC_OP_EFLAGS);
f115e911 6397 }
2c0262af 6398 } else {
ab4e4aec 6399 gen_helper_iret_protected(cpu_env, tcg_const_i32(dflag - 1),
a7812ae4 6400 tcg_const_i32(s->pc - s->cs_base));
3ca51d07 6401 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
6402 }
6403 gen_eob(s);
6404 break;
6405 case 0xe8: /* call im */
6406 {
ab4e4aec 6407 if (dflag != MO_16) {
4ba9938c 6408 tval = (int32_t)insn_get(env, s, MO_32);
ab4e4aec 6409 } else {
4ba9938c 6410 tval = (int16_t)insn_get(env, s, MO_16);
ab4e4aec 6411 }
2c0262af 6412 next_eip = s->pc - s->cs_base;
14ce26e7 6413 tval += next_eip;
ab4e4aec 6414 if (dflag == MO_16) {
14ce26e7 6415 tval &= 0xffff;
ab4e4aec 6416 } else if (!CODE64(s)) {
99596385 6417 tval &= 0xffffffff;
ab4e4aec 6418 }
cc0bce88 6419 tcg_gen_movi_tl(cpu_T[0], next_eip);
432baffe 6420 gen_push_v(s, cpu_T[0]);
14ce26e7 6421 gen_jmp(s, tval);
2c0262af
FB
6422 }
6423 break;
6424 case 0x9a: /* lcall im */
6425 {
6426 unsigned int selector, offset;
3b46e624 6427
14ce26e7
FB
6428 if (CODE64(s))
6429 goto illegal_op;
ab4e4aec 6430 ot = dflag;
0af10c86 6431 offset = insn_get(env, s, ot);
4ba9938c 6432 selector = insn_get(env, s, MO_16);
3b46e624 6433
1b90d56e 6434 tcg_gen_movi_tl(cpu_T[0], selector);
0ae657b1 6435 tcg_gen_movi_tl(cpu_T[1], offset);
2c0262af
FB
6436 }
6437 goto do_lcall;
ecada8a2 6438 case 0xe9: /* jmp im */
ab4e4aec 6439 if (dflag != MO_16) {
4ba9938c 6440 tval = (int32_t)insn_get(env, s, MO_32);
ab4e4aec 6441 } else {
4ba9938c 6442 tval = (int16_t)insn_get(env, s, MO_16);
ab4e4aec 6443 }
14ce26e7 6444 tval += s->pc - s->cs_base;
ab4e4aec 6445 if (dflag == MO_16) {
14ce26e7 6446 tval &= 0xffff;
ab4e4aec 6447 } else if (!CODE64(s)) {
32938e12 6448 tval &= 0xffffffff;
ab4e4aec 6449 }
14ce26e7 6450 gen_jmp(s, tval);
2c0262af
FB
6451 break;
6452 case 0xea: /* ljmp im */
6453 {
6454 unsigned int selector, offset;
6455
14ce26e7
FB
6456 if (CODE64(s))
6457 goto illegal_op;
ab4e4aec 6458 ot = dflag;
0af10c86 6459 offset = insn_get(env, s, ot);
4ba9938c 6460 selector = insn_get(env, s, MO_16);
3b46e624 6461
1b90d56e 6462 tcg_gen_movi_tl(cpu_T[0], selector);
0ae657b1 6463 tcg_gen_movi_tl(cpu_T[1], offset);
2c0262af
FB
6464 }
6465 goto do_ljmp;
6466 case 0xeb: /* jmp Jb */
4ba9938c 6467 tval = (int8_t)insn_get(env, s, MO_8);
14ce26e7 6468 tval += s->pc - s->cs_base;
ab4e4aec 6469 if (dflag == MO_16) {
14ce26e7 6470 tval &= 0xffff;
ab4e4aec 6471 }
14ce26e7 6472 gen_jmp(s, tval);
2c0262af
FB
6473 break;
6474 case 0x70 ... 0x7f: /* jcc Jb */
4ba9938c 6475 tval = (int8_t)insn_get(env, s, MO_8);
2c0262af
FB
6476 goto do_jcc;
6477 case 0x180 ... 0x18f: /* jcc Jv */
ab4e4aec 6478 if (dflag != MO_16) {
4ba9938c 6479 tval = (int32_t)insn_get(env, s, MO_32);
2c0262af 6480 } else {
4ba9938c 6481 tval = (int16_t)insn_get(env, s, MO_16);
2c0262af
FB
6482 }
6483 do_jcc:
6484 next_eip = s->pc - s->cs_base;
14ce26e7 6485 tval += next_eip;
ab4e4aec 6486 if (dflag == MO_16) {
14ce26e7 6487 tval &= 0xffff;
ab4e4aec 6488 }
14ce26e7 6489 gen_jcc(s, b, tval, next_eip);
2c0262af
FB
6490 break;
6491
6492 case 0x190 ... 0x19f: /* setcc Gv */
0af10c86 6493 modrm = cpu_ldub_code(env, s->pc++);
cc8b6f5b 6494 gen_setcc1(s, b, cpu_T[0]);
4ba9938c 6495 gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
2c0262af
FB
6496 break;
6497 case 0x140 ... 0x14f: /* cmov Gv, Ev */
bff93281
PM
6498 if (!(s->cpuid_features & CPUID_CMOV)) {
6499 goto illegal_op;
6500 }
ab4e4aec 6501 ot = dflag;
f32d3781
PB
6502 modrm = cpu_ldub_code(env, s->pc++);
6503 reg = ((modrm >> 3) & 7) | rex_r;
6504 gen_cmovcc1(env, s, ot, b, modrm, reg);
2c0262af 6505 break;
3b46e624 6506
2c0262af
FB
6507 /************************/
6508 /* flags */
6509 case 0x9c: /* pushf */
872929aa 6510 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
2c0262af
FB
6511 if (s->vm86 && s->iopl != 3) {
6512 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6513 } else {
773cdfcc 6514 gen_update_cc_op(s);
f0967a1a 6515 gen_helper_read_eflags(cpu_T[0], cpu_env);
432baffe 6516 gen_push_v(s, cpu_T[0]);
2c0262af
FB
6517 }
6518 break;
6519 case 0x9d: /* popf */
872929aa 6520 gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
2c0262af
FB
6521 if (s->vm86 && s->iopl != 3) {
6522 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6523 } else {
8e31d234 6524 ot = gen_pop_T0(s);
2c0262af 6525 if (s->cpl == 0) {
ab4e4aec 6526 if (dflag != MO_16) {
f0967a1a
BS
6527 gen_helper_write_eflags(cpu_env, cpu_T[0],
6528 tcg_const_i32((TF_MASK | AC_MASK |
6529 ID_MASK | NT_MASK |
6530 IF_MASK |
6531 IOPL_MASK)));
2c0262af 6532 } else {
f0967a1a
BS
6533 gen_helper_write_eflags(cpu_env, cpu_T[0],
6534 tcg_const_i32((TF_MASK | AC_MASK |
6535 ID_MASK | NT_MASK |
6536 IF_MASK | IOPL_MASK)
6537 & 0xffff));
2c0262af
FB
6538 }
6539 } else {
4136f33c 6540 if (s->cpl <= s->iopl) {
ab4e4aec 6541 if (dflag != MO_16) {
f0967a1a
BS
6542 gen_helper_write_eflags(cpu_env, cpu_T[0],
6543 tcg_const_i32((TF_MASK |
6544 AC_MASK |
6545 ID_MASK |
6546 NT_MASK |
6547 IF_MASK)));
4136f33c 6548 } else {
f0967a1a
BS
6549 gen_helper_write_eflags(cpu_env, cpu_T[0],
6550 tcg_const_i32((TF_MASK |
6551 AC_MASK |
6552 ID_MASK |
6553 NT_MASK |
6554 IF_MASK)
6555 & 0xffff));
4136f33c 6556 }
2c0262af 6557 } else {
ab4e4aec 6558 if (dflag != MO_16) {
f0967a1a
BS
6559 gen_helper_write_eflags(cpu_env, cpu_T[0],
6560 tcg_const_i32((TF_MASK | AC_MASK |
6561 ID_MASK | NT_MASK)));
4136f33c 6562 } else {
f0967a1a
BS
6563 gen_helper_write_eflags(cpu_env, cpu_T[0],
6564 tcg_const_i32((TF_MASK | AC_MASK |
6565 ID_MASK | NT_MASK)
6566 & 0xffff));
4136f33c 6567 }
2c0262af
FB
6568 }
6569 }
8e31d234 6570 gen_pop_update(s, ot);
3ca51d07 6571 set_cc_op(s, CC_OP_EFLAGS);
a9321a4d 6572 /* abort translation because TF/AC flag may change */
14ce26e7 6573 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
6574 gen_eob(s);
6575 }
6576 break;
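The three gen_helper_write_eflags calls above differ only in which bits POPF may change at the current privilege level (the MO_16 forms additionally truncate the mask to 16 bits). The selection logic restated as standalone C, using the architectural bit positions for those flags (illustrative only, hypothetical helper name):

#include <stdio.h>

#define TF_MASK   0x00000100
#define IF_MASK   0x00000200
#define IOPL_MASK 0x00003000
#define NT_MASK   0x00004000
#define AC_MASK   0x00040000
#define ID_MASK   0x00200000

/* Illustrative only: CPL 0 may also write IF and IOPL, CPL <= IOPL may
   still write IF, and everyone else may write neither. */
static unsigned popf_write_mask(int cpl, int iopl)
{
    unsigned mask = TF_MASK | AC_MASK | ID_MASK | NT_MASK;

    if (cpl == 0) {
        return mask | IF_MASK | IOPL_MASK;
    }
    if (cpl <= iopl) {
        return mask | IF_MASK;
    }
    return mask;
}

int main(void)
{
    printf("CPL0       : %#x\n", popf_write_mask(0, 0));
    printf("CPL3 IOPL3 : %#x\n", popf_write_mask(3, 3));
    printf("CPL3 IOPL0 : %#x\n", popf_write_mask(3, 0));
    return 0;
}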
6577 case 0x9e: /* sahf */
12e26b75 6578 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
14ce26e7 6579 goto illegal_op;
c56baccf 6580 gen_op_mov_v_reg(MO_8, cpu_T[0], R_AH);
d229edce 6581 gen_compute_eflags(s);
bd7a7b33
FB
6582 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
6583 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
6584 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
2c0262af
FB
6585 break;
6586 case 0x9f: /* lahf */
12e26b75 6587 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
14ce26e7 6588 goto illegal_op;
d229edce 6589 gen_compute_eflags(s);
bd7a7b33 6590 /* Note: gen_compute_eflags() only gives the condition codes */
d229edce 6591 tcg_gen_ori_tl(cpu_T[0], cpu_cc_src, 0x02);
480a762d 6592 gen_op_mov_reg_v(MO_8, R_AH, cpu_T[0]);
2c0262af
FB
6593 break;
6594 case 0xf5: /* cmc */
d229edce 6595 gen_compute_eflags(s);
bd7a7b33 6596 tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
2c0262af
FB
6597 break;
6598 case 0xf8: /* clc */
d229edce 6599 gen_compute_eflags(s);
bd7a7b33 6600 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
2c0262af
FB
6601 break;
6602 case 0xf9: /* stc */
d229edce 6603 gen_compute_eflags(s);
bd7a7b33 6604 tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
2c0262af
FB
6605 break;
6606 case 0xfc: /* cld */
b6abf97d 6607 tcg_gen_movi_i32(cpu_tmp2_i32, 1);
317ac620 6608 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
2c0262af
FB
6609 break;
6610 case 0xfd: /* std */
b6abf97d 6611 tcg_gen_movi_i32(cpu_tmp2_i32, -1);
317ac620 6612 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
2c0262af
FB
6613 break;
6614
6615 /************************/
6616 /* bit operations */
6617 case 0x1ba: /* bt/bts/btr/btc Gv, im */
ab4e4aec 6618 ot = dflag;
0af10c86 6619 modrm = cpu_ldub_code(env, s->pc++);
33698e5f 6620 op = (modrm >> 3) & 7;
2c0262af 6621 mod = (modrm >> 6) & 3;
14ce26e7 6622 rm = (modrm & 7) | REX_B(s);
2c0262af 6623 if (mod != 3) {
14ce26e7 6624 s->rip_offset = 1;
4eeb3939 6625 gen_lea_modrm(env, s, modrm);
909be183 6626 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
2c0262af 6627 } else {
c56baccf 6628 gen_op_mov_v_reg(ot, cpu_T[0], rm);
2c0262af
FB
6629 }
6630 /* load shift */
0af10c86 6631 val = cpu_ldub_code(env, s->pc++);
0ae657b1 6632 tcg_gen_movi_tl(cpu_T[1], val);
2c0262af
FB
6633 if (op < 4)
6634 goto illegal_op;
6635 op -= 4;
f484d386 6636 goto bt_op;
2c0262af
FB
6637 case 0x1a3: /* bt Gv, Ev */
6638 op = 0;
6639 goto do_btx;
6640 case 0x1ab: /* bts */
6641 op = 1;
6642 goto do_btx;
6643 case 0x1b3: /* btr */
6644 op = 2;
6645 goto do_btx;
6646 case 0x1bb: /* btc */
6647 op = 3;
6648 do_btx:
ab4e4aec 6649 ot = dflag;
0af10c86 6650 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7 6651 reg = ((modrm >> 3) & 7) | rex_r;
2c0262af 6652 mod = (modrm >> 6) & 3;
14ce26e7 6653 rm = (modrm & 7) | REX_B(s);
c56baccf 6654 gen_op_mov_v_reg(MO_32, cpu_T[1], reg);
2c0262af 6655 if (mod != 3) {
4eeb3939 6656 gen_lea_modrm(env, s, modrm);
2c0262af 6657 /* specific case: we need to add a displacement */
f484d386
FB
6658 gen_exts(ot, cpu_T[1]);
6659 tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
6660 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
6661 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
909be183 6662 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
2c0262af 6663 } else {
c56baccf 6664 gen_op_mov_v_reg(ot, cpu_T[0], rm);
2c0262af 6665 }
f484d386
FB
6666 bt_op:
6667 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
dc1823ce 6668 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
f484d386
FB
6669 switch(op) {
6670 case 0:
f484d386
FB
6671 break;
6672 case 1:
f484d386
FB
6673 tcg_gen_movi_tl(cpu_tmp0, 1);
6674 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6675 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6676 break;
6677 case 2:
f484d386
FB
6678 tcg_gen_movi_tl(cpu_tmp0, 1);
6679 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
dc1823ce 6680 tcg_gen_andc_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
f484d386
FB
6681 break;
6682 default:
6683 case 3:
f484d386
FB
6684 tcg_gen_movi_tl(cpu_tmp0, 1);
6685 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6686 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6687 break;
6688 }
2c0262af 6689 if (op != 0) {
fd8ca9f6
RH
6690 if (mod != 3) {
6691 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
6692 } else {
480a762d 6693 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
fd8ca9f6 6694 }
dc1823ce
RH
6695 }
6696
6697 /* Delay all CC updates until after the store above. Note that
6698 C is the result of the test, Z is unchanged, and the others
6699 are all undefined. */
6700 switch (s->cc_op) {
6701 case CC_OP_MULB ... CC_OP_MULQ:
6702 case CC_OP_ADDB ... CC_OP_ADDQ:
6703 case CC_OP_ADCB ... CC_OP_ADCQ:
6704 case CC_OP_SUBB ... CC_OP_SUBQ:
6705 case CC_OP_SBBB ... CC_OP_SBBQ:
6706 case CC_OP_LOGICB ... CC_OP_LOGICQ:
6707 case CC_OP_INCB ... CC_OP_INCQ:
6708 case CC_OP_DECB ... CC_OP_DECQ:
6709 case CC_OP_SHLB ... CC_OP_SHLQ:
6710 case CC_OP_SARB ... CC_OP_SARQ:
6711 case CC_OP_BMILGB ... CC_OP_BMILGQ:
6712 /* Z was going to be computed from the non-zero status of CC_DST.
6713 We can get that same Z value (and the new C value) by leaving
6714 CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
6715 same width. */
f484d386 6716 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
dc1823ce
RH
6717 set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
6718 break;
6719 default:
6720 /* Otherwise, generate EFLAGS and replace the C bit. */
6721 gen_compute_eflags(s);
6722 tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, cpu_tmp4,
6723 ctz32(CC_C), 1);
6724 break;
2c0262af
FB
6725 }
6726 break;
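For the BT family the shifted-out bit lands in CF and, for BTS/BTR/BTC, the same 1 << index mask is ORed in, cleared, or XORed as above. The register form in standalone C, with the bit index already reduced modulo the operand width (illustrative only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t v = 0x00000010;            /* bit 4 set */
    unsigned bit = 36 & 31;             /* index is taken modulo 32 here */

    int cf = (v >> bit) & 1;            /* BT  */
    uint32_t bts = v |  (1u << bit);    /* BTS */
    uint32_t btr = v & ~(1u << bit);    /* BTR */
    uint32_t btc = v ^  (1u << bit);    /* BTC */

    printf("CF=%d bts=%#x btr=%#x btc=%#x\n",
           cf, (unsigned)bts, (unsigned)btr, (unsigned)btc);
    /* CF=1 bts=0x10 btr=0 btc=0 */
    return 0;
}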
321c5351
RH
6727 case 0x1bc: /* bsf / tzcnt */
6728 case 0x1bd: /* bsr / lzcnt */
ab4e4aec 6729 ot = dflag;
321c5351
RH
6730 modrm = cpu_ldub_code(env, s->pc++);
6731 reg = ((modrm >> 3) & 7) | rex_r;
6732 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
6733 gen_extu(ot, cpu_T[0]);
6734
6735 /* Note that lzcnt and tzcnt are in different extensions. */
6736 if ((prefixes & PREFIX_REPZ)
6737 && (b & 1
6738 ? s->cpuid_ext3_features & CPUID_EXT3_ABM
6739 : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
6740 int size = 8 << ot;
6741 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
6742 if (b & 1) {
6743 /* For lzcnt, reduce the target_ulong result by the
6744 number of zeros that we expect to find at the top. */
6745 gen_helper_clz(cpu_T[0], cpu_T[0]);
6746 tcg_gen_subi_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - size);
6191b059 6747 } else {
321c5351
RH
6748 /* For tzcnt, a zero input must return the operand size:
6749 force all bits outside the operand size to 1. */
6750 target_ulong mask = (target_ulong)-2 << (size - 1);
6751 tcg_gen_ori_tl(cpu_T[0], cpu_T[0], mask);
6752 gen_helper_ctz(cpu_T[0], cpu_T[0]);
6191b059 6753 }
321c5351
RH
6754 /* For lzcnt/tzcnt, C and Z bits are defined and are
6755 related to the result. */
6756 gen_op_update1_cc();
6757 set_cc_op(s, CC_OP_BMILGB + ot);
6758 } else {
6759 /* For bsr/bsf, only the Z bit is defined and it is related
6760 to the input and not the result. */
6761 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
6762 set_cc_op(s, CC_OP_LOGICB + ot);
6763 if (b & 1) {
6764 /* For bsr, return the bit index of the first 1 bit,
6765 not the count of leading zeros. */
6766 gen_helper_clz(cpu_T[0], cpu_T[0]);
6767 tcg_gen_xori_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - 1);
6768 } else {
6769 gen_helper_ctz(cpu_T[0], cpu_T[0]);
6770 }
6771 /* ??? The manual says that the output is undefined when the
6772 input is zero, but real hardware leaves it unchanged, and
6773 real programs appear to depend on that. */
6774 tcg_gen_movi_tl(cpu_tmp0, 0);
6775 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T[0], cpu_cc_dst, cpu_tmp0,
6776 cpu_regs[reg], cpu_T[0]);
6191b059 6777 }
480a762d 6778 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
2c0262af
FB
6779 break;
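 /* Annotation (added): in the case above, the F3 (REPZ) prefix selects
    LZCNT/TZCNT when the matching CPUID bit (ABM resp. BMI1) is present;
    otherwise the encoding decodes as BSR/BSF.  For BSR/BSF a zero source
    leaves the destination register unchanged via the movcond, matching
    observed hardware as noted above. */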
6780 /************************/
6781 /* bcd */
6782 case 0x27: /* daa */
14ce26e7
FB
6783 if (CODE64(s))
6784 goto illegal_op;
773cdfcc 6785 gen_update_cc_op(s);
7923057b 6786 gen_helper_daa(cpu_env);
3ca51d07 6787 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
6788 break;
6789 case 0x2f: /* das */
14ce26e7
FB
6790 if (CODE64(s))
6791 goto illegal_op;
773cdfcc 6792 gen_update_cc_op(s);
7923057b 6793 gen_helper_das(cpu_env);
3ca51d07 6794 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
6795 break;
6796 case 0x37: /* aaa */
14ce26e7
FB
6797 if (CODE64(s))
6798 goto illegal_op;
773cdfcc 6799 gen_update_cc_op(s);
7923057b 6800 gen_helper_aaa(cpu_env);
3ca51d07 6801 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
6802 break;
6803 case 0x3f: /* aas */
14ce26e7
FB
6804 if (CODE64(s))
6805 goto illegal_op;
773cdfcc 6806 gen_update_cc_op(s);
7923057b 6807 gen_helper_aas(cpu_env);
3ca51d07 6808 set_cc_op(s, CC_OP_EFLAGS);
2c0262af
FB
6809 break;
6810 case 0xd4: /* aam */
14ce26e7
FB
6811 if (CODE64(s))
6812 goto illegal_op;
0af10c86 6813 val = cpu_ldub_code(env, s->pc++);
b6d7c3db
TS
6814 if (val == 0) {
6815 gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
6816 } else {
7923057b 6817 gen_helper_aam(cpu_env, tcg_const_i32(val));
3ca51d07 6818 set_cc_op(s, CC_OP_LOGICB);
b6d7c3db 6819 }
2c0262af
FB
6820 break;
6821 case 0xd5: /* aad */
14ce26e7
FB
6822 if (CODE64(s))
6823 goto illegal_op;
0af10c86 6824 val = cpu_ldub_code(env, s->pc++);
7923057b 6825 gen_helper_aad(cpu_env, tcg_const_i32(val));
3ca51d07 6826 set_cc_op(s, CC_OP_LOGICB);
2c0262af
FB
6827 break;
6828 /************************/
6829 /* misc */
6830 case 0x90: /* nop */
ab1f142b 6831 /* XXX: correct lock test for all insn */
7418027e 6832 if (prefixes & PREFIX_LOCK) {
ab1f142b 6833 goto illegal_op;
7418027e
RH
6834 }
6835 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
6836 if (REX_B(s)) {
6837 goto do_xchg_reg_eax;
6838 }
0573fbfc 6839 if (prefixes & PREFIX_REPZ) {
81f3053b
PB
6840 gen_update_cc_op(s);
6841 gen_jmp_im(pc_start - s->cs_base);
6842 gen_helper_pause(cpu_env, tcg_const_i32(s->pc - pc_start));
6843 s->is_jmp = DISAS_TB_JUMP;
0573fbfc 6844 }
2c0262af
FB
6845 break;
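 /* Annotation (added): with a REPZ prefix, opcode 0x90 is PAUSE; the
    pause helper is called and the translation block is terminated
    (DISAS_TB_JUMP).  With REX.B set the encoding is really
    xchg eax,r8d and is handled by do_xchg_reg_eax above. */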
6846 case 0x9b: /* fwait */
5fafdf24 6847 if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
7eee2a50
FB
6848 (HF_MP_MASK | HF_TS_MASK)) {
6849 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
2ee73ac3 6850 } else {
d3eb5eae 6851 gen_helper_fwait(cpu_env);
7eee2a50 6852 }
2c0262af
FB
6853 break;
6854 case 0xcc: /* int3 */
6855 gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
6856 break;
6857 case 0xcd: /* int N */
0af10c86 6858 val = cpu_ldub_code(env, s->pc++);
f115e911 6859 if (s->vm86 && s->iopl != 3) {
5fafdf24 6860 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
f115e911
FB
6861 } else {
6862 gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
6863 }
2c0262af
FB
6864 break;
6865 case 0xce: /* into */
14ce26e7
FB
6866 if (CODE64(s))
6867 goto illegal_op;
773cdfcc 6868 gen_update_cc_op(s);
a8ede8ba 6869 gen_jmp_im(pc_start - s->cs_base);
4a7443be 6870 gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start));
2c0262af 6871 break;
0b97134b 6872#ifdef WANT_ICEBP
2c0262af 6873 case 0xf1: /* icebp (undocumented, exits to external debugger) */
872929aa 6874 gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
aba9d61e 6875#if 1
2c0262af 6876 gen_debug(s, pc_start - s->cs_base);
aba9d61e
FB
6877#else
6878 /* start debug */
bbd77c18 6879 tb_flush(CPU(x86_env_get_cpu(env)));
24537a01 6880 qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
aba9d61e 6881#endif
2c0262af 6882 break;
0b97134b 6883#endif
2c0262af
FB
6884 case 0xfa: /* cli */
6885 if (!s->vm86) {
6886 if (s->cpl <= s->iopl) {
f0967a1a 6887 gen_helper_cli(cpu_env);
2c0262af
FB
6888 } else {
6889 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6890 }
6891 } else {
6892 if (s->iopl == 3) {
f0967a1a 6893 gen_helper_cli(cpu_env);
2c0262af
FB
6894 } else {
6895 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6896 }
6897 }
6898 break;
6899 case 0xfb: /* sti */
6900 if (!s->vm86) {
6901 if (s->cpl <= s->iopl) {
6902 gen_sti:
f0967a1a 6903 gen_helper_sti(cpu_env);
2c0262af 6904 /* interrupts are not recognized until after the first insn following sti */
a2cc3b24
FB
6905 /* If several instructions disable interrupts, only the
6906 _first_ does it */
6907 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
f0967a1a 6908 gen_helper_set_inhibit_irq(cpu_env);
2c0262af 6909 /* give a chance to handle pending irqs */
14ce26e7 6910 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
6911 gen_eob(s);
6912 } else {
6913 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6914 }
6915 } else {
6916 if (s->iopl == 3) {
6917 goto gen_sti;
6918 } else {
6919 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6920 }
6921 }
6922 break;
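 /* Annotation (added): for STI above, HF_INHIBIT_IRQ_MASK implements the
    one-instruction interrupt shadow -- set_inhibit_irq is generated only
    if the flag was not already set by the previous insn, and gen_eob ends
    the block so pending interrupts can be taken after the next
    instruction. */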
6923 case 0x62: /* bound */
14ce26e7
FB
6924 if (CODE64(s))
6925 goto illegal_op;
ab4e4aec 6926 ot = dflag;
0af10c86 6927 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
6928 reg = (modrm >> 3) & 7;
6929 mod = (modrm >> 6) & 3;
6930 if (mod == 3)
6931 goto illegal_op;
c56baccf 6932 gen_op_mov_v_reg(ot, cpu_T[0], reg);
4eeb3939 6933 gen_lea_modrm(env, s, modrm);
b6abf97d 6934 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4ba9938c 6935 if (ot == MO_16) {
92fc4b58
BS
6936 gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32);
6937 } else {
6938 gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32);
6939 }
2c0262af
FB
6940 break;
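 /* Annotation (added): for BOUND above, the index register is truncated
    to 32 bits and passed, together with the address of the bounds pair,
    to the boundw/boundl helpers, which are expected to raise #BR when the
    index is out of range. */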
6941 case 0x1c8 ... 0x1cf: /* bswap reg */
14ce26e7
FB
6942 reg = (b & 7) | REX_B(s);
6943#ifdef TARGET_X86_64
ab4e4aec 6944 if (dflag == MO_64) {
c56baccf 6945 gen_op_mov_v_reg(MO_64, cpu_T[0], reg);
66896cb8 6946 tcg_gen_bswap64_i64(cpu_T[0], cpu_T[0]);
480a762d 6947 gen_op_mov_reg_v(MO_64, reg, cpu_T[0]);
5fafdf24 6948 } else
8777643e 6949#endif
57fec1fe 6950 {
c56baccf 6951 gen_op_mov_v_reg(MO_32, cpu_T[0], reg);
8777643e
AJ
6952 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
6953 tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
480a762d 6954 gen_op_mov_reg_v(MO_32, reg, cpu_T[0]);
14ce26e7 6955 }
2c0262af
FB
6956 break;
6957 case 0xd6: /* salc */
14ce26e7
FB
6958 if (CODE64(s))
6959 goto illegal_op;
cc8b6f5b 6960 gen_compute_eflags_c(s, cpu_T[0]);
bd7a7b33 6961 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
480a762d 6962 gen_op_mov_reg_v(MO_8, R_EAX, cpu_T[0]);
2c0262af
FB
6963 break;
6964 case 0xe0: /* loopnz */
6965 case 0xe1: /* loopz */
2c0262af
FB
6966 case 0xe2: /* loop */
6967 case 0xe3: /* jecxz */
14ce26e7 6968 {
42a268c2 6969 TCGLabel *l1, *l2, *l3;
14ce26e7 6970
4ba9938c 6971 tval = (int8_t)insn_get(env, s, MO_8);
14ce26e7
FB
6972 next_eip = s->pc - s->cs_base;
6973 tval += next_eip;
ab4e4aec 6974 if (dflag == MO_16) {
14ce26e7 6975 tval &= 0xffff;
ab4e4aec 6976 }
3b46e624 6977
14ce26e7
FB
6978 l1 = gen_new_label();
6979 l2 = gen_new_label();
6e0d8677 6980 l3 = gen_new_label();
14ce26e7 6981 b &= 3;
6e0d8677
FB
6982 switch(b) {
6983 case 0: /* loopnz */
6984 case 1: /* loopz */
1d71ddb1
RH
6985 gen_op_add_reg_im(s->aflag, R_ECX, -1);
6986 gen_op_jz_ecx(s->aflag, l3);
5bdb91b0 6987 gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
6e0d8677
FB
6988 break;
6989 case 2: /* loop */
1d71ddb1
RH
6990 gen_op_add_reg_im(s->aflag, R_ECX, -1);
6991 gen_op_jnz_ecx(s->aflag, l1);
6e0d8677
FB
6992 break;
6993 default:
6994 case 3: /* jcxz */
1d71ddb1 6995 gen_op_jz_ecx(s->aflag, l1);
6e0d8677 6996 break;
14ce26e7
FB
6997 }
6998
6e0d8677 6999 gen_set_label(l3);
14ce26e7 7000 gen_jmp_im(next_eip);
8e1c85e3 7001 tcg_gen_br(l2);
6e0d8677 7002
14ce26e7
FB
7003 gen_set_label(l1);
7004 gen_jmp_im(tval);
7005 gen_set_label(l2);
7006 gen_eob(s);
7007 }
2c0262af
FB
7008 break;
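 /* Annotation (added): layout of the loop/jecxz block above: l1 is the
    taken-branch target (jump to tval), l3 marks the not-taken path that
    continues at next_eip (used explicitly by loopnz/loopz when ECX
    reaches zero), and l2 is the common exit once eip has been written. */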
7009 case 0x130: /* wrmsr */
7010 case 0x132: /* rdmsr */
7011 if (s->cpl != 0) {
7012 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7013 } else {
773cdfcc 7014 gen_update_cc_op(s);
872929aa 7015 gen_jmp_im(pc_start - s->cs_base);
0573fbfc 7016 if (b & 2) {
4a7443be 7017 gen_helper_rdmsr(cpu_env);
0573fbfc 7018 } else {
4a7443be 7019 gen_helper_wrmsr(cpu_env);
0573fbfc 7020 }
2c0262af
FB
7021 }
7022 break;
7023 case 0x131: /* rdtsc */
773cdfcc 7024 gen_update_cc_op(s);
ecada8a2 7025 gen_jmp_im(pc_start - s->cs_base);
bd79255d 7026 if (s->tb->cflags & CF_USE_ICOUNT) {
efade670 7027 gen_io_start();
bd79255d 7028 }
4a7443be 7029 gen_helper_rdtsc(cpu_env);
bd79255d 7030 if (s->tb->cflags & CF_USE_ICOUNT) {
efade670
PB
7031 gen_io_end();
7032 gen_jmp(s, s->pc - s->cs_base);
7033 }
2c0262af 7034 break;
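 /* Annotation (added): for RDTSC above, when icount is in use
    (CF_USE_ICOUNT) the helper is bracketed by gen_io_start/gen_io_end and
    the block is ended with gen_jmp, so the instruction count stays
    consistent around the timer read. */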
df01e0fc 7035 case 0x133: /* rdpmc */
773cdfcc 7036 gen_update_cc_op(s);
df01e0fc 7037 gen_jmp_im(pc_start - s->cs_base);
4a7443be 7038 gen_helper_rdpmc(cpu_env);
df01e0fc 7039 break;
023fe10d 7040 case 0x134: /* sysenter */
2436b61a 7041 /* For Intel SYSENTER is valid on 64-bit */
0af10c86 7042 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
14ce26e7 7043 goto illegal_op;
023fe10d
FB
7044 if (!s->pe) {
7045 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7046 } else {
2999a0b2 7047 gen_helper_sysenter(cpu_env);
023fe10d
FB
7048 gen_eob(s);
7049 }
7050 break;
7051 case 0x135: /* sysexit */
2436b61a 7052 /* For Intel SYSEXIT is valid on 64-bit */
0af10c86 7053 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
14ce26e7 7054 goto illegal_op;
023fe10d
FB
7055 if (!s->pe) {
7056 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7057 } else {
ab4e4aec 7058 gen_helper_sysexit(cpu_env, tcg_const_i32(dflag - 1));
023fe10d
FB
7059 gen_eob(s);
7060 }
7061 break;
14ce26e7
FB
7062#ifdef TARGET_X86_64
7063 case 0x105: /* syscall */
7064 /* XXX: is it usable in real mode ? */
728d803b 7065 gen_update_cc_op(s);
14ce26e7 7066 gen_jmp_im(pc_start - s->cs_base);
2999a0b2 7067 gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start));
14ce26e7
FB
7068 gen_eob(s);
7069 break;
7070 case 0x107: /* sysret */
7071 if (!s->pe) {
7072 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7073 } else {
ab4e4aec 7074 gen_helper_sysret(cpu_env, tcg_const_i32(dflag - 1));
aba9d61e 7075 /* condition codes are modified only in long mode */
3ca51d07
RH
7076 if (s->lma) {
7077 set_cc_op(s, CC_OP_EFLAGS);
7078 }
14ce26e7
FB
7079 gen_eob(s);
7080 }
7081 break;
7082#endif
2c0262af 7083 case 0x1a2: /* cpuid */
773cdfcc 7084 gen_update_cc_op(s);
9575cb94 7085 gen_jmp_im(pc_start - s->cs_base);
4a7443be 7086 gen_helper_cpuid(cpu_env);
2c0262af
FB
7087 break;
7088 case 0xf4: /* hlt */
7089 if (s->cpl != 0) {
7090 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7091 } else {
773cdfcc 7092 gen_update_cc_op(s);
94451178 7093 gen_jmp_im(pc_start - s->cs_base);
4a7443be 7094 gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
5779406a 7095 s->is_jmp = DISAS_TB_JUMP;
2c0262af
FB
7096 }
7097 break;
7098 case 0x100:
0af10c86 7099 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
7100 mod = (modrm >> 6) & 3;
7101 op = (modrm >> 3) & 7;
7102 switch(op) {
7103 case 0: /* sldt */
f115e911
FB
7104 if (!s->pe || s->vm86)
7105 goto illegal_op;
872929aa 7106 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
651ba608 7107 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector));
ab4e4aec 7108 ot = mod == 3 ? dflag : MO_16;
0af10c86 7109 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
2c0262af
FB
7110 break;
7111 case 2: /* lldt */
f115e911
FB
7112 if (!s->pe || s->vm86)
7113 goto illegal_op;
2c0262af
FB
7114 if (s->cpl != 0) {
7115 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7116 } else {
872929aa 7117 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
4ba9938c 7118 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
b6abf97d 7119 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2999a0b2 7120 gen_helper_lldt(cpu_env, cpu_tmp2_i32);
2c0262af
FB
7121 }
7122 break;
7123 case 1: /* str */
f115e911
FB
7124 if (!s->pe || s->vm86)
7125 goto illegal_op;
872929aa 7126 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
651ba608 7127 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector));
ab4e4aec 7128 ot = mod == 3 ? dflag : MO_16;
0af10c86 7129 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
2c0262af
FB
7130 break;
7131 case 3: /* ltr */
f115e911
FB
7132 if (!s->pe || s->vm86)
7133 goto illegal_op;
2c0262af
FB
7134 if (s->cpl != 0) {
7135 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7136 } else {
872929aa 7137 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
4ba9938c 7138 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
b6abf97d 7139 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2999a0b2 7140 gen_helper_ltr(cpu_env, cpu_tmp2_i32);
2c0262af
FB
7141 }
7142 break;
7143 case 4: /* verr */
7144 case 5: /* verw */
f115e911
FB
7145 if (!s->pe || s->vm86)
7146 goto illegal_op;
4ba9938c 7147 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
773cdfcc 7148 gen_update_cc_op(s);
2999a0b2
BS
7149 if (op == 4) {
7150 gen_helper_verr(cpu_env, cpu_T[0]);
7151 } else {
7152 gen_helper_verw(cpu_env, cpu_T[0]);
7153 }
3ca51d07 7154 set_cc_op(s, CC_OP_EFLAGS);
f115e911 7155 break;
2c0262af
FB
7156 default:
7157 goto illegal_op;
7158 }
7159 break;
7160 case 0x101:
0af10c86 7161 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
7162 mod = (modrm >> 6) & 3;
7163 op = (modrm >> 3) & 7;
3d7374c5 7164 rm = modrm & 7;
2c0262af
FB
7165 switch(op) {
7166 case 0: /* sgdt */
2c0262af
FB
7167 if (mod == 3)
7168 goto illegal_op;
872929aa 7169 gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
4eeb3939 7170 gen_lea_modrm(env, s, modrm);
651ba608 7171 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
fd8ca9f6 7172 gen_op_st_v(s, MO_16, cpu_T[0], cpu_A0);
aba9d61e 7173 gen_add_A0_im(s, 2);
651ba608 7174 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
ab4e4aec 7175 if (dflag == MO_16) {
f0706f0c
RH
7176 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffffff);
7177 }
fd8ca9f6 7178 gen_op_st_v(s, CODE64(s) + MO_32, cpu_T[0], cpu_A0);
2c0262af 7179 break;
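 /* Annotation (added): SGDT above stores the 16-bit limit first, then the
    base at offset 2; with a 16-bit operand size the base is truncated to
    24 bits, and in 64-bit mode a full 8-byte base is stored
    (CODE64(s) + MO_32). */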
3d7374c5
FB
7180 case 1:
7181 if (mod == 3) {
7182 switch (rm) {
7183 case 0: /* monitor */
7184 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7185 s->cpl != 0)
7186 goto illegal_op;
773cdfcc 7187 gen_update_cc_op(s);
3d7374c5 7188 gen_jmp_im(pc_start - s->cs_base);
1d71ddb1
RH
7189 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EAX]);
7190 gen_extu(s->aflag, cpu_A0);
3d7374c5 7191 gen_add_A0_ds_seg(s);
4a7443be 7192 gen_helper_monitor(cpu_env, cpu_A0);
3d7374c5
FB
7193 break;
7194 case 1: /* mwait */
7195 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7196 s->cpl != 0)
7197 goto illegal_op;
728d803b 7198 gen_update_cc_op(s);
94451178 7199 gen_jmp_im(pc_start - s->cs_base);
4a7443be 7200 gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
3d7374c5
FB
7201 gen_eob(s);
7202 break;
a9321a4d
PA
7203 case 2: /* clac */
7204 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7205 s->cpl != 0) {
7206 goto illegal_op;
7207 }
7208 gen_helper_clac(cpu_env);
7209 gen_jmp_im(s->pc - s->cs_base);
7210 gen_eob(s);
7211 break;
7212 case 3: /* stac */
7213 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7214 s->cpl != 0) {
7215 goto illegal_op;
7216 }
7217 gen_helper_stac(cpu_env);
7218 gen_jmp_im(s->pc - s->cs_base);
7219 gen_eob(s);
7220 break;
3d7374c5
FB
7221 default:
7222 goto illegal_op;
7223 }
7224 } else { /* sidt */
872929aa 7225 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
4eeb3939 7226 gen_lea_modrm(env, s, modrm);
651ba608 7227 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
fd8ca9f6 7228 gen_op_st_v(s, MO_16, cpu_T[0], cpu_A0);
3d7374c5 7229 gen_add_A0_im(s, 2);
651ba608 7230 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
ab4e4aec 7231 if (dflag == MO_16) {
f0706f0c
RH
7232 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffffff);
7233 }
fd8ca9f6 7234 gen_op_st_v(s, CODE64(s) + MO_32, cpu_T[0], cpu_A0);
3d7374c5
FB
7235 }
7236 break;
2c0262af
FB
7237 case 2: /* lgdt */
7238 case 3: /* lidt */
0573fbfc 7239 if (mod == 3) {
773cdfcc 7240 gen_update_cc_op(s);
872929aa 7241 gen_jmp_im(pc_start - s->cs_base);
0573fbfc
TS
7242 switch(rm) {
7243 case 0: /* VMRUN */
872929aa
FB
7244 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7245 goto illegal_op;
7246 if (s->cpl != 0) {
7247 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
0573fbfc 7248 break;
872929aa 7249 } else {
1d71ddb1 7250 gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag - 1),
a7812ae4 7251 tcg_const_i32(s->pc - pc_start));
db620f46 7252 tcg_gen_exit_tb(0);
5779406a 7253 s->is_jmp = DISAS_TB_JUMP;
872929aa 7254 }
0573fbfc
TS
7255 break;
7256 case 1: /* VMMCALL */
872929aa
FB
7257 if (!(s->flags & HF_SVME_MASK))
7258 goto illegal_op;
052e80d5 7259 gen_helper_vmmcall(cpu_env);
0573fbfc
TS
7260 break;
7261 case 2: /* VMLOAD */
872929aa
FB
7262 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7263 goto illegal_op;
7264 if (s->cpl != 0) {
7265 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7266 break;
7267 } else {
1d71ddb1 7268 gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag - 1));
872929aa 7269 }
0573fbfc
TS
7270 break;
7271 case 3: /* VMSAVE */
872929aa
FB
7272 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7273 goto illegal_op;
7274 if (s->cpl != 0) {
7275 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7276 break;
7277 } else {
1d71ddb1 7278 gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag - 1));
872929aa 7279 }
0573fbfc
TS
7280 break;
7281 case 4: /* STGI */
872929aa
FB
7282 if ((!(s->flags & HF_SVME_MASK) &&
7283 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7284 !s->pe)
7285 goto illegal_op;
7286 if (s->cpl != 0) {
7287 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7288 break;
7289 } else {
052e80d5 7290 gen_helper_stgi(cpu_env);
872929aa 7291 }
0573fbfc
TS
7292 break;
7293 case 5: /* CLGI */
872929aa
FB
7294 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7295 goto illegal_op;
7296 if (s->cpl != 0) {
7297 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7298 break;
7299 } else {
052e80d5 7300 gen_helper_clgi(cpu_env);
872929aa 7301 }
0573fbfc
TS
7302 break;
7303 case 6: /* SKINIT */
872929aa
FB
7304 if ((!(s->flags & HF_SVME_MASK) &&
7305 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7306 !s->pe)
7307 goto illegal_op;
052e80d5 7308 gen_helper_skinit(cpu_env);
0573fbfc
TS
7309 break;
7310 case 7: /* INVLPGA */
872929aa
FB
7311 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7312 goto illegal_op;
7313 if (s->cpl != 0) {
7314 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7315 break;
7316 } else {
1d71ddb1
RH
7317 gen_helper_invlpga(cpu_env,
7318 tcg_const_i32(s->aflag - 1));
872929aa 7319 }
0573fbfc
TS
7320 break;
7321 default:
7322 goto illegal_op;
7323 }
7324 } else if (s->cpl != 0) {
2c0262af
FB
7325 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7326 } else {
872929aa
FB
7327 gen_svm_check_intercept(s, pc_start,
7328 op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
4eeb3939 7329 gen_lea_modrm(env, s, modrm);
0f712e10 7330 gen_op_ld_v(s, MO_16, cpu_T[1], cpu_A0);
aba9d61e 7331 gen_add_A0_im(s, 2);
909be183 7332 gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T[0], cpu_A0);
ab4e4aec 7333 if (dflag == MO_16) {
f0706f0c
RH
7334 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffffff);
7335 }
2c0262af 7336 if (op == 2) {
651ba608
FB
7337 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base));
7338 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit));
2c0262af 7339 } else {
651ba608
FB
7340 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base));
7341 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit));
2c0262af
FB
7342 }
7343 }
7344 break;
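 /* Annotation (added): for LGDT/LIDT above, a memory operand loads the
    16-bit limit first and the base second, mirroring SGDT/SIDT; a
    register operand (mod == 3) reuses the same opcode space for the SVM
    instructions (VMRUN, VMMCALL, VMLOAD, VMSAVE, STGI, CLGI, SKINIT,
    INVLPGA) handled in the switch on rm. */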
7345 case 4: /* smsw */
872929aa 7346 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
e2542fe2 7347#if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
f60d2728 7348 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4);
7349#else
651ba608 7350 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
f60d2728 7351#endif
4ba9938c 7352 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 1);
2c0262af
FB
7353 break;
7354 case 6: /* lmsw */
7355 if (s->cpl != 0) {
7356 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7357 } else {
872929aa 7358 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
4ba9938c 7359 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
4a7443be 7360 gen_helper_lmsw(cpu_env, cpu_T[0]);
14ce26e7 7361 gen_jmp_im(s->pc - s->cs_base);
d71b9a8b 7362 gen_eob(s);
2c0262af
FB
7363 }
7364 break;
1b050077
AP
7365 case 7:
7366 if (mod != 3) { /* invlpg */
7367 if (s->cpl != 0) {
7368 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7369 } else {
773cdfcc 7370 gen_update_cc_op(s);
1b050077 7371 gen_jmp_im(pc_start - s->cs_base);
4eeb3939 7372 gen_lea_modrm(env, s, modrm);
4a7443be 7373 gen_helper_invlpg(cpu_env, cpu_A0);
1b050077
AP
7374 gen_jmp_im(s->pc - s->cs_base);
7375 gen_eob(s);
7376 }
2c0262af 7377 } else {
1b050077
AP
7378 switch (rm) {
7379 case 0: /* swapgs */
14ce26e7 7380#ifdef TARGET_X86_64
1b050077
AP
7381 if (CODE64(s)) {
7382 if (s->cpl != 0) {
7383 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7384 } else {
7385 tcg_gen_ld_tl(cpu_T[0], cpu_env,
7386 offsetof(CPUX86State,segs[R_GS].base));
7387 tcg_gen_ld_tl(cpu_T[1], cpu_env,
7388 offsetof(CPUX86State,kernelgsbase));
7389 tcg_gen_st_tl(cpu_T[1], cpu_env,
7390 offsetof(CPUX86State,segs[R_GS].base));
7391 tcg_gen_st_tl(cpu_T[0], cpu_env,
7392 offsetof(CPUX86State,kernelgsbase));
7393 }
5fafdf24 7394 } else
14ce26e7
FB
7395#endif
7396 {
7397 goto illegal_op;
7398 }
1b050077
AP
7399 break;
7400 case 1: /* rdtscp */
7401 if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP))
7402 goto illegal_op;
773cdfcc 7403 gen_update_cc_op(s);
9575cb94 7404 gen_jmp_im(pc_start - s->cs_base);
bd79255d 7405 if (s->tb->cflags & CF_USE_ICOUNT) {
1b050077 7406 gen_io_start();
bd79255d 7407 }
4a7443be 7408 gen_helper_rdtscp(cpu_env);
bd79255d 7409 if (s->tb->cflags & CF_USE_ICOUNT) {
1b050077
AP
7410 gen_io_end();
7411 gen_jmp(s, s->pc - s->cs_base);
7412 }
7413 break;
7414 default:
7415 goto illegal_op;
14ce26e7 7416 }
2c0262af
FB
7417 }
7418 break;
7419 default:
7420 goto illegal_op;
7421 }
7422 break;
3415a4dd
FB
7423 case 0x108: /* invd */
7424 case 0x109: /* wbinvd */
7425 if (s->cpl != 0) {
7426 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7427 } else {
872929aa 7428 gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
3415a4dd
FB
7429 /* nothing to do */
7430 }
7431 break;
14ce26e7
FB
7432 case 0x63: /* arpl or movslS (x86_64) */
7433#ifdef TARGET_X86_64
7434 if (CODE64(s)) {
7435 int d_ot;
7436 /* d_ot is the size of destination */
ab4e4aec 7437 d_ot = dflag;
14ce26e7 7438
0af10c86 7439 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7
FB
7440 reg = ((modrm >> 3) & 7) | rex_r;
7441 mod = (modrm >> 6) & 3;
7442 rm = (modrm & 7) | REX_B(s);
3b46e624 7443
14ce26e7 7444 if (mod == 3) {
c56baccf 7445 gen_op_mov_v_reg(MO_32, cpu_T[0], rm);
14ce26e7 7446 /* sign extend */
4ba9938c 7447 if (d_ot == MO_64) {
e108dd01 7448 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4ba9938c 7449 }
480a762d 7450 gen_op_mov_reg_v(d_ot, reg, cpu_T[0]);
14ce26e7 7451 } else {
4eeb3939 7452 gen_lea_modrm(env, s, modrm);
4b1fe067 7453 gen_op_ld_v(s, MO_32 | MO_SIGN, cpu_T[0], cpu_A0);
480a762d 7454 gen_op_mov_reg_v(d_ot, reg, cpu_T[0]);
14ce26e7 7455 }
5fafdf24 7456 } else
14ce26e7
FB
7457#endif
7458 {
42a268c2 7459 TCGLabel *label1;
49d9fdcc 7460 TCGv t0, t1, t2, a0;
1e4840bf 7461
14ce26e7
FB
7462 if (!s->pe || s->vm86)
7463 goto illegal_op;
a7812ae4
PB
7464 t0 = tcg_temp_local_new();
7465 t1 = tcg_temp_local_new();
7466 t2 = tcg_temp_local_new();
4ba9938c 7467 ot = MO_16;
0af10c86 7468 modrm = cpu_ldub_code(env, s->pc++);
14ce26e7
FB
7469 reg = (modrm >> 3) & 7;
7470 mod = (modrm >> 6) & 3;
7471 rm = modrm & 7;
7472 if (mod != 3) {
4eeb3939 7473 gen_lea_modrm(env, s, modrm);
323d1876 7474 gen_op_ld_v(s, ot, t0, cpu_A0);
49d9fdcc
LD
7475 a0 = tcg_temp_local_new();
7476 tcg_gen_mov_tl(a0, cpu_A0);
14ce26e7 7477 } else {
1e4840bf 7478 gen_op_mov_v_reg(ot, t0, rm);
49d9fdcc 7479 TCGV_UNUSED(a0);
14ce26e7 7480 }
1e4840bf
FB
7481 gen_op_mov_v_reg(ot, t1, reg);
7482 tcg_gen_andi_tl(cpu_tmp0, t0, 3);
7483 tcg_gen_andi_tl(t1, t1, 3);
7484 tcg_gen_movi_tl(t2, 0);
3bd7da9e 7485 label1 = gen_new_label();
1e4840bf
FB
7486 tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
7487 tcg_gen_andi_tl(t0, t0, ~3);
7488 tcg_gen_or_tl(t0, t0, t1);
7489 tcg_gen_movi_tl(t2, CC_Z);
3bd7da9e 7490 gen_set_label(label1);
14ce26e7 7491 if (mod != 3) {
323d1876 7492 gen_op_st_v(s, ot, t0, a0);
49d9fdcc
LD
7493 tcg_temp_free(a0);
7494 } else {
1e4840bf 7495 gen_op_mov_reg_v(ot, rm, t0);
14ce26e7 7496 }
d229edce 7497 gen_compute_eflags(s);
3bd7da9e 7498 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
1e4840bf 7499 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
1e4840bf
FB
7500 tcg_temp_free(t0);
7501 tcg_temp_free(t1);
7502 tcg_temp_free(t2);
f115e911 7503 }
f115e911 7504 break;
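 /* Annotation (added): for ARPL above (legacy mode), if the destination
    selector's RPL is lower than the source's, the RPL bits are raised and
    t2 is set to CC_Z; after gen_compute_eflags the Z bit in cc_src is
    cleared and replaced by t2, so ZF reports whether an adjustment was
    made. */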
2c0262af
FB
7505 case 0x102: /* lar */
7506 case 0x103: /* lsl */
cec6843e 7507 {
42a268c2 7508 TCGLabel *label1;
1e4840bf 7509 TCGv t0;
cec6843e
FB
7510 if (!s->pe || s->vm86)
7511 goto illegal_op;
ab4e4aec 7512 ot = dflag != MO_16 ? MO_32 : MO_16;
0af10c86 7513 modrm = cpu_ldub_code(env, s->pc++);
cec6843e 7514 reg = ((modrm >> 3) & 7) | rex_r;
4ba9938c 7515 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
a7812ae4 7516 t0 = tcg_temp_local_new();
773cdfcc 7517 gen_update_cc_op(s);
2999a0b2
BS
7518 if (b == 0x102) {
7519 gen_helper_lar(t0, cpu_env, cpu_T[0]);
7520 } else {
7521 gen_helper_lsl(t0, cpu_env, cpu_T[0]);
7522 }
cec6843e
FB
7523 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
7524 label1 = gen_new_label();
cb63669a 7525 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
1e4840bf 7526 gen_op_mov_reg_v(ot, reg, t0);
cec6843e 7527 gen_set_label(label1);
3ca51d07 7528 set_cc_op(s, CC_OP_EFLAGS);
1e4840bf 7529 tcg_temp_free(t0);
cec6843e 7530 }
2c0262af
FB
7531 break;
7532 case 0x118:
0af10c86 7533 modrm = cpu_ldub_code(env, s->pc++);
2c0262af
FB
7534 mod = (modrm >> 6) & 3;
7535 op = (modrm >> 3) & 7;
7536 switch(op) {
7537 case 0: /* prefetchnta */
7538 case 1: /* prefetcht0 */
7539 case 2: /* prefetcht1 */
7540 case 3: /* prefetcht2 */
7541 if (mod == 3)
7542 goto illegal_op;
4eeb3939 7543 gen_lea_modrm(env, s, modrm);
2c0262af
FB
7544 /* nothing more to do */
7545 break;
e17a36ce 7546 default: /* nop (multi byte) */
0af10c86 7547 gen_nop_modrm(env, s, modrm);
e17a36ce 7548 break;
2c0262af
FB
7549 }
7550 break;
e17a36ce 7551 case 0x119 ... 0x11f: /* nop (multi byte) */
0af10c86
BS
7552 modrm = cpu_ldub_code(env, s->pc++);
7553 gen_nop_modrm(env, s, modrm);
e17a36ce 7554 break;
2c0262af
FB
7555 case 0x120: /* mov reg, crN */
7556 case 0x122: /* mov crN, reg */
7557 if (s->cpl != 0) {
7558 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7559 } else {
0af10c86 7560 modrm = cpu_ldub_code(env, s->pc++);
5c73b757
MO
7561 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7562 * AMD documentation (24594.pdf) and testing of
7563 * intel 386 and 486 processors all show that the mod bits
7564 * are assumed to be 1's, regardless of actual values.
7565 */
14ce26e7
FB
7566 rm = (modrm & 7) | REX_B(s);
7567 reg = ((modrm >> 3) & 7) | rex_r;
7568 if (CODE64(s))
4ba9938c 7569 ot = MO_64;
14ce26e7 7570 else
4ba9938c 7571 ot = MO_32;
ccd59d09
AP
7572 if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
7573 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
7574 reg = 8;
7575 }
2c0262af
FB
7576 switch(reg) {
7577 case 0:
7578 case 2:
7579 case 3:
7580 case 4:
9230e66e 7581 case 8:
773cdfcc 7582 gen_update_cc_op(s);
872929aa 7583 gen_jmp_im(pc_start - s->cs_base);
2c0262af 7584 if (b & 2) {
c56baccf 7585 gen_op_mov_v_reg(ot, cpu_T[0], rm);
4a7443be
BS
7586 gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
7587 cpu_T[0]);
14ce26e7 7588 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
7589 gen_eob(s);
7590 } else {
4a7443be 7591 gen_helper_read_crN(cpu_T[0], cpu_env, tcg_const_i32(reg));
480a762d 7592 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
2c0262af
FB
7593 }
7594 break;
7595 default:
7596 goto illegal_op;
7597 }
7598 }
7599 break;
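 /* Annotation (added): for mov to/from CRn above, only CR0/CR2/CR3/CR4
    and CR8 are accepted; a LOCK prefix on the CR0 encoding selects CR8
    when CPUID_EXT3_CR8LEG is present (AMD alternative encoding).  Writes
    go through the write_crN helper and end the block, since they can
    change state the translator depends on. */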
7600 case 0x121: /* mov reg, drN */
7601 case 0x123: /* mov drN, reg */
7602 if (s->cpl != 0) {
7603 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7604 } else {
0af10c86 7605 modrm = cpu_ldub_code(env, s->pc++);
5c73b757
MO
7606 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7607 * AMD documentation (24594.pdf) and testing of
7608 * intel 386 and 486 processors all show that the mod bits
7609 * are assumed to be 1's, regardless of actual values.
7610 */
14ce26e7
FB
7611 rm = (modrm & 7) | REX_B(s);
7612 reg = ((modrm >> 3) & 7) | rex_r;
7613 if (CODE64(s))
4ba9938c 7614 ot = MO_64;
14ce26e7 7615 else
4ba9938c 7616 ot = MO_32;
2c0262af 7617 /* XXX: do it dynamically with CR4.DE bit */
14ce26e7 7618 if (reg == 4 || reg == 5 || reg >= 8)
2c0262af
FB
7619 goto illegal_op;
7620 if (b & 2) {
0573fbfc 7621 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
c56baccf 7622 gen_op_mov_v_reg(ot, cpu_T[0], rm);
4a7443be 7623 gen_helper_movl_drN_T0(cpu_env, tcg_const_i32(reg), cpu_T[0]);
14ce26e7 7624 gen_jmp_im(s->pc - s->cs_base);
2c0262af
FB
7625 gen_eob(s);
7626 } else {
0573fbfc 7627 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
651ba608 7628 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,dr[reg]));
480a762d 7629 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
2c0262af
FB
7630 }
7631 }
7632 break;
7633 case 0x106: /* clts */
7634 if (s->cpl != 0) {
7635 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7636 } else {
0573fbfc 7637 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
f0967a1a 7638 gen_helper_clts(cpu_env);
7eee2a50 7639 /* abort block because static cpu state changed */
14ce26e7 7640 gen_jmp_im(s->pc - s->cs_base);
7eee2a50 7641 gen_eob(s);
2c0262af
FB
7642 }
7643 break;
222a3336 7644 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
664e0f19
FB
7645 case 0x1c3: /* MOVNTI reg, mem */
7646 if (!(s->cpuid_features & CPUID_SSE2))
14ce26e7 7647 goto illegal_op;
ab4e4aec 7648 ot = mo_64_32(dflag);
0af10c86 7649 modrm = cpu_ldub_code(env, s->pc++);
664e0f19
FB
7650 mod = (modrm >> 6) & 3;
7651 if (mod == 3)
7652 goto illegal_op;
7653 reg = ((modrm >> 3) & 7) | rex_r;
7654 /* generate a generic store */
0af10c86 7655 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
14ce26e7 7656 break;
664e0f19 7657 case 0x1ae:
0af10c86 7658 modrm = cpu_ldub_code(env, s->pc++);
664e0f19
FB
7659 mod = (modrm >> 6) & 3;
7660 op = (modrm >> 3) & 7;
7661 switch(op) {
7662 case 0: /* fxsave */
5fafdf24 7663 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
09d85fb8 7664 (s->prefix & PREFIX_LOCK))
14ce26e7 7665 goto illegal_op;
09d85fb8 7666 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
0fd14b72
FB
7667 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7668 break;
7669 }
4eeb3939 7670 gen_lea_modrm(env, s, modrm);
ab4e4aec 7671 gen_helper_fxsave(cpu_env, cpu_A0, tcg_const_i32(dflag == MO_64));
664e0f19
FB
7672 break;
7673 case 1: /* fxrstor */
5fafdf24 7674 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
09d85fb8 7675 (s->prefix & PREFIX_LOCK))
14ce26e7 7676 goto illegal_op;
09d85fb8 7677 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
0fd14b72
FB
7678 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7679 break;
7680 }
4eeb3939 7681 gen_lea_modrm(env, s, modrm);
ab4e4aec 7682 gen_helper_fxrstor(cpu_env, cpu_A0, tcg_const_i32(dflag == MO_64));
664e0f19
FB
7683 break;
7684 case 2: /* ldmxcsr */
7685 case 3: /* stmxcsr */
7686 if (s->flags & HF_TS_MASK) {
7687 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7688 break;
14ce26e7 7689 }
664e0f19
FB
7690 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
7691 mod == 3)
14ce26e7 7692 goto illegal_op;
4eeb3939 7693 gen_lea_modrm(env, s, modrm);
664e0f19 7694 if (op == 2) {
80b02013
RH
7695 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
7696 s->mem_index, MO_LEUL);
d3eb5eae 7697 gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
14ce26e7 7698 } else {
651ba608 7699 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
fd8ca9f6 7700 gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0);
14ce26e7 7701 }
664e0f19
FB
7702 break;
7703 case 5: /* lfence */
7704 case 6: /* mfence */
8001c294 7705 if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE2))
664e0f19
FB
7706 goto illegal_op;
7707 break;
8f091a59
FB
7708 case 7: /* sfence / clflush */
7709 if ((modrm & 0xc7) == 0xc0) {
7710 /* sfence */
a35f3ec7 7711 /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
8f091a59
FB
7712 if (!(s->cpuid_features & CPUID_SSE))
7713 goto illegal_op;
7714 } else {
7715 /* clflush */
7716 if (!(s->cpuid_features & CPUID_CLFLUSH))
7717 goto illegal_op;
4eeb3939 7718 gen_lea_modrm(env, s, modrm);
8f091a59
FB
7719 }
7720 break;
664e0f19 7721 default:
14ce26e7
FB
7722 goto illegal_op;
7723 }
7724 break;
a35f3ec7 7725 case 0x10d: /* 3DNow! prefetch(w) */
0af10c86 7726 modrm = cpu_ldub_code(env, s->pc++);
a35f3ec7
AJ
7727 mod = (modrm >> 6) & 3;
7728 if (mod == 3)
7729 goto illegal_op;
4eeb3939 7730 gen_lea_modrm(env, s, modrm);
8f091a59
FB
7731 /* ignore for now */
7732 break;
3b21e03e 7733 case 0x1aa: /* rsm */
872929aa 7734 gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
3b21e03e
FB
7735 if (!(s->flags & HF_SMM_MASK))
7736 goto illegal_op;
728d803b 7737 gen_update_cc_op(s);
3b21e03e 7738 gen_jmp_im(s->pc - s->cs_base);
608badfc 7739 gen_helper_rsm(cpu_env);
3b21e03e
FB
7740 gen_eob(s);
7741 break;
222a3336
AZ
7742 case 0x1b8: /* SSE4.2 popcnt */
7743 if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
7744 PREFIX_REPZ)
7745 goto illegal_op;
7746 if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
7747 goto illegal_op;
7748
0af10c86 7749 modrm = cpu_ldub_code(env, s->pc++);
8b4a3df8 7750 reg = ((modrm >> 3) & 7) | rex_r;
222a3336 7751
ab4e4aec 7752 if (s->prefix & PREFIX_DATA) {
4ba9938c 7753 ot = MO_16;
ab4e4aec
RH
7754 } else {
7755 ot = mo_64_32(dflag);
7756 }
222a3336 7757
0af10c86 7758 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
d3eb5eae 7759 gen_helper_popcnt(cpu_T[0], cpu_env, cpu_T[0], tcg_const_i32(ot));
480a762d 7760 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
fdb0d09d 7761
3ca51d07 7762 set_cc_op(s, CC_OP_EFLAGS);
222a3336 7763 break;
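 /* Annotation (added): POPCNT above requires exactly the F3 (REPZ)
    prefix among REPZ/REPNZ/LOCK and the CPUID_EXT_POPCNT feature; the
    flag state is left to the helper, hence set_cc_op(CC_OP_EFLAGS). */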
a35f3ec7
AJ
7764 case 0x10e ... 0x10f:
7765 /* 3DNow! instructions, ignore prefixes */
7766 s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
664e0f19
FB
7767 case 0x110 ... 0x117:
7768 case 0x128 ... 0x12f:
4242b1bd 7769 case 0x138 ... 0x13a:
d9f4bb27 7770 case 0x150 ... 0x179:
664e0f19
FB
7771 case 0x17c ... 0x17f:
7772 case 0x1c2:
7773 case 0x1c4 ... 0x1c6:
7774 case 0x1d0 ... 0x1fe:
0af10c86 7775 gen_sse(env, s, b, pc_start, rex_r);
664e0f19 7776 break;
2c0262af
FB
7777 default:
7778 goto illegal_op;
7779 }
7780 /* lock generation */
7781 if (s->prefix & PREFIX_LOCK)
a7812ae4 7782 gen_helper_unlock();
2c0262af
FB
7783 return s->pc;
7784 illegal_op:
ab1f142b 7785 if (s->prefix & PREFIX_LOCK)
a7812ae4 7786 gen_helper_unlock();
2c0262af
FB
7787 /* XXX: ensure that no lock was generated */
7788 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
7789 return s->pc;
7790}
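/* Annotation (added): disas_insn returns the address of the next
   instruction to translate.  On an undecodable encoding it releases any
   lock taken for the LOCK prefix and raises #UD (EXCP06_ILLOP) relative
   to the start of the faulting insn. */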
7791
2c0262af
FB
7792void optimize_flags_init(void)
7793{
fac0aff9
RH
7794 static const char reg_names[CPU_NB_REGS][4] = {
7795#ifdef TARGET_X86_64
7796 [R_EAX] = "rax",
7797 [R_EBX] = "rbx",
7798 [R_ECX] = "rcx",
7799 [R_EDX] = "rdx",
7800 [R_ESI] = "rsi",
7801 [R_EDI] = "rdi",
7802 [R_EBP] = "rbp",
7803 [R_ESP] = "rsp",
7804 [8] = "r8",
7805 [9] = "r9",
7806 [10] = "r10",
7807 [11] = "r11",
7808 [12] = "r12",
7809 [13] = "r13",
7810 [14] = "r14",
7811 [15] = "r15",
7812#else
7813 [R_EAX] = "eax",
7814 [R_EBX] = "ebx",
7815 [R_ECX] = "ecx",
7816 [R_EDX] = "edx",
7817 [R_ESI] = "esi",
7818 [R_EDI] = "edi",
7819 [R_EBP] = "ebp",
7820 [R_ESP] = "esp",
7821#endif
7822 };
7823 int i;
7824
a7812ae4
PB
7825 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
7826 cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
317ac620 7827 offsetof(CPUX86State, cc_op), "cc_op");
317ac620 7828 cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_dst),
a7812ae4 7829 "cc_dst");
a3251186
RH
7830 cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src),
7831 "cc_src");
988c3eb0
RH
7832 cpu_cc_src2 = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src2),
7833 "cc_src2");
437a88a5 7834
fac0aff9
RH
7835 for (i = 0; i < CPU_NB_REGS; ++i) {
7836 cpu_regs[i] = tcg_global_mem_new(TCG_AREG0,
7837 offsetof(CPUX86State, regs[i]),
7838 reg_names[i]);
7839 }
677ef623
FK
7840
7841 helper_lock_init();
2c0262af
FB
7842}
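/* Annotation (added): optimize_flags_init registers fixed TCG globals
   backed by fields of CPUX86State (cc_op, cc_dst, cc_src, cc_src2 and
   the general-purpose registers), so generated code can reference them
   directly instead of going through explicit loads and stores. */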
7843
7844/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
7845 basic block 'tb'. If search_pc is TRUE, also generate PC
7846 information for each intermediate instruction. */
467215c2 7847static inline void gen_intermediate_code_internal(X86CPU *cpu,
2cfc5f17 7848 TranslationBlock *tb,
467215c2 7849 bool search_pc)
2c0262af 7850{
ed2803da 7851 CPUState *cs = CPU(cpu);
467215c2 7852 CPUX86State *env = &cpu->env;
2c0262af 7853 DisasContext dc1, *dc = &dc1;
14ce26e7 7854 target_ulong pc_ptr;
a1d1bb31 7855 CPUBreakpoint *bp;
7f5b7d3e 7856 int j, lj;
c068688b 7857 uint64_t flags;
14ce26e7
FB
7858 target_ulong pc_start;
7859 target_ulong cs_base;
2e70f6ef
PB
7860 int num_insns;
7861 int max_insns;
3b46e624 7862
2c0262af 7863 /* generate intermediate code */
14ce26e7
FB
7864 pc_start = tb->pc;
7865 cs_base = tb->cs_base;
2c0262af 7866 flags = tb->flags;
3a1d9b8b 7867
4f31916f 7868 dc->pe = (flags >> HF_PE_SHIFT) & 1;
2c0262af
FB
7869 dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
7870 dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
7871 dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
7872 dc->f_st = 0;
7873 dc->vm86 = (flags >> VM_SHIFT) & 1;
7874 dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
7875 dc->iopl = (flags >> IOPL_SHIFT) & 3;
7876 dc->tf = (flags >> TF_SHIFT) & 1;
ed2803da 7877 dc->singlestep_enabled = cs->singlestep_enabled;
2c0262af 7878 dc->cc_op = CC_OP_DYNAMIC;
e207582f 7879 dc->cc_op_dirty = false;
2c0262af
FB
7880 dc->cs_base = cs_base;
7881 dc->tb = tb;
7882 dc->popl_esp_hack = 0;
7883 /* select memory access functions */
7884 dc->mem_index = 0;
7885 if (flags & HF_SOFTMMU_MASK) {
97ed5ccd 7886 dc->mem_index = cpu_mmu_index(env, false);
2c0262af 7887 }
0514ef2f
EH
7888 dc->cpuid_features = env->features[FEAT_1_EDX];
7889 dc->cpuid_ext_features = env->features[FEAT_1_ECX];
7890 dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
7891 dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
7892 dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
14ce26e7
FB
7893#ifdef TARGET_X86_64
7894 dc->lma = (flags >> HF_LMA_SHIFT) & 1;
7895 dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
7896#endif
7eee2a50 7897 dc->flags = flags;
ed2803da 7898 dc->jmp_opt = !(dc->tf || cs->singlestep_enabled ||
a2cc3b24 7899 (flags & HF_INHIBIT_IRQ_MASK)
415fa2ea 7900#ifndef CONFIG_SOFTMMU
2c0262af
FB
7901 || (flags & HF_SOFTMMU_MASK)
7902#endif
7903 );
c4d4525c
PD
7904 /* Do not optimize repz jumps at all in icount mode, because
7905 rep movsS instructions are executed along different paths
7906 in the !repz_opt and repz_opt modes. The first one was
7907 previously used everywhere except in single-step mode. This
7908 setting disables the jump optimization so that the control
7909 paths become equivalent in run and single-step modes.
7910 Now there is no jump optimization for repz in
7911 record/replay modes, and there is always an additional
7912 step for ecx=0 when icount is enabled.
7913 */
bd79255d 7914 dc->repz_opt = !dc->jmp_opt && !(tb->cflags & CF_USE_ICOUNT);
4f31916f
FB
7915#if 0
7916 /* check addseg logic */
dc196a57 7917 if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
4f31916f
FB
7918 printf("ERROR addseg\n");
7919#endif
7920
a7812ae4
PB
7921 cpu_T[0] = tcg_temp_new();
7922 cpu_T[1] = tcg_temp_new();
7923 cpu_A0 = tcg_temp_new();
a7812ae4
PB
7924
7925 cpu_tmp0 = tcg_temp_new();
7926 cpu_tmp1_i64 = tcg_temp_new_i64();
7927 cpu_tmp2_i32 = tcg_temp_new_i32();
7928 cpu_tmp3_i32 = tcg_temp_new_i32();
7929 cpu_tmp4 = tcg_temp_new();
a7812ae4
PB
7930 cpu_ptr0 = tcg_temp_new_ptr();
7931 cpu_ptr1 = tcg_temp_new_ptr();
a3251186 7932 cpu_cc_srcT = tcg_temp_local_new();
57fec1fe 7933
2c0262af
FB
7934 dc->is_jmp = DISAS_NEXT;
7935 pc_ptr = pc_start;
7936 lj = -1;
2e70f6ef
PB
7937 num_insns = 0;
7938 max_insns = tb->cflags & CF_COUNT_MASK;
7939 if (max_insns == 0)
7940 max_insns = CF_COUNT_MASK;
2c0262af 7941
cd42d5b2 7942 gen_tb_start(tb);
2c0262af 7943 for(;;) {
f0c3c505
AF
7944 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
7945 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
a2397807
JK
7946 if (bp->pc == pc_ptr &&
7947 !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
2c0262af 7948 gen_debug(dc, pc_ptr - dc->cs_base);
e64e3535 7949 goto done_generating;
2c0262af
FB
7950 }
7951 }
7952 }
7953 if (search_pc) {
fe700adb 7954 j = tcg_op_buf_count();
2c0262af
FB
7955 if (lj < j) {
7956 lj++;
7957 while (lj < j)
ab1103de 7958 tcg_ctx.gen_opc_instr_start[lj++] = 0;
2c0262af 7959 }
25983cad 7960 tcg_ctx.gen_opc_pc[lj] = pc_ptr;
2c0262af 7961 gen_opc_cc_op[lj] = dc->cc_op;
ab1103de 7962 tcg_ctx.gen_opc_instr_start[lj] = 1;
c9c99c22 7963 tcg_ctx.gen_opc_icount[lj] = num_insns;
2c0262af 7964 }
2e70f6ef
PB
7965 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
7966 gen_io_start();
7967
0af10c86 7968 pc_ptr = disas_insn(env, dc, pc_ptr);
2e70f6ef 7969 num_insns++;
2c0262af
FB
7970 /* stop translation if indicated */
7971 if (dc->is_jmp)
7972 break;
7973 /* in single-step mode, we generate only one instruction and
7974 then generate an exception */
a2cc3b24
FB
7975 /* if irqs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
7976 the flag and abort the translation to give the irqs a
7977 chance to happen */
5fafdf24 7978 if (dc->tf || dc->singlestep_enabled ||
2e70f6ef 7979 (flags & HF_INHIBIT_IRQ_MASK)) {
14ce26e7 7980 gen_jmp_im(pc_ptr - dc->cs_base);
2c0262af
FB
7981 gen_eob(dc);
7982 break;
7983 }
5b9efc39
PD
7984 /* Do not cross a page boundary in icount mode, since that
7985 can cause an exception. Only do this when the boundary is
7986 crossed by the first instruction in the block.
7987 If the current instruction has already crossed the boundary,
7988 that is fine, because an exception has not stopped this code.
7989 */
bd79255d 7990 if ((tb->cflags & CF_USE_ICOUNT)
5b9efc39
PD
7991 && ((pc_ptr & TARGET_PAGE_MASK)
7992 != ((pc_ptr + TARGET_MAX_INSN_SIZE - 1) & TARGET_PAGE_MASK)
7993 || (pc_ptr & ~TARGET_PAGE_MASK) == 0)) {
7994 gen_jmp_im(pc_ptr - dc->cs_base);
7995 gen_eob(dc);
7996 break;
7997 }
2c0262af 7998 /* if too long translation, stop generation too */
fe700adb 7999 if (tcg_op_buf_full() ||
2e70f6ef
PB
8000 (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
8001 num_insns >= max_insns) {
14ce26e7 8002 gen_jmp_im(pc_ptr - dc->cs_base);
2c0262af
FB
8003 gen_eob(dc);
8004 break;
8005 }
1b530a6d
AJ
8006 if (singlestep) {
8007 gen_jmp_im(pc_ptr - dc->cs_base);
8008 gen_eob(dc);
8009 break;
8010 }
2c0262af 8011 }
2e70f6ef
PB
8012 if (tb->cflags & CF_LAST_IO)
8013 gen_io_end();
e64e3535 8014done_generating:
806f352d 8015 gen_tb_end(tb, num_insns);
0a7df5da 8016
2c0262af
FB
8017 /* don't forget to fill in the last values */
8018 if (search_pc) {
fe700adb 8019 j = tcg_op_buf_count();
2c0262af
FB
8020 lj++;
8021 while (lj <= j)
ab1103de 8022 tcg_ctx.gen_opc_instr_start[lj++] = 0;
2c0262af 8023 }
3b46e624 8024
2c0262af 8025#ifdef DEBUG_DISAS
8fec2b8c 8026 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
14ce26e7 8027 int disas_flags;
93fcfe39
AL
8028 qemu_log("----------------\n");
8029 qemu_log("IN: %s\n", lookup_symbol(pc_start));
14ce26e7
FB
8030#ifdef TARGET_X86_64
8031 if (dc->code64)
8032 disas_flags = 2;
8033 else
8034#endif
8035 disas_flags = !dc->code32;
d49190c4 8036 log_target_disas(cs, pc_start, pc_ptr - pc_start, disas_flags);
93fcfe39 8037 qemu_log("\n");
2c0262af
FB
8038 }
8039#endif
8040
2e70f6ef 8041 if (!search_pc) {
2c0262af 8042 tb->size = pc_ptr - pc_start;
2e70f6ef
PB
8043 tb->icount = num_insns;
8044 }
2c0262af
FB
8045}
8046
317ac620 8047void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
2c0262af 8048{
467215c2 8049 gen_intermediate_code_internal(x86_env_get_cpu(env), tb, false);
2c0262af
FB
8050}
8051
317ac620 8052void gen_intermediate_code_pc(CPUX86State *env, TranslationBlock *tb)
2c0262af 8053{
467215c2 8054 gen_intermediate_code_internal(x86_env_get_cpu(env), tb, true);
2c0262af
FB
8055}
8056
317ac620 8057void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, int pc_pos)
d2856f1a
AJ
8058{
8059 int cc_op;
8060#ifdef DEBUG_DISAS
8fec2b8c 8061 if (qemu_loglevel_mask(CPU_LOG_TB_OP)) {
d2856f1a 8062 int i;
93fcfe39 8063 qemu_log("RESTORE:\n");
d2856f1a 8064 for(i = 0;i <= pc_pos; i++) {
ab1103de 8065 if (tcg_ctx.gen_opc_instr_start[i]) {
25983cad
EV
8066 qemu_log("0x%04x: " TARGET_FMT_lx "\n", i,
8067 tcg_ctx.gen_opc_pc[i]);
d2856f1a
AJ
8068 }
8069 }
e87b7cb0 8070 qemu_log("pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
25983cad 8071 pc_pos, tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base,
d2856f1a
AJ
8072 (uint32_t)tb->cs_base);
8073 }
8074#endif
25983cad 8075 env->eip = tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base;
d2856f1a
AJ
8076 cc_op = gen_opc_cc_op[pc_pos];
8077 if (cc_op != CC_OP_DYNAMIC)
8078 env->cc_op = cc_op;
8079}
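/* Annotation (added): restore_state_to_opc resynchronizes architectural
   state after a fault in generated code: eip is recovered from
   gen_opc_pc and, unless the recorded value was CC_OP_DYNAMIC, cc_op is
   restored from the gen_opc_cc_op array filled during the search_pc
   pass. */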