2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2008 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
25 #include "../tcg-ldst.c.inc"
26 #include "../tcg-pool.c.inc"
28 #ifdef CONFIG_DEBUG_TCG
29 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
30 #if TCG_TARGET_REG_BITS == 64
31 "%rax", "%rcx", "%rdx", "%rbx", "%rsp", "%rbp", "%rsi", "%rdi",
33 "%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi",
35 "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
36 "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7",
37 #if TCG_TARGET_REG_BITS == 64
38 "%xmm8", "%xmm9", "%xmm10", "%xmm11",
39 "%xmm12", "%xmm13", "%xmm14", "%xmm15",
44 static const int tcg_target_reg_alloc_order[] = {
45 #if TCG_TARGET_REG_BITS == 64
77 /* The Win64 ABI has xmm6-xmm15 as callee-saved, and we do not save
78 any of them. Therefore only allow xmm0-xmm5 to be allocated. */
81 #if TCG_TARGET_REG_BITS == 64
94 static const int tcg_target_call_iarg_regs[] = {
95 #if TCG_TARGET_REG_BITS == 64
108 /* 32 bit mode uses stack based calling convention (GCC default). */
112 static const int tcg_target_call_oarg_regs[] = {
114 #if TCG_TARGET_REG_BITS == 32
119 /* Constants we accept. */
120 #define TCG_CT_CONST_S32 0x100
121 #define TCG_CT_CONST_U32 0x200
122 #define TCG_CT_CONST_I32 0x400
123 #define TCG_CT_CONST_WSZ 0x800
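/*
 * For illustration, matching tcg_target_const_match() below: for a 64-bit
 * constant VAL, S32 accepts values that fit in a sign-extended 32-bit
 * immediate (e.g. -1), U32 accepts values that fit zero-extended in 32 bits
 * (e.g. 0xffffffff), I32 accepts values whose bitwise inverse fits in a
 * sign-extended 32-bit immediate (e.g. ~0xf = 0xfffffffffffffff0), and
 * WSZ accepts only the operation width itself, 32 or 64.
 */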
125 /* Registers used with L constraint, which are the first argument
126 registers on x86_64, and two random call-clobbered registers on 32-bit. */
128 #if TCG_TARGET_REG_BITS == 64
129 # define TCG_REG_L0 tcg_target_call_iarg_regs[0]
130 # define TCG_REG_L1 tcg_target_call_iarg_regs[1]
132 # define TCG_REG_L0 TCG_REG_EAX
133 # define TCG_REG_L1 TCG_REG_EDX
136 #define ALL_BYTEH_REGS 0x0000000fu
137 #if TCG_TARGET_REG_BITS == 64
138 # define ALL_GENERAL_REGS 0x0000ffffu
139 # define ALL_VECTOR_REGS 0xffff0000u
140 # define ALL_BYTEL_REGS ALL_GENERAL_REGS
142 # define ALL_GENERAL_REGS 0x000000ffu
143 # define ALL_VECTOR_REGS 0x00ff0000u
144 # define ALL_BYTEL_REGS ALL_BYTEH_REGS
146 #ifdef CONFIG_SOFTMMU
147 # define SOFTMMU_RESERVE_REGS ((1 << TCG_REG_L0) | (1 << TCG_REG_L1))
149 # define SOFTMMU_RESERVE_REGS 0
152 /* The host compiler should supply <cpuid.h> to enable runtime feature
153 detection, as we're not going to go so far as our own inline assembly.
154 If not available, default values will be assumed. */
155 #if defined(CONFIG_CPUID_H)
156 #include "qemu/cpuid.h"
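/*
 * As a minimal sketch of that detection (assuming GCC's <cpuid.h>
 * interface and its bit_* constants are available), tcg_target_init()
 * does roughly:
 *
 *     unsigned a, b, c, d;
 *     if (__get_cpuid_count(7, 0, &a, &b, &c, &d)) {
 *         have_bmi2 = (b & bit_BMI2) != 0;
 *     }
 *
 * The full probe also consults leaf 1 and XGETBV before trusting any
 * of the AVX/AVX2/AVX-512 feature bits.
 */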
159 /* For 64-bit, we always know that CMOV is available. */
160 #if TCG_TARGET_REG_BITS == 64
162 #elif defined(CONFIG_CPUID_H)
163 static bool have_cmov;
168 /* We need these symbols in tcg-target.h, and we can't properly conditionalize
169 it there. Therefore we always define the variables. */
176 bool have_avx512vbmi2;
180 #ifdef CONFIG_CPUID_H
181 static bool have_bmi2;
182 static bool have_lzcnt;
185 # define have_lzcnt 0
188 static const tcg_insn_unit *tb_ret_addr;
190 static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
191 intptr_t value, intptr_t addend)
196 value -= (uintptr_t)tcg_splitwx_to_rx(code_ptr);
197 if (value != (int32_t)value) {
202 tcg_patch32(code_ptr, value);
205 value -= (uintptr_t)tcg_splitwx_to_rx(code_ptr);
206 if (value != (int8_t)value) {
209 tcg_patch8(code_ptr, value);
217 /* test if a constant matches the constraint */
218 static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
220 if (ct & TCG_CT_CONST) {
223 if (type == TCG_TYPE_I32) {
224 if (ct & (TCG_CT_CONST_S32 | TCG_CT_CONST_U32 | TCG_CT_CONST_I32)) {
228 if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
231 if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
234 if ((ct & TCG_CT_CONST_I32) && ~val == (int32_t)~val) {
238 if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
244 # define LOWREGMASK(x) ((x) & 7)
246 #define P_EXT 0x100 /* 0x0f opcode prefix */
247 #define P_EXT38 0x200 /* 0x0f 0x38 opcode prefix */
248 #define P_DATA16 0x400 /* 0x66 opcode prefix */
249 #define P_VEXW 0x1000 /* Set VEX.W = 1 */
250 #if TCG_TARGET_REG_BITS == 64
251 # define P_REXW P_VEXW /* Set REX.W = 1; match VEXW */
252 # define P_REXB_R 0x2000 /* REG field as byte register */
253 # define P_REXB_RM 0x4000 /* R/M field as byte register */
254 # define P_GS 0x8000 /* gs segment override */
261 #define P_EXT3A 0x10000 /* 0x0f 0x3a opcode prefix */
262 #define P_SIMDF3 0x20000 /* 0xf3 opcode prefix */
263 #define P_SIMDF2 0x40000 /* 0xf2 opcode prefix */
264 #define P_VEXL 0x80000 /* Set VEX.L = 1 */
265 #define P_EVEX 0x100000 /* Requires EVEX encoding */
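/*
 * Each OPC_* value below packs the final opcode byte in the low 8 bits
 * and the required prefixes in the P_* bits above.  For example,
 * OPC_MOVZBL = (0xb6 | P_EXT) describes the two-byte instruction
 * 0f b6 /r (movzbl), and OPC_PXOR = (0xef | P_EXT | P_DATA16) describes
 * 66 0f ef /r (pxor).  tcg_out_opc() re-expands the flags into the
 * 0x66/0xf2/0xf3 and 0x0f prefix bytes, or the equivalent VEX/EVEX
 * fields, when the instruction is emitted.
 */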
267 #define OPC_ARITH_EvIz (0x81)
268 #define OPC_ARITH_EvIb (0x83)
269 #define OPC_ARITH_GvEv (0x03) /* ... plus (ARITH_FOO << 3) */
270 #define OPC_ANDN (0xf2 | P_EXT38)
271 #define OPC_ADD_GvEv (OPC_ARITH_GvEv | (ARITH_ADD << 3))
272 #define OPC_AND_GvEv (OPC_ARITH_GvEv | (ARITH_AND << 3))
273 #define OPC_BLENDPS (0x0c | P_EXT3A | P_DATA16)
274 #define OPC_BSF (0xbc | P_EXT)
275 #define OPC_BSR (0xbd | P_EXT)
276 #define OPC_BSWAP (0xc8 | P_EXT)
277 #define OPC_CALL_Jz (0xe8)
278 #define OPC_CMOVCC (0x40 | P_EXT) /* ... plus condition code */
279 #define OPC_CMP_GvEv (OPC_ARITH_GvEv | (ARITH_CMP << 3))
280 #define OPC_DEC_r32 (0x48)
281 #define OPC_IMUL_GvEv (0xaf | P_EXT)
282 #define OPC_IMUL_GvEvIb (0x6b)
283 #define OPC_IMUL_GvEvIz (0x69)
284 #define OPC_INC_r32 (0x40)
285 #define OPC_JCC_long (0x80 | P_EXT) /* ... plus condition code */
286 #define OPC_JCC_short (0x70) /* ... plus condition code */
287 #define OPC_JMP_long (0xe9)
288 #define OPC_JMP_short (0xeb)
289 #define OPC_LEA (0x8d)
290 #define OPC_LZCNT (0xbd | P_EXT | P_SIMDF3)
291 #define OPC_MOVB_EvGv (0x88) /* stores, more or less */
292 #define OPC_MOVL_EvGv (0x89) /* stores, more or less */
293 #define OPC_MOVL_GvEv (0x8b) /* loads, more or less */
294 #define OPC_MOVB_EvIz (0xc6)
295 #define OPC_MOVL_EvIz (0xc7)
296 #define OPC_MOVL_Iv (0xb8)
297 #define OPC_MOVBE_GyMy (0xf0 | P_EXT38)
298 #define OPC_MOVBE_MyGy (0xf1 | P_EXT38)
299 #define OPC_MOVD_VyEy (0x6e | P_EXT | P_DATA16)
300 #define OPC_MOVD_EyVy (0x7e | P_EXT | P_DATA16)
301 #define OPC_MOVDDUP (0x12 | P_EXT | P_SIMDF2)
302 #define OPC_MOVDQA_VxWx (0x6f | P_EXT | P_DATA16)
303 #define OPC_MOVDQA_WxVx (0x7f | P_EXT | P_DATA16)
304 #define OPC_MOVDQU_VxWx (0x6f | P_EXT | P_SIMDF3)
305 #define OPC_MOVDQU_WxVx (0x7f | P_EXT | P_SIMDF3)
306 #define OPC_MOVQ_VqWq (0x7e | P_EXT | P_SIMDF3)
307 #define OPC_MOVQ_WqVq (0xd6 | P_EXT | P_DATA16)
308 #define OPC_MOVSBL (0xbe | P_EXT)
309 #define OPC_MOVSWL (0xbf | P_EXT)
310 #define OPC_MOVSLQ (0x63 | P_REXW)
311 #define OPC_MOVZBL (0xb6 | P_EXT)
312 #define OPC_MOVZWL (0xb7 | P_EXT)
313 #define OPC_PABSB (0x1c | P_EXT38 | P_DATA16)
314 #define OPC_PABSW (0x1d | P_EXT38 | P_DATA16)
315 #define OPC_PABSD (0x1e | P_EXT38 | P_DATA16)
316 #define OPC_VPABSQ (0x1f | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
317 #define OPC_PACKSSDW (0x6b | P_EXT | P_DATA16)
318 #define OPC_PACKSSWB (0x63 | P_EXT | P_DATA16)
319 #define OPC_PACKUSDW (0x2b | P_EXT38 | P_DATA16)
320 #define OPC_PACKUSWB (0x67 | P_EXT | P_DATA16)
321 #define OPC_PADDB (0xfc | P_EXT | P_DATA16)
322 #define OPC_PADDW (0xfd | P_EXT | P_DATA16)
323 #define OPC_PADDD (0xfe | P_EXT | P_DATA16)
324 #define OPC_PADDQ (0xd4 | P_EXT | P_DATA16)
325 #define OPC_PADDSB (0xec | P_EXT | P_DATA16)
326 #define OPC_PADDSW (0xed | P_EXT | P_DATA16)
327 #define OPC_PADDUB (0xdc | P_EXT | P_DATA16)
328 #define OPC_PADDUW (0xdd | P_EXT | P_DATA16)
329 #define OPC_PAND (0xdb | P_EXT | P_DATA16)
330 #define OPC_PANDN (0xdf | P_EXT | P_DATA16)
331 #define OPC_PBLENDW (0x0e | P_EXT3A | P_DATA16)
332 #define OPC_PCMPEQB (0x74 | P_EXT | P_DATA16)
333 #define OPC_PCMPEQW (0x75 | P_EXT | P_DATA16)
334 #define OPC_PCMPEQD (0x76 | P_EXT | P_DATA16)
335 #define OPC_PCMPEQQ (0x29 | P_EXT38 | P_DATA16)
336 #define OPC_PCMPGTB (0x64 | P_EXT | P_DATA16)
337 #define OPC_PCMPGTW (0x65 | P_EXT | P_DATA16)
338 #define OPC_PCMPGTD (0x66 | P_EXT | P_DATA16)
339 #define OPC_PCMPGTQ (0x37 | P_EXT38 | P_DATA16)
340 #define OPC_PMAXSB (0x3c | P_EXT38 | P_DATA16)
341 #define OPC_PMAXSW (0xee | P_EXT | P_DATA16)
342 #define OPC_PMAXSD (0x3d | P_EXT38 | P_DATA16)
343 #define OPC_VPMAXSQ (0x3d | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
344 #define OPC_PMAXUB (0xde | P_EXT | P_DATA16)
345 #define OPC_PMAXUW (0x3e | P_EXT38 | P_DATA16)
346 #define OPC_PMAXUD (0x3f | P_EXT38 | P_DATA16)
347 #define OPC_VPMAXUQ (0x3f | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
348 #define OPC_PMINSB (0x38 | P_EXT38 | P_DATA16)
349 #define OPC_PMINSW (0xea | P_EXT | P_DATA16)
350 #define OPC_PMINSD (0x39 | P_EXT38 | P_DATA16)
351 #define OPC_VPMINSQ (0x39 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
352 #define OPC_PMINUB (0xda | P_EXT | P_DATA16)
353 #define OPC_PMINUW (0x3a | P_EXT38 | P_DATA16)
354 #define OPC_PMINUD (0x3b | P_EXT38 | P_DATA16)
355 #define OPC_VPMINUQ (0x3b | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
356 #define OPC_PMOVSXBW (0x20 | P_EXT38 | P_DATA16)
357 #define OPC_PMOVSXWD (0x23 | P_EXT38 | P_DATA16)
358 #define OPC_PMOVSXDQ (0x25 | P_EXT38 | P_DATA16)
359 #define OPC_PMOVZXBW (0x30 | P_EXT38 | P_DATA16)
360 #define OPC_PMOVZXWD (0x33 | P_EXT38 | P_DATA16)
361 #define OPC_PMOVZXDQ (0x35 | P_EXT38 | P_DATA16)
362 #define OPC_PMULLW (0xd5 | P_EXT | P_DATA16)
363 #define OPC_PMULLD (0x40 | P_EXT38 | P_DATA16)
364 #define OPC_VPMULLQ (0x40 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
365 #define OPC_POR (0xeb | P_EXT | P_DATA16)
366 #define OPC_PSHUFB (0x00 | P_EXT38 | P_DATA16)
367 #define OPC_PSHUFD (0x70 | P_EXT | P_DATA16)
368 #define OPC_PSHUFLW (0x70 | P_EXT | P_SIMDF2)
369 #define OPC_PSHUFHW (0x70 | P_EXT | P_SIMDF3)
370 #define OPC_PSHIFTW_Ib (0x71 | P_EXT | P_DATA16) /* /2 /6 /4 */
371 #define OPC_PSHIFTD_Ib (0x72 | P_EXT | P_DATA16) /* /1 /2 /6 /4 */
372 #define OPC_PSHIFTQ_Ib (0x73 | P_EXT | P_DATA16) /* /2 /6 /4 */
373 #define OPC_PSLLW (0xf1 | P_EXT | P_DATA16)
374 #define OPC_PSLLD (0xf2 | P_EXT | P_DATA16)
375 #define OPC_PSLLQ (0xf3 | P_EXT | P_DATA16)
376 #define OPC_PSRAW (0xe1 | P_EXT | P_DATA16)
377 #define OPC_PSRAD (0xe2 | P_EXT | P_DATA16)
378 #define OPC_VPSRAQ (0xe2 | P_EXT | P_DATA16 | P_VEXW | P_EVEX)
379 #define OPC_PSRLW (0xd1 | P_EXT | P_DATA16)
380 #define OPC_PSRLD (0xd2 | P_EXT | P_DATA16)
381 #define OPC_PSRLQ (0xd3 | P_EXT | P_DATA16)
382 #define OPC_PSUBB (0xf8 | P_EXT | P_DATA16)
383 #define OPC_PSUBW (0xf9 | P_EXT | P_DATA16)
384 #define OPC_PSUBD (0xfa | P_EXT | P_DATA16)
385 #define OPC_PSUBQ (0xfb | P_EXT | P_DATA16)
386 #define OPC_PSUBSB (0xe8 | P_EXT | P_DATA16)
387 #define OPC_PSUBSW (0xe9 | P_EXT | P_DATA16)
388 #define OPC_PSUBUB (0xd8 | P_EXT | P_DATA16)
389 #define OPC_PSUBUW (0xd9 | P_EXT | P_DATA16)
390 #define OPC_PUNPCKLBW (0x60 | P_EXT | P_DATA16)
391 #define OPC_PUNPCKLWD (0x61 | P_EXT | P_DATA16)
392 #define OPC_PUNPCKLDQ (0x62 | P_EXT | P_DATA16)
393 #define OPC_PUNPCKLQDQ (0x6c | P_EXT | P_DATA16)
394 #define OPC_PUNPCKHBW (0x68 | P_EXT | P_DATA16)
395 #define OPC_PUNPCKHWD (0x69 | P_EXT | P_DATA16)
396 #define OPC_PUNPCKHDQ (0x6a | P_EXT | P_DATA16)
397 #define OPC_PUNPCKHQDQ (0x6d | P_EXT | P_DATA16)
398 #define OPC_PXOR (0xef | P_EXT | P_DATA16)
399 #define OPC_POP_r32 (0x58)
400 #define OPC_POPCNT (0xb8 | P_EXT | P_SIMDF3)
401 #define OPC_PUSH_r32 (0x50)
402 #define OPC_PUSH_Iv (0x68)
403 #define OPC_PUSH_Ib (0x6a)
404 #define OPC_RET (0xc3)
405 #define OPC_SETCC (0x90 | P_EXT | P_REXB_RM) /* ... plus cc */
406 #define OPC_SHIFT_1 (0xd1)
407 #define OPC_SHIFT_Ib (0xc1)
408 #define OPC_SHIFT_cl (0xd3)
409 #define OPC_SARX (0xf7 | P_EXT38 | P_SIMDF3)
410 #define OPC_SHUFPS (0xc6 | P_EXT)
411 #define OPC_SHLX (0xf7 | P_EXT38 | P_DATA16)
412 #define OPC_SHRX (0xf7 | P_EXT38 | P_SIMDF2)
413 #define OPC_SHRD_Ib (0xac | P_EXT)
414 #define OPC_TESTL (0x85)
415 #define OPC_TZCNT (0xbc | P_EXT | P_SIMDF3)
416 #define OPC_UD2 (0x0b | P_EXT)
417 #define OPC_VPBLENDD (0x02 | P_EXT3A | P_DATA16)
418 #define OPC_VPBLENDVB (0x4c | P_EXT3A | P_DATA16)
419 #define OPC_VPINSRB (0x20 | P_EXT3A | P_DATA16)
420 #define OPC_VPINSRW (0xc4 | P_EXT | P_DATA16)
421 #define OPC_VBROADCASTSS (0x18 | P_EXT38 | P_DATA16)
422 #define OPC_VBROADCASTSD (0x19 | P_EXT38 | P_DATA16)
423 #define OPC_VPBROADCASTB (0x78 | P_EXT38 | P_DATA16)
424 #define OPC_VPBROADCASTW (0x79 | P_EXT38 | P_DATA16)
425 #define OPC_VPBROADCASTD (0x58 | P_EXT38 | P_DATA16)
426 #define OPC_VPBROADCASTQ (0x59 | P_EXT38 | P_DATA16)
427 #define OPC_VPERMQ (0x00 | P_EXT3A | P_DATA16 | P_VEXW)
428 #define OPC_VPERM2I128 (0x46 | P_EXT3A | P_DATA16 | P_VEXL)
429 #define OPC_VPROLVD (0x15 | P_EXT38 | P_DATA16 | P_EVEX)
430 #define OPC_VPROLVQ (0x15 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
431 #define OPC_VPRORVD (0x14 | P_EXT38 | P_DATA16 | P_EVEX)
432 #define OPC_VPRORVQ (0x14 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
433 #define OPC_VPSHLDW (0x70 | P_EXT3A | P_DATA16 | P_VEXW | P_EVEX)
434 #define OPC_VPSHLDD (0x71 | P_EXT3A | P_DATA16 | P_EVEX)
435 #define OPC_VPSHLDQ (0x71 | P_EXT3A | P_DATA16 | P_VEXW | P_EVEX)
436 #define OPC_VPSHLDVW (0x70 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
437 #define OPC_VPSHLDVD (0x71 | P_EXT38 | P_DATA16 | P_EVEX)
438 #define OPC_VPSHLDVQ (0x71 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
439 #define OPC_VPSHRDVW (0x72 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
440 #define OPC_VPSHRDVD (0x73 | P_EXT38 | P_DATA16 | P_EVEX)
441 #define OPC_VPSHRDVQ (0x73 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
442 #define OPC_VPSLLVW (0x12 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
443 #define OPC_VPSLLVD (0x47 | P_EXT38 | P_DATA16)
444 #define OPC_VPSLLVQ (0x47 | P_EXT38 | P_DATA16 | P_VEXW)
445 #define OPC_VPSRAVW (0x11 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
446 #define OPC_VPSRAVD (0x46 | P_EXT38 | P_DATA16)
447 #define OPC_VPSRAVQ (0x46 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
448 #define OPC_VPSRLVW (0x10 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
449 #define OPC_VPSRLVD (0x45 | P_EXT38 | P_DATA16)
450 #define OPC_VPSRLVQ (0x45 | P_EXT38 | P_DATA16 | P_VEXW)
451 #define OPC_VPTERNLOGQ (0x25 | P_EXT3A | P_DATA16 | P_VEXW | P_EVEX)
452 #define OPC_VZEROUPPER (0x77 | P_EXT)
453 #define OPC_XCHG_ax_r32 (0x90)
455 #define OPC_GRP3_Eb (0xf6)
456 #define OPC_GRP3_Ev (0xf7)
457 #define OPC_GRP5 (0xff)
458 #define OPC_GRP14 (0x73 | P_EXT | P_DATA16)
460 /* Group 1 opcode extensions for 0x80-0x83.
461 These are also used as modifiers for OPC_ARITH. */
471 /* Group 2 opcode extensions for 0xc0, 0xc1, 0xd0-0xd3. */
478 /* Group 3 opcode extensions for 0xf6, 0xf7. To be used with OPC_GRP3. */
487 /* Group 5 opcode extensions for 0xff. To be used with OPC_GRP5. */
488 #define EXT5_INC_Ev 0
489 #define EXT5_DEC_Ev 1
490 #define EXT5_CALLN_Ev 2
491 #define EXT5_JMPN_Ev 4
493 /* Condition codes to be added to OPC_JCC_{long,short}. */
512 static const uint8_t tcg_cond_to_jcc[] = {
513 [TCG_COND_EQ] = JCC_JE,
514 [TCG_COND_NE] = JCC_JNE,
515 [TCG_COND_LT] = JCC_JL,
516 [TCG_COND_GE] = JCC_JGE,
517 [TCG_COND_LE] = JCC_JLE,
518 [TCG_COND_GT] = JCC_JG,
519 [TCG_COND_LTU] = JCC_JB,
520 [TCG_COND_GEU] = JCC_JAE,
521 [TCG_COND_LEU] = JCC_JBE,
522 [TCG_COND_GTU] = JCC_JA,
525 #if TCG_TARGET_REG_BITS == 64
526 static void tcg_out_opc(TCGContext *s, int opc, int r, int rm, int x)
533 if (opc & P_DATA16) {
534 /* We should never be asking for both a 16-bit and a 64-bit operation. */
535 tcg_debug_assert((opc & P_REXW) == 0);
538 if (opc & P_SIMDF3) {
540 } else if (opc & P_SIMDF2) {
545 rex |= (opc & P_REXW) ? 0x8 : 0x0; /* REX.W */
546 rex |= (r & 8) >> 1; /* REX.R */
547 rex |= (x & 8) >> 2; /* REX.X */
548 rex |= (rm & 8) >> 3; /* REX.B */
550 /* P_REXB_{R,RM} indicates that the given register is the low byte.
551 For %[abcd]l we need no REX prefix, but for %{si,di,bp,sp}l we do,
552 as otherwise the encoding indicates %[abcd]h. Note that the values
553 that are ORed in merely indicate that the REX byte must be present;
554 those bits get discarded in output. */
555 rex |= opc & (r >= 4 ? P_REXB_R : 0);
556 rex |= opc & (rm >= 4 ? P_REXB_RM : 0);
559 tcg_out8(s, (uint8_t)(rex | 0x40));
562 if (opc & (P_EXT | P_EXT38 | P_EXT3A)) {
566 } else if (opc & P_EXT3A) {
574 static void tcg_out_opc(TCGContext *s, int opc)
576 if (opc & P_DATA16) {
579 if (opc & P_SIMDF3) {
581 } else if (opc & P_SIMDF2) {
584 if (opc & (P_EXT | P_EXT38 | P_EXT3A)) {
588 } else if (opc & P_EXT3A) {
594 /* Discard the register arguments to tcg_out_opc early, so as not to penalize
595 the 32-bit compilation paths. This method works with all versions of gcc,
596 whereas relying on optimization may not be able to exclude them. */
597 #define tcg_out_opc(s, opc, r, rm, x) (tcg_out_opc)(s, opc)
600 static void tcg_out_modrm(TCGContext *s, int opc, int r, int rm)
602 tcg_out_opc(s, opc, r, rm, 0);
603 tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
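/*
 * Worked example of the 64-bit path above: tcg_out_modrm(s,
 * OPC_MOVL_GvEv | P_REXW, TCG_REG_RAX, TCG_REG_R12) emits
 *
 *     49 8b c4                movq %r12, %rax
 *
 * i.e. REX = 0x40 | W(0x08) | B(0x01) because the r/m register needs
 * the extension bit, then the 0x8b opcode byte, then ModRM 0xc4
 * (mod = 3, reg = %rax, rm = %r12 & 7).
 */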
606 static void tcg_out_vex_opc(TCGContext *s, int opc, int r, int v,
611 /* Use the two byte form if possible, which cannot encode
612 VEX.W, VEX.B, VEX.X, or an m-mmmm field other than P_EXT. */
613 if ((opc & (P_EXT | P_EXT38 | P_EXT3A | P_VEXW)) == P_EXT
614 && ((rm | index) & 8) == 0) {
615 /* Two byte VEX prefix. */
618 tmp = (r & 8 ? 0 : 0x80); /* VEX.R */
620 /* Three byte VEX prefix. */
626 } else if (opc & P_EXT38) {
628 } else if (opc & P_EXT) {
631 g_assert_not_reached();
633 tmp |= (r & 8 ? 0 : 0x80); /* VEX.R */
634 tmp |= (index & 8 ? 0 : 0x40); /* VEX.X */
635 tmp |= (rm & 8 ? 0 : 0x20); /* VEX.B */
638 tmp = (opc & P_VEXW ? 0x80 : 0); /* VEX.W */
641 tmp |= (opc & P_VEXL ? 0x04 : 0); /* VEX.L */
643 if (opc & P_DATA16) {
645 } else if (opc & P_SIMDF3) {
647 } else if (opc & P_SIMDF2) {
650 tmp |= (~v & 15) << 3; /* VEX.vvvv */
655 static void tcg_out_evex_opc(TCGContext *s, int opc, int r, int v,
658 /* The entire 4-byte evex prefix; with R' and V' set. */
659 uint32_t p = 0x08041062;
662 tcg_debug_assert(have_avx512vl);
667 } else if (opc & P_EXT38) {
669 } else if (opc & P_EXT) {
672 g_assert_not_reached();
676 if (opc & P_DATA16) {
678 } else if (opc & P_SIMDF3) {
680 } else if (opc & P_SIMDF2) {
686 p = deposit32(p, 8, 2, mm);
687 p = deposit32(p, 13, 1, (rm & 8) == 0); /* EVEX.RXB.B */
688 p = deposit32(p, 14, 1, (index & 8) == 0); /* EVEX.RXB.X */
689 p = deposit32(p, 15, 1, (r & 8) == 0); /* EVEX.RXB.R */
690 p = deposit32(p, 16, 2, pp);
691 p = deposit32(p, 19, 4, ~v);
692 p = deposit32(p, 23, 1, (opc & P_VEXW) != 0);
693 p = deposit32(p, 29, 2, (opc & P_VEXL) != 0);
699 static void tcg_out_vex_modrm(TCGContext *s, int opc, int r, int v, int rm)
702 tcg_out_evex_opc(s, opc, r, v, rm, 0);
704 tcg_out_vex_opc(s, opc, r, v, rm, 0);
706 tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
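/*
 * Worked example of the two-byte VEX form: vpxor %xmm2, %xmm1, %xmm0
 * (all hardware register numbers below 8) is emitted as
 *
 *     c5 f1 ef c2
 *
 * where 0xf1 packs inverted R = 1, vvvv = ~1 = 0b1110, L = 0 and
 * pp = 01 (from P_DATA16), and 0xc2 is the usual ModRM byte.
 */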
709 /* Output an opcode with a full "rm + (index<<shift) + offset" address mode.
710 A missing RM or INDEX is indicated by a negative value. In 64-bit
711 mode for absolute addresses, ~RM is the size of the immediate operand
712 that will follow the instruction. */
714 static void tcg_out_sib_offset(TCGContext *s, int r, int rm, int index,
715 int shift, intptr_t offset)
719 if (index < 0 && rm < 0) {
720 if (TCG_TARGET_REG_BITS == 64) {
721 /* Try for a rip-relative addressing mode. This has replaced
722 the 32-bit-mode absolute addressing encoding. */
723 intptr_t pc = (intptr_t)s->code_ptr + 5 + ~rm;
724 intptr_t disp = offset - pc;
725 if (disp == (int32_t)disp) {
726 tcg_out8(s, (LOWREGMASK(r) << 3) | 5);
731 /* Try for an absolute address encoding. This requires the
732 use of the MODRM+SIB encoding and is therefore larger than
733 rip-relative addressing. */
734 if (offset == (int32_t)offset) {
735 tcg_out8(s, (LOWREGMASK(r) << 3) | 4);
736 tcg_out8(s, (4 << 3) | 5);
737 tcg_out32(s, offset);
741 /* ??? The memory isn't directly addressable. */
742 g_assert_not_reached();
744 /* Absolute address. */
745 tcg_out8(s, (r << 3) | 5);
746 tcg_out32(s, offset);
751 /* Find the length of the immediate addend. Note that the encoding
752 that would be used for (%ebp) indicates absolute addressing. */
754 mod = 0, len = 4, rm = 5;
755 } else if (offset == 0 && LOWREGMASK(rm) != TCG_REG_EBP) {
757 } else if (offset == (int8_t)offset) {
763 /* Use a single byte MODRM format if possible. Note that the encoding
764 that would be used for %esp is the escape to the two byte form. */
765 if (index < 0 && LOWREGMASK(rm) != TCG_REG_ESP) {
766 /* Single byte MODRM format. */
767 tcg_out8(s, mod | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
769 /* Two byte MODRM+SIB format. */
771 /* Note that the encoding that would place %esp into the index
772 field indicates no index register. In 64-bit mode, the REX.X
773 bit counts, so %r12 can be used as the index. */
777 tcg_debug_assert(index != TCG_REG_ESP);
780 tcg_out8(s, mod | (LOWREGMASK(r) << 3) | 4);
781 tcg_out8(s, (shift << 6) | (LOWREGMASK(index) << 3) | LOWREGMASK(rm));
786 } else if (len == 4) {
787 tcg_out32(s, offset);
791 static void tcg_out_modrm_sib_offset(TCGContext *s, int opc, int r, int rm,
792 int index, int shift, intptr_t offset)
794 tcg_out_opc(s, opc, r, rm < 0 ? 0 : rm, index < 0 ? 0 : index);
795 tcg_out_sib_offset(s, r, rm, index, shift, offset);
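/*
 * Worked example of the addressing forms above:
 * tcg_out_modrm_offset(s, OPC_MOVL_GvEv, TCG_REG_EAX, TCG_REG_EBP, 0x10)
 * emits 8b 45 10, i.e. movl 0x10(%ebp), %eax: mod = 1 selects a disp8,
 * and no SIB byte is needed because the base register is not %esp.
 */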
798 static void tcg_out_vex_modrm_sib_offset(TCGContext *s, int opc, int r, int v,
799 int rm, int index, int shift,
802 tcg_out_vex_opc(s, opc, r, v, rm < 0 ? 0 : rm, index < 0 ? 0 : index);
803 tcg_out_sib_offset(s, r, rm, index, shift, offset);
806 /* A simplification of the above with no index or shift. */
807 static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r,
808 int rm, intptr_t offset)
810 tcg_out_modrm_sib_offset(s, opc, r, rm, -1, 0, offset);
813 static inline void tcg_out_vex_modrm_offset(TCGContext *s, int opc, int r,
814 int v, int rm, intptr_t offset)
816 tcg_out_vex_modrm_sib_offset(s, opc, r, v, rm, -1, 0, offset);
819 /* Output an opcode with an expected reference to the constant pool. */
820 static inline void tcg_out_modrm_pool(TCGContext *s, int opc, int r)
822 tcg_out_opc(s, opc, r, 0, 0);
823 /* Absolute for 32-bit, pc-relative for 64-bit. */
824 tcg_out8(s, LOWREGMASK(r) << 3 | 5);
828 /* Output an opcode with an expected reference to the constant pool. */
829 static inline void tcg_out_vex_modrm_pool(TCGContext *s, int opc, int r)
831 tcg_out_vex_opc(s, opc, r, 0, 0, 0);
832 /* Absolute for 32-bit, pc-relative for 64-bit. */
833 tcg_out8(s, LOWREGMASK(r) << 3 | 5);
837 /* Generate dest op= src. Uses the same ARITH_* codes as tgen_arithi. */
838 static inline void tgen_arithr(TCGContext *s, int subop, int dest, int src)
840 /* Propagate an opcode prefix, such as P_REXW. */
841 int ext = subop & ~0x7;
844 tcg_out_modrm(s, OPC_ARITH_GvEv + (subop << 3) + ext, dest, src);
847 static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
861 tcg_out_modrm(s, OPC_MOVL_GvEv + rexw, ret, arg);
863 tcg_out_vex_modrm(s, OPC_MOVD_EyVy + rexw, arg, 0, ret);
867 tcg_out_vex_modrm(s, OPC_MOVD_VyEy + rexw, ret, 0, arg);
869 tcg_out_vex_modrm(s, OPC_MOVQ_VqWq, ret, 0, arg);
875 tcg_debug_assert(ret >= 16 && arg >= 16);
876 tcg_out_vex_modrm(s, OPC_MOVQ_VqWq, ret, 0, arg);
879 tcg_debug_assert(ret >= 16 && arg >= 16);
880 tcg_out_vex_modrm(s, OPC_MOVDQA_VxWx, ret, 0, arg);
883 tcg_debug_assert(ret >= 16 && arg >= 16);
884 tcg_out_vex_modrm(s, OPC_MOVDQA_VxWx | P_VEXL, ret, 0, arg);
888 g_assert_not_reached();
893 static const int avx2_dup_insn[4] = {
894 OPC_VPBROADCASTB, OPC_VPBROADCASTW,
895 OPC_VPBROADCASTD, OPC_VPBROADCASTQ,
898 static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
902 int vex_l = (type == TCG_TYPE_V256 ? P_VEXL : 0);
903 tcg_out_vex_modrm(s, avx2_dup_insn[vece] + vex_l, r, 0, a);
907 /* ??? With zero in a register, use PSHUFB. */
908 tcg_out_vex_modrm(s, OPC_PUNPCKLBW, r, a, a);
912 tcg_out_vex_modrm(s, OPC_PUNPCKLWD, r, a, a);
916 tcg_out_vex_modrm(s, OPC_PSHUFD, r, 0, a);
917 /* imm8 operand: all output lanes selected from input lane 0. */
921 tcg_out_vex_modrm(s, OPC_PUNPCKLQDQ, r, a, a);
924 g_assert_not_reached();
930 static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
931 TCGReg r, TCGReg base, intptr_t offset)
934 int vex_l = (type == TCG_TYPE_V256 ? P_VEXL : 0);
935 tcg_out_vex_modrm_offset(s, avx2_dup_insn[vece] + vex_l,
940 tcg_out_vex_modrm_offset(s, OPC_MOVDDUP, r, 0, base, offset);
943 tcg_out_vex_modrm_offset(s, OPC_VBROADCASTSS, r, 0, base, offset);
946 tcg_out_vex_modrm_offset(s, OPC_VPINSRW, r, r, base, offset);
947 tcg_out8(s, 0); /* imm8 */
948 tcg_out_dup_vec(s, type, vece, r, r);
951 tcg_out_vex_modrm_offset(s, OPC_VPINSRB, r, r, base, offset);
952 tcg_out8(s, 0); /* imm8 */
953 tcg_out_dup_vec(s, type, vece, r, r);
956 g_assert_not_reached();
962 static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
963 TCGReg ret, int64_t arg)
965 int vex_l = (type == TCG_TYPE_V256 ? P_VEXL : 0);
968 tcg_out_vex_modrm(s, OPC_PXOR, ret, ret, ret);
972 tcg_out_vex_modrm(s, OPC_PCMPEQB + vex_l, ret, ret, ret);
976 if (TCG_TARGET_REG_BITS == 32 && vece < MO_64) {
978 tcg_out_vex_modrm_pool(s, OPC_VPBROADCASTD + vex_l, ret);
980 tcg_out_vex_modrm_pool(s, OPC_VBROADCASTSS, ret);
982 new_pool_label(s, arg, R_386_32, s->code_ptr - 4, 0);
984 if (type == TCG_TYPE_V64) {
985 tcg_out_vex_modrm_pool(s, OPC_MOVQ_VqWq, ret);
986 } else if (have_avx2) {
987 tcg_out_vex_modrm_pool(s, OPC_VPBROADCASTQ + vex_l, ret);
989 tcg_out_vex_modrm_pool(s, OPC_MOVDDUP, ret);
991 if (TCG_TARGET_REG_BITS == 64) {
992 new_pool_label(s, arg, R_386_PC32, s->code_ptr - 4, -4);
994 new_pool_l2(s, R_386_32, s->code_ptr - 4, 0, arg, arg >> 32);
999 static void tcg_out_movi_vec(TCGContext *s, TCGType type,
1000 TCGReg ret, tcg_target_long arg)
1003 tcg_out_vex_modrm(s, OPC_PXOR, ret, ret, ret);
1007 tcg_out_vex_modrm(s, OPC_PCMPEQB, ret, ret, ret);
1011 int rexw = (type == TCG_TYPE_I32 ? 0 : P_REXW);
1012 tcg_out_vex_modrm_pool(s, OPC_MOVD_VyEy + rexw, ret);
1013 if (TCG_TARGET_REG_BITS == 64) {
1014 new_pool_label(s, arg, R_386_PC32, s->code_ptr - 4, -4);
1016 new_pool_label(s, arg, R_386_32, s->code_ptr - 4, 0);
1020 static void tcg_out_movi_int(TCGContext *s, TCGType type,
1021 TCGReg ret, tcg_target_long arg)
1023 tcg_target_long diff;
1026 tgen_arithr(s, ARITH_XOR, ret, ret);
1029 if (arg == (uint32_t)arg || type == TCG_TYPE_I32) {
1030 tcg_out_opc(s, OPC_MOVL_Iv + LOWREGMASK(ret), 0, ret, 0);
1034 if (arg == (int32_t)arg) {
1035 tcg_out_modrm(s, OPC_MOVL_EvIz + P_REXW, 0, ret);
1040 /* Try a 7 byte pc-relative lea before the 10 byte movq. */
1041 diff = tcg_pcrel_diff(s, (const void *)arg) - 7;
1042 if (diff == (int32_t)diff) {
1043 tcg_out_opc(s, OPC_LEA | P_REXW, ret, 0, 0);
1044 tcg_out8(s, (LOWREGMASK(ret) << 3) | 5);
1049 tcg_out_opc(s, OPC_MOVL_Iv + P_REXW + LOWREGMASK(ret), 0, ret, 0);
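/*
 * For %rax, for example, the cases above produce: 33 c0 (2-byte xor)
 * for zero; b8 imm32 (5 bytes) for a value that zero-extends from
 * 32 bits; 48 c7 c0 imm32 (7 bytes) for one that sign-extends;
 * 48 8d 05 disp32 (7-byte rip-relative lea) for a nearby address;
 * and finally the 10-byte movabs, 48 b8 imm64.
 */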
1053 static void tcg_out_movi(TCGContext *s, TCGType type,
1054 TCGReg ret, tcg_target_long arg)
1058 #if TCG_TARGET_REG_BITS == 64
1062 tcg_out_movi_int(s, type, ret, arg);
1064 tcg_out_movi_vec(s, type, ret, arg);
1068 g_assert_not_reached();
1072 static inline void tcg_out_pushi(TCGContext *s, tcg_target_long val)
1074 if (val == (int8_t)val) {
1075 tcg_out_opc(s, OPC_PUSH_Ib, 0, 0, 0);
1077 } else if (val == (int32_t)val) {
1078 tcg_out_opc(s, OPC_PUSH_Iv, 0, 0, 0);
1085 static inline void tcg_out_mb(TCGContext *s, TCGArg a0)
1087 /* Given the strength of x86 memory ordering, we need only care about
1088 store-load ordering. Experimentally, "lock orl $0,0(%esp)" is
1089 faster than "mfence", so don't bother with the sse insn. */
1090 if (a0 & TCG_MO_ST_LD) {
1092 tcg_out_modrm_offset(s, OPC_ARITH_EvIb, ARITH_OR, TCG_REG_ESP, 0);
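/*
 * For reference, "lock orl $0, (%esp)" assembles to f0 83 0c 24 00,
 * five bytes against mfence's three (0f ae f0); as noted above, the
 * locked or is nonetheless the cheaper store-load barrier in practice.
 */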
1097 static inline void tcg_out_push(TCGContext *s, int reg)
1099 tcg_out_opc(s, OPC_PUSH_r32 + LOWREGMASK(reg), 0, reg, 0);
1102 static inline void tcg_out_pop(TCGContext *s, int reg)
1104 tcg_out_opc(s, OPC_POP_r32 + LOWREGMASK(reg), 0, reg, 0);
1107 static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
1108 TCGReg arg1, intptr_t arg2)
1113 tcg_out_modrm_offset(s, OPC_MOVL_GvEv, ret, arg1, arg2);
1115 tcg_out_vex_modrm_offset(s, OPC_MOVD_VyEy, ret, 0, arg1, arg2);
1120 tcg_out_modrm_offset(s, OPC_MOVL_GvEv | P_REXW, ret, arg1, arg2);
1125 /* There is no instruction that can validate 8-byte alignment. */
1126 tcg_debug_assert(ret >= 16);
1127 tcg_out_vex_modrm_offset(s, OPC_MOVQ_VqWq, ret, 0, arg1, arg2);
1131 * The gvec infrastructure asserts that v128 vector loads
1132 * and stores use a 16-byte aligned offset. Validate that the
1133 * final pointer is aligned by using an insn that will SIGSEGV.
1135 tcg_debug_assert(ret >= 16);
1136 tcg_out_vex_modrm_offset(s, OPC_MOVDQA_VxWx, ret, 0, arg1, arg2);
1140 * The gvec infrastructure only requires 16-byte alignment,
1141 * so here we must use an unaligned load.
1143 tcg_debug_assert(ret >= 16);
1144 tcg_out_vex_modrm_offset(s, OPC_MOVDQU_VxWx | P_VEXL,
1145 ret, 0, arg1, arg2);
1148 g_assert_not_reached();
1152 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
1153 TCGReg arg1, intptr_t arg2)
1158 tcg_out_modrm_offset(s, OPC_MOVL_EvGv, arg, arg1, arg2);
1160 tcg_out_vex_modrm_offset(s, OPC_MOVD_EyVy, arg, 0, arg1, arg2);
1165 tcg_out_modrm_offset(s, OPC_MOVL_EvGv | P_REXW, arg, arg1, arg2);
1170 /* There is no instruction that can validate 8-byte alignment. */
1171 tcg_debug_assert(arg >= 16);
1172 tcg_out_vex_modrm_offset(s, OPC_MOVQ_WqVq, arg, 0, arg1, arg2);
1176 * The gvec infrastructure asserts that v128 vector loads
1177 * and stores use a 16-byte aligned offset. Validate that the
1178 * final pointer is aligned by using an insn that will SIGSEGV.
1180 tcg_debug_assert(arg >= 16);
1181 tcg_out_vex_modrm_offset(s, OPC_MOVDQA_WxVx, arg, 0, arg1, arg2);
1185 * The gvec infrastructure only requires 16-byte alignment,
1186 * so here we must use an unaligned store.
1188 tcg_debug_assert(arg >= 16);
1189 tcg_out_vex_modrm_offset(s, OPC_MOVDQU_WxVx | P_VEXL,
1190 arg, 0, arg1, arg2);
1193 g_assert_not_reached();
1197 static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
1198 TCGReg base, intptr_t ofs)
1201 if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I64) {
1202 if (val != (int32_t)val) {
1206 } else if (type != TCG_TYPE_I32) {
1209 tcg_out_modrm_offset(s, OPC_MOVL_EvIz | rexw, 0, base, ofs);
1214 static void tcg_out_shifti(TCGContext *s, int subopc, int reg, int count)
1216 /* Propagate an opcode prefix, such as P_DATA16. */
1217 int ext = subopc & ~0x7;
1221 tcg_out_modrm(s, OPC_SHIFT_1 + ext, subopc, reg);
1223 tcg_out_modrm(s, OPC_SHIFT_Ib + ext, subopc, reg);
1228 static inline void tcg_out_bswap32(TCGContext *s, int reg)
1230 tcg_out_opc(s, OPC_BSWAP + LOWREGMASK(reg), 0, reg, 0);
1233 static inline void tcg_out_rolw_8(TCGContext *s, int reg)
1235 tcg_out_shifti(s, SHIFT_ROL + P_DATA16, reg, 8);
1238 static inline void tcg_out_ext8u(TCGContext *s, int dest, int src)
1241 tcg_debug_assert(src < 4 || TCG_TARGET_REG_BITS == 64);
1242 tcg_out_modrm(s, OPC_MOVZBL + P_REXB_RM, dest, src);
1245 static void tcg_out_ext8s(TCGContext *s, int dest, int src, int rexw)
1248 tcg_debug_assert(src < 4 || TCG_TARGET_REG_BITS == 64);
1249 tcg_out_modrm(s, OPC_MOVSBL + P_REXB_RM + rexw, dest, src);
1252 static inline void tcg_out_ext16u(TCGContext *s, int dest, int src)
1255 tcg_out_modrm(s, OPC_MOVZWL, dest, src);
1258 static inline void tcg_out_ext16s(TCGContext *s, int dest, int src, int rexw)
1261 tcg_out_modrm(s, OPC_MOVSWL + rexw, dest, src);
1264 static inline void tcg_out_ext32u(TCGContext *s, int dest, int src)
1266 /* 32-bit mov zero extends. */
1267 tcg_out_modrm(s, OPC_MOVL_GvEv, dest, src);
1270 static inline void tcg_out_ext32s(TCGContext *s, int dest, int src)
1272 tcg_out_modrm(s, OPC_MOVSLQ, dest, src);
1275 static inline void tcg_out_bswap64(TCGContext *s, int reg)
1277 tcg_out_opc(s, OPC_BSWAP + P_REXW + LOWREGMASK(reg), 0, reg, 0);
1280 static void tgen_arithi(TCGContext *s, int c, int r0,
1281 tcg_target_long val, int cf)
1285 if (TCG_TARGET_REG_BITS == 64) {
1290 /* ??? While INC is 2 bytes shorter than ADDL $1, they also induce
1291 partial flags update stalls on Pentium4 and are not recommended
1292 by current Intel optimization manuals. */
1293 if (!cf && (c == ARITH_ADD || c == ARITH_SUB) && (val == 1 || val == -1)) {
1294 int is_inc = (c == ARITH_ADD) ^ (val < 0);
1295 if (TCG_TARGET_REG_BITS == 64) {
1296 /* The single-byte increment encodings are re-tasked as the
1297 REX prefixes. Use the MODRM encoding. */
1298 tcg_out_modrm(s, OPC_GRP5 + rexw,
1299 (is_inc ? EXT5_INC_Ev : EXT5_DEC_Ev), r0);
1301 tcg_out8(s, (is_inc ? OPC_INC_r32 : OPC_DEC_r32) + r0);
1306 if (c == ARITH_AND) {
1307 if (TCG_TARGET_REG_BITS == 64) {
1308 if (val == 0xffffffffu) {
1309 tcg_out_ext32u(s, r0, r0);
1312 if (val == (uint32_t)val) {
1313 /* AND with no high bits set can use a 32-bit operation. */
1317 if (val == 0xffu && (r0 < 4 || TCG_TARGET_REG_BITS == 64)) {
1318 tcg_out_ext8u(s, r0, r0);
1321 if (val == 0xffffu) {
1322 tcg_out_ext16u(s, r0, r0);
1327 if (val == (int8_t)val) {
1328 tcg_out_modrm(s, OPC_ARITH_EvIb + rexw, c, r0);
1332 if (rexw == 0 || val == (int32_t)val) {
1333 tcg_out_modrm(s, OPC_ARITH_EvIz + rexw, c, r0);
1341 static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
1344 tgen_arithi(s, ARITH_ADD + P_REXW, reg, val, 0);
1348 /* Use SMALL != 0 to force a short forward branch. */
1349 static void tcg_out_jxx(TCGContext *s, int opc, TCGLabel *l, int small)
1354 val = tcg_pcrel_diff(s, l->u.value_ptr);
1356 if ((int8_t)val1 == val1) {
1358 tcg_out8(s, OPC_JMP_short);
1360 tcg_out8(s, OPC_JCC_short + opc);
1368 tcg_out8(s, OPC_JMP_long);
1369 tcg_out32(s, val - 5);
1371 tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0);
1372 tcg_out32(s, val - 6);
1377 tcg_out8(s, OPC_JMP_short);
1379 tcg_out8(s, OPC_JCC_short + opc);
1381 tcg_out_reloc(s, s->code_ptr, R_386_PC8, l, -1);
1385 tcg_out8(s, OPC_JMP_long);
1387 tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0);
1389 tcg_out_reloc(s, s->code_ptr, R_386_PC32, l, -4);
1394 static void tcg_out_cmp(TCGContext *s, TCGArg arg1, TCGArg arg2,
1395 int const_arg2, int rexw)
1400 tcg_out_modrm(s, OPC_TESTL + rexw, arg1, arg1);
1402 tgen_arithi(s, ARITH_CMP + rexw, arg1, arg2, 0);
1405 tgen_arithr(s, ARITH_CMP + rexw, arg1, arg2);
1409 static void tcg_out_brcond32(TCGContext *s, TCGCond cond,
1410 TCGArg arg1, TCGArg arg2, int const_arg2,
1411 TCGLabel *label, int small)
1413 tcg_out_cmp(s, arg1, arg2, const_arg2, 0);
1414 tcg_out_jxx(s, tcg_cond_to_jcc[cond], label, small);
1417 #if TCG_TARGET_REG_BITS == 64
1418 static void tcg_out_brcond64(TCGContext *s, TCGCond cond,
1419 TCGArg arg1, TCGArg arg2, int const_arg2,
1420 TCGLabel *label, int small)
1422 tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW);
1423 tcg_out_jxx(s, tcg_cond_to_jcc[cond], label, small);
1426 /* XXX: we implement it at the target level to avoid having to
1427 handle cross-basic-block temporaries. */
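/*
 * For example, a double-word TCG_COND_LT branch is lowered to:
 *
 *     brcond32  LT,  args[1], args[3] -> label_this   (signed high parts)
 *     jne       label_next                    (high parts unequal: done)
 *     brcond32  LTU, args[0], args[2] -> label_this   (unsigned low parts)
 *   label_next:
 */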
1428 static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
1429 const int *const_args, int small)
1431 TCGLabel *label_next = gen_new_label();
1432 TCGLabel *label_this = arg_label(args[5]);
1436 tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2],
1438 tcg_out_brcond32(s, TCG_COND_EQ, args[1], args[3], const_args[3],
1442 tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2],
1444 tcg_out_brcond32(s, TCG_COND_NE, args[1], args[3], const_args[3],
1448 tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3],
1450 tcg_out_jxx(s, JCC_JNE, label_next, 1);
1451 tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2],
1455 tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3],
1457 tcg_out_jxx(s, JCC_JNE, label_next, 1);
1458 tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2],
1462 tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3],
1464 tcg_out_jxx(s, JCC_JNE, label_next, 1);
1465 tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2],
1469 tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3],
1471 tcg_out_jxx(s, JCC_JNE, label_next, 1);
1472 tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2],
1476 tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3],
1478 tcg_out_jxx(s, JCC_JNE, label_next, 1);
1479 tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2],
1483 tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3],
1485 tcg_out_jxx(s, JCC_JNE, label_next, 1);
1486 tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2],
1490 tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3],
1492 tcg_out_jxx(s, JCC_JNE, label_next, 1);
1493 tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2],
1497 tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3],
1499 tcg_out_jxx(s, JCC_JNE, label_next, 1);
1500 tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2],
1506 tcg_out_label(s, label_next);
1510 static void tcg_out_setcond32(TCGContext *s, TCGCond cond, TCGArg dest,
1511 TCGArg arg1, TCGArg arg2, int const_arg2)
1513 tcg_out_cmp(s, arg1, arg2, const_arg2, 0);
1514 tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest);
1515 tcg_out_ext8u(s, dest, dest);
1518 #if TCG_TARGET_REG_BITS == 64
1519 static void tcg_out_setcond64(TCGContext *s, TCGCond cond, TCGArg dest,
1520 TCGArg arg1, TCGArg arg2, int const_arg2)
1522 tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW);
1523 tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest);
1524 tcg_out_ext8u(s, dest, dest);
1527 static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
1528 const int *const_args)
1531 TCGLabel *label_true, *label_over;
1533 memcpy(new_args, args+1, 5*sizeof(TCGArg));
1535 if (args[0] == args[1] || args[0] == args[2]
1536 || (!const_args[3] && args[0] == args[3])
1537 || (!const_args[4] && args[0] == args[4])) {
1538 /* When the destination overlaps with one of the argument
1539 registers, don't do anything tricky. */
1540 label_true = gen_new_label();
1541 label_over = gen_new_label();
1543 new_args[5] = label_arg(label_true);
1544 tcg_out_brcond2(s, new_args, const_args+1, 1);
1546 tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);
1547 tcg_out_jxx(s, JCC_JMP, label_over, 1);
1548 tcg_out_label(s, label_true);
1550 tcg_out_movi(s, TCG_TYPE_I32, args[0], 1);
1551 tcg_out_label(s, label_over);
1553 /* When the destination does not overlap one of the arguments,
1554 clear the destination first, jump if cond false, and emit an
1555 increment in the true case. This results in smaller code. */
1557 tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);
1559 label_over = gen_new_label();
1560 new_args[4] = tcg_invert_cond(new_args[4]);
1561 new_args[5] = label_arg(label_over);
1562 tcg_out_brcond2(s, new_args, const_args+1, 1);
1564 tgen_arithi(s, ARITH_ADD, args[0], 1, 0);
1565 tcg_out_label(s, label_over);
1570 static void tcg_out_cmov(TCGContext *s, TCGCond cond, int rexw,
1571 TCGReg dest, TCGReg v1)
1574 tcg_out_modrm(s, OPC_CMOVCC | tcg_cond_to_jcc[cond] | rexw, dest, v1);
1576 TCGLabel *over = gen_new_label();
1577 tcg_out_jxx(s, tcg_cond_to_jcc[tcg_invert_cond(cond)], over, 1);
1578 tcg_out_mov(s, TCG_TYPE_I32, dest, v1);
1579 tcg_out_label(s, over);
1583 static void tcg_out_movcond32(TCGContext *s, TCGCond cond, TCGReg dest,
1584 TCGReg c1, TCGArg c2, int const_c2,
1587 tcg_out_cmp(s, c1, c2, const_c2, 0);
1588 tcg_out_cmov(s, cond, 0, dest, v1);
1591 #if TCG_TARGET_REG_BITS == 64
1592 static void tcg_out_movcond64(TCGContext *s, TCGCond cond, TCGReg dest,
1593 TCGReg c1, TCGArg c2, int const_c2,
1596 tcg_out_cmp(s, c1, c2, const_c2, P_REXW);
1597 tcg_out_cmov(s, cond, P_REXW, dest, v1);
1601 static void tcg_out_ctz(TCGContext *s, int rexw, TCGReg dest, TCGReg arg1,
1602 TCGArg arg2, bool const_a2)
1605 tcg_out_modrm(s, OPC_TZCNT + rexw, dest, arg1);
1607 tcg_debug_assert(arg2 == (rexw ? 64 : 32));
1609 tcg_debug_assert(dest != arg2);
1610 tcg_out_cmov(s, TCG_COND_LTU, rexw, dest, arg2);
1613 tcg_debug_assert(dest != arg2);
1614 tcg_out_modrm(s, OPC_BSF + rexw, dest, arg1);
1615 tcg_out_cmov(s, TCG_COND_EQ, rexw, dest, arg2);
1619 static void tcg_out_clz(TCGContext *s, int rexw, TCGReg dest, TCGReg arg1,
1620 TCGArg arg2, bool const_a2)
1623 tcg_out_modrm(s, OPC_LZCNT + rexw, dest, arg1);
1625 tcg_debug_assert(arg2 == (rexw ? 64 : 32));
1627 tcg_debug_assert(dest != arg2);
1628 tcg_out_cmov(s, TCG_COND_LTU, rexw, dest, arg2);
1631 tcg_debug_assert(!const_a2);
1632 tcg_debug_assert(dest != arg1);
1633 tcg_debug_assert(dest != arg2);
1635 /* Recall that the output of BSR is the index not the count. */
1636 tcg_out_modrm(s, OPC_BSR + rexw, dest, arg1);
1637 tgen_arithi(s, ARITH_XOR + rexw, dest, rexw ? 63 : 31, 0);
1639 /* Since we have destroyed the flags from BSR, we have to re-test. */
1640 tcg_out_cmp(s, arg1, 0, 1, rexw);
1641 tcg_out_cmov(s, TCG_COND_EQ, rexw, dest, arg2);
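/* Example: for the 32-bit input 0x80, BSR yields bit index 7 and
   7 ^ 31 == 24, which is clz32(0x80); the explicit compare against zero
   above then supplies the fallback value via CMOVE when the input was zero. */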
1645 static void tcg_out_branch(TCGContext *s, int call, const tcg_insn_unit *dest)
1647 intptr_t disp = tcg_pcrel_diff(s, dest) - 5;
1649 if (disp == (int32_t)disp) {
1650 tcg_out_opc(s, call ? OPC_CALL_Jz : OPC_JMP_long, 0, 0, 0);
1653 /* rip-relative addressing into the constant pool.
1654 This is 6 + 8 = 14 bytes, as compared to using an
1655 immediate load 10 + 6 = 16 bytes, plus we may
1656 be able to re-use the pool constant for more calls. */
1657 tcg_out_opc(s, OPC_GRP5, 0, 0, 0);
1658 tcg_out8(s, (call ? EXT5_CALLN_Ev : EXT5_JMPN_Ev) << 3 | 5);
1659 new_pool_label(s, (uintptr_t)dest, R_386_PC32, s->code_ptr, -4);
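/*
 * That is: a reachable target uses the 5-byte e8/e9 rel32 form, while
 * a distant one becomes ff /2 (call) or ff /4 (jmp) with rm = 5
 * (rip-relative), reading the destination from an 8-byte constant-pool
 * slot.
 */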
1664 static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest,
1665 const TCGHelperInfo *info)
1667 tcg_out_branch(s, 1, dest);
1670 static void tcg_out_jmp(TCGContext *s, const tcg_insn_unit *dest)
1672 tcg_out_branch(s, 0, dest);
1675 static void tcg_out_nopn(TCGContext *s, int n)
1678 /* Emit 1 or 2 operand size prefixes for the standard one byte nop,
1679 * "xchg %eax,%eax", forming "xchg %ax,%ax". All cores accept the
1680 * duplicate prefix, and all of the interesting recent cores can
1681 * decode and discard the duplicates in a single cycle.
1683 tcg_debug_assert(n >= 1);
1684 for (i = 1; i < n; ++i) {
1690 #if defined(CONFIG_SOFTMMU)
1691 /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
1692 * int mmu_idx, uintptr_t ra)
1694 static void * const qemu_ld_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
1695 [MO_UB] = helper_ret_ldub_mmu,
1696 [MO_LEUW] = helper_le_lduw_mmu,
1697 [MO_LEUL] = helper_le_ldul_mmu,
1698 [MO_LEUQ] = helper_le_ldq_mmu,
1699 [MO_BEUW] = helper_be_lduw_mmu,
1700 [MO_BEUL] = helper_be_ldul_mmu,
1701 [MO_BEUQ] = helper_be_ldq_mmu,
1704 /* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
1705 * uintxx_t val, int mmu_idx, uintptr_t ra)
1707 static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
1708 [MO_UB] = helper_ret_stb_mmu,
1709 [MO_LEUW] = helper_le_stw_mmu,
1710 [MO_LEUL] = helper_le_stl_mmu,
1711 [MO_LEUQ] = helper_le_stq_mmu,
1712 [MO_BEUW] = helper_be_stw_mmu,
1713 [MO_BEUL] = helper_be_stl_mmu,
1714 [MO_BEUQ] = helper_be_stq_mmu,
1717 /* Perform the TLB load and compare.
1720 ADDRLO and ADDRHI contain the low and high part of the address.
1722 MEM_INDEX and S_BITS are the memory context and log2 size of the load.
1724 WHICH is the offset into the CPUTLBEntry structure of the slot to read.
1725 This should be offsetof addr_read or addr_write.
1728 LABEL_PTRS is filled with 1 (32-bit addresses) or 2 (64-bit addresses)
1729 positions of the displacements of forward jumps to the TLB miss case.
1731 Second argument register is loaded with the low part of the address.
1732 In the TLB hit case, it has been adjusted as indicated by the TLB
1733 and so is a host address. In the TLB miss case, it continues to
1734 hold a guest address.
1736 First argument register is clobbered. */
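/*
 * Roughly, the fast path emitted below is (64-bit host and guest,
 * env == TCG_AREG0):
 *
 *     movq  addrlo, %r0
 *     shrq  $(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS), %r0
 *     andq  mask_ofs(env), %r0              # CPUTLBDescFast.mask
 *     addq  table_ofs(env), %r0             # CPUTLBDescFast.table
 *     leaq  (s_mask - a_mask)(addrlo), %r1  # plain movq if a_bits >= s_bits
 *     andq  $(TARGET_PAGE_MASK | a_mask), %r1
 *     cmpq  WHICH(%r0), %r1
 *     movq  addrlo, %r1
 *     jne   slow_path
 *     addq  addend(%r0), %r1
 */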
1738 static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
1739 int mem_index, MemOp opc,
1740 tcg_insn_unit **label_ptr, int which)
1742 const TCGReg r0 = TCG_REG_L0;
1743 const TCGReg r1 = TCG_REG_L1;
1744 TCGType ttype = TCG_TYPE_I32;
1745 TCGType tlbtype = TCG_TYPE_I32;
1746 int trexw = 0, hrexw = 0, tlbrexw = 0;
1747 unsigned a_bits = get_alignment_bits(opc);
1748 unsigned s_bits = opc & MO_SIZE;
1749 unsigned a_mask = (1 << a_bits) - 1;
1750 unsigned s_mask = (1 << s_bits) - 1;
1751 target_ulong tlb_mask;
1753 if (TCG_TARGET_REG_BITS == 64) {
1754 if (TARGET_LONG_BITS == 64) {
1755 ttype = TCG_TYPE_I64;
1758 if (TCG_TYPE_PTR == TCG_TYPE_I64) {
1760 if (TARGET_PAGE_BITS + CPU_TLB_DYN_MAX_BITS > 32) {
1761 tlbtype = TCG_TYPE_I64;
1767 tcg_out_mov(s, tlbtype, r0, addrlo);
1768 tcg_out_shifti(s, SHIFT_SHR + tlbrexw, r0,
1769 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
1771 tcg_out_modrm_offset(s, OPC_AND_GvEv + trexw, r0, TCG_AREG0,
1772 TLB_MASK_TABLE_OFS(mem_index) +
1773 offsetof(CPUTLBDescFast, mask));
1775 tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, r0, TCG_AREG0,
1776 TLB_MASK_TABLE_OFS(mem_index) +
1777 offsetof(CPUTLBDescFast, table));
1779 /* If the required alignment is at least as large as the access, simply
1780 copy the address and mask. For lesser alignments, check that we don't
1781 cross pages for the complete access. */
1782 if (a_bits >= s_bits) {
1783 tcg_out_mov(s, ttype, r1, addrlo);
1785 tcg_out_modrm_offset(s, OPC_LEA + trexw, r1, addrlo, s_mask - a_mask);
1787 tlb_mask = (target_ulong)TARGET_PAGE_MASK | a_mask;
1788 tgen_arithi(s, ARITH_AND + trexw, r1, tlb_mask, 0);
1791 tcg_out_modrm_offset(s, OPC_CMP_GvEv + trexw, r1, r0, which);
1793 /* Prepare for both the fast path add of the tlb addend, and the slow
1794 path function argument setup. */
1795 tcg_out_mov(s, ttype, r1, addrlo);
1798 tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
1799 label_ptr[0] = s->code_ptr;
1802 if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
1803 /* cmp 4(r0), addrhi */
1804 tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi, r0, which + 4);
1807 tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
1808 label_ptr[1] = s->code_ptr;
1814 /* add addend(r0), r1 */
1815 tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, r1, r0,
1816 offsetof(CPUTLBEntry, addend));
1820 * Record the context of a call to the out of line helper code for the slow path
1821 * for a load or store, so that we can later generate the correct helper code
1823 static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64,
1825 TCGReg datalo, TCGReg datahi,
1826 TCGReg addrlo, TCGReg addrhi,
1827 tcg_insn_unit *raddr,
1828 tcg_insn_unit **label_ptr)
1830 TCGLabelQemuLdst *label = new_ldst_label(s);
1832 label->is_ld = is_ld;
1834 label->type = is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
1835 label->datalo_reg = datalo;
1836 label->datahi_reg = datahi;
1837 label->addrlo_reg = addrlo;
1838 label->addrhi_reg = addrhi;
1839 label->raddr = tcg_splitwx_to_rx(raddr);
1840 label->label_ptr[0] = label_ptr[0];
1841 if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
1842 label->label_ptr[1] = label_ptr[1];
1847 * Generate code for the slow path for a load at the end of the block
1849 static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1851 MemOpIdx oi = l->oi;
1852 MemOp opc = get_memop(oi);
1854 tcg_insn_unit **label_ptr = &l->label_ptr[0];
1855 int rexw = (l->type == TCG_TYPE_I64 ? P_REXW : 0);
1857 /* resolve label address */
1858 tcg_patch32(label_ptr[0], s->code_ptr - label_ptr[0] - 4);
1859 if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
1860 tcg_patch32(label_ptr[1], s->code_ptr - label_ptr[1] - 4);
1863 if (TCG_TARGET_REG_BITS == 32) {
1866 tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
1869 tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
1872 if (TARGET_LONG_BITS == 64) {
1873 tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
1877 tcg_out_sti(s, TCG_TYPE_I32, oi, TCG_REG_ESP, ofs);
1880 tcg_out_sti(s, TCG_TYPE_PTR, (uintptr_t)l->raddr, TCG_REG_ESP, ofs);
1882 tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
1883 /* The second argument is already loaded with addrlo. */
1884 tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2], oi);
1885 tcg_out_movi(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[3],
1886 (uintptr_t)l->raddr);
1889 tcg_out_branch(s, 1, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]);
1891 data_reg = l->datalo_reg;
1892 switch (opc & MO_SSIZE) {
1894 tcg_out_ext8s(s, data_reg, TCG_REG_EAX, rexw);
1897 tcg_out_ext16s(s, data_reg, TCG_REG_EAX, rexw);
1899 #if TCG_TARGET_REG_BITS == 64
1901 tcg_out_ext32s(s, data_reg, TCG_REG_EAX);
1906 /* Note that the helpers have zero-extended to tcg_target_long. */
1908 tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
1911 if (TCG_TARGET_REG_BITS == 64) {
1912 tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_RAX);
1913 } else if (data_reg == TCG_REG_EDX) {
1914 /* xchg %edx, %eax */
1915 tcg_out_opc(s, OPC_XCHG_ax_r32 + TCG_REG_EDX, 0, 0, 0);
1916 tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EAX);
1918 tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
1919 tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EDX);
1926 /* Jump to the code corresponding to the next IR of qemu_ld */
1927 tcg_out_jmp(s, l->raddr);
1932 * Generate code for the slow path for a store at the end of the block
1934 static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1936 MemOpIdx oi = l->oi;
1937 MemOp opc = get_memop(oi);
1938 MemOp s_bits = opc & MO_SIZE;
1939 tcg_insn_unit **label_ptr = &l->label_ptr[0];
1942 /* resolve label address */
1943 tcg_patch32(label_ptr[0], s->code_ptr - label_ptr[0] - 4);
1944 if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
1945 tcg_patch32(label_ptr[1], s->code_ptr - label_ptr[1] - 4);
1948 if (TCG_TARGET_REG_BITS == 32) {
1951 tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
1954 tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
1957 if (TARGET_LONG_BITS == 64) {
1958 tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
1962 tcg_out_st(s, TCG_TYPE_I32, l->datalo_reg, TCG_REG_ESP, ofs);
1965 if (s_bits == MO_64) {
1966 tcg_out_st(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_ESP, ofs);
1970 tcg_out_sti(s, TCG_TYPE_I32, oi, TCG_REG_ESP, ofs);
1973 retaddr = TCG_REG_EAX;
1974 tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
1975 tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP, ofs);
1977 tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
1978 /* The second argument is already loaded with addrlo. */
1979 tcg_out_mov(s, (s_bits == MO_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
1980 tcg_target_call_iarg_regs[2], l->datalo_reg);
1981 tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3], oi);
1983 if (ARRAY_SIZE(tcg_target_call_iarg_regs) > 4) {
1984 retaddr = tcg_target_call_iarg_regs[4];
1985 tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
1987 retaddr = TCG_REG_RAX;
1988 tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
1989 tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP,
1990 TCG_TARGET_CALL_STACK_OFFSET);
1994 /* "Tail call" to the helper, with the return address back inline. */
1995 tcg_out_push(s, retaddr);
1996 tcg_out_jmp(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
2001 static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addrlo,
2002 TCGReg addrhi, unsigned a_bits)
2004 unsigned a_mask = (1 << a_bits) - 1;
2005 TCGLabelQemuLdst *label;
2008 * We are expecting a_bits to max out at 7, so we can usually use testb.
2009 * For i686, we have to use testl for %esi/%edi.
2011 if (a_mask <= 0xff && (TCG_TARGET_REG_BITS == 64 || addrlo < 4)) {
2012 tcg_out_modrm(s, OPC_GRP3_Eb | P_REXB_RM, EXT3_TESTi, addrlo);
2013 tcg_out8(s, a_mask);
2015 tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_TESTi, addrlo);
2016 tcg_out32(s, a_mask);
2020 tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
2022 label = new_ldst_label(s);
2023 label->is_ld = is_ld;
2024 label->addrlo_reg = addrlo;
2025 label->addrhi_reg = addrhi;
2026 label->raddr = tcg_splitwx_to_rx(s->code_ptr + 4);
2027 label->label_ptr[0] = s->code_ptr;
2032 static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
2034 /* resolve label address */
2035 tcg_patch32(l->label_ptr[0], s->code_ptr - l->label_ptr[0] - 4);
2037 if (TCG_TARGET_REG_BITS == 32) {
2040 tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
2043 tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
2045 if (TARGET_LONG_BITS == 64) {
2046 tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
2050 tcg_out_pushi(s, (uintptr_t)l->raddr);
2052 tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
2054 tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
2056 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RAX, (uintptr_t)l->raddr);
2057 tcg_out_push(s, TCG_REG_RAX);
2060 /* "Tail call" to the helper, with the return address back inline. */
2061 tcg_out_jmp(s, (const void *)(l->is_ld ? helper_unaligned_ld
2062 : helper_unaligned_st));
2066 static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
2068 return tcg_out_fail_alignment(s, l);
2071 static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
2073 return tcg_out_fail_alignment(s, l);
2076 #if TCG_TARGET_REG_BITS == 32
2077 # define x86_guest_base_seg 0
2078 # define x86_guest_base_index -1
2079 # define x86_guest_base_offset guest_base
2081 static int x86_guest_base_seg;
2082 static int x86_guest_base_index = -1;
2083 static int32_t x86_guest_base_offset;
2084 # if defined(__x86_64__) && defined(__linux__)
2085 # include <asm/prctl.h>
2086 # include <sys/prctl.h>
2087 int arch_prctl(int code, unsigned long addr);
2088 static inline int setup_guest_base_seg(void)
2090 if (arch_prctl(ARCH_SET_GS, guest_base) == 0) {
2095 # elif defined (__FreeBSD__) || defined (__FreeBSD_kernel__)
2096 # include <machine/sysarch.h>
2097 static inline int setup_guest_base_seg(void)
2099 if (sysarch(AMD64_SET_GSBASE, &guest_base) == 0) {
2105 static inline int setup_guest_base_seg(void)
2111 #endif /* SOFTMMU */
2113 static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
2114 TCGReg base, int index, intptr_t ofs,
2115 int seg, bool is64, MemOp memop)
2117 bool use_movbe = false;
2118 int rexw = is64 * P_REXW;
2119 int movop = OPC_MOVL_GvEv;
2121 /* Do big-endian loads with movbe. */
2122 if (memop & MO_BSWAP) {
2123 tcg_debug_assert(have_movbe);
2125 movop = OPC_MOVBE_GyMy;
2128 switch (memop & MO_SSIZE) {
2130 tcg_out_modrm_sib_offset(s, OPC_MOVZBL + seg, datalo,
2131 base, index, 0, ofs);
2134 tcg_out_modrm_sib_offset(s, OPC_MOVSBL + rexw + seg, datalo,
2135 base, index, 0, ofs);
2139 /* There is no extending movbe; only the low 16 bits are modified. */
2140 if (datalo != base && datalo != index) {
2141 /* XOR breaks dependency chains. */
2142 tgen_arithr(s, ARITH_XOR, datalo, datalo);
2143 tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + seg,
2144 datalo, base, index, 0, ofs);
2146 tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + seg,
2147 datalo, base, index, 0, ofs);
2148 tcg_out_ext16u(s, datalo, datalo);
2151 tcg_out_modrm_sib_offset(s, OPC_MOVZWL + seg, datalo,
2152 base, index, 0, ofs);
2157 tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + seg,
2158 datalo, base, index, 0, ofs);
2159 tcg_out_ext16s(s, datalo, datalo, rexw);
2161 tcg_out_modrm_sib_offset(s, OPC_MOVSWL + rexw + seg,
2162 datalo, base, index, 0, ofs);
2166 tcg_out_modrm_sib_offset(s, movop + seg, datalo, base, index, 0, ofs);
2168 #if TCG_TARGET_REG_BITS == 64
2171 tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + seg, datalo,
2172 base, index, 0, ofs);
2173 tcg_out_ext32s(s, datalo, datalo);
2175 tcg_out_modrm_sib_offset(s, OPC_MOVSLQ + seg, datalo,
2176 base, index, 0, ofs);
2181 if (TCG_TARGET_REG_BITS == 64) {
2182 tcg_out_modrm_sib_offset(s, movop + P_REXW + seg, datalo,
2183 base, index, 0, ofs);
2190 if (base != datalo) {
2191 tcg_out_modrm_sib_offset(s, movop + seg, datalo,
2192 base, index, 0, ofs);
2193 tcg_out_modrm_sib_offset(s, movop + seg, datahi,
2194 base, index, 0, ofs + 4);
2196 tcg_out_modrm_sib_offset(s, movop + seg, datahi,
2197 base, index, 0, ofs + 4);
2198 tcg_out_modrm_sib_offset(s, movop + seg, datalo,
2199 base, index, 0, ofs);
2204 g_assert_not_reached();
/* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and
   EAX.  It will be useful once fixed-register globals are less
   common. */
2211 static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
2213 TCGReg datalo, datahi, addrlo;
2214 TCGReg addrhi __attribute__((unused));
2217 #if defined(CONFIG_SOFTMMU)
2219 tcg_insn_unit *label_ptr[2];
2225 datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
2227 addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
2229 opc = get_memop(oi);
2231 #if defined(CONFIG_SOFTMMU)
2232 mem_index = get_mmuidx(oi);
2234 tcg_out_tlb_load(s, addrlo, addrhi, mem_index, opc,
2235 label_ptr, offsetof(CPUTLBEntry, addr_read));
2238 tcg_out_qemu_ld_direct(s, datalo, datahi, TCG_REG_L1, -1, 0, 0, is64, opc);
/* Record the current context of a load into the ldst label. */
2241 add_qemu_ldst_label(s, true, is64, oi, datalo, datahi, addrlo, addrhi,
2242 s->code_ptr, label_ptr);
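/*
 * Rough shape of the softmmu path generated here: tcg_out_tlb_load
 * emits the TLB lookup and leaves the offsets of its forward branches
 * in label_ptr[]; on a hit it falls through with the host address
 * already formed in TCG_REG_L1, so the direct load above uses offset 0.
 * add_qemu_ldst_label records oi, the registers and label_ptr so that
 * the slow path emitted at the end of the TB can call the load helper
 * and patch those branches.
 */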
2244 a_bits = get_alignment_bits(opc);
2246 tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
2249 tcg_out_qemu_ld_direct(s, datalo, datahi, addrlo, x86_guest_base_index,
2250 x86_guest_base_offset, x86_guest_base_seg,
2255 static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
2256 TCGReg base, int index, intptr_t ofs,
2257 int seg, MemOp memop)
2259 bool use_movbe = false;
2260 int movop = OPC_MOVL_EvGv;
/*
 * Do big-endian stores with movbe or softmmu.
 * User-only without movbe will have its swapping done generically.
 */
2266 if (memop & MO_BSWAP) {
2267 tcg_debug_assert(have_movbe);
2269 movop = OPC_MOVBE_MyGy;
2272 switch (memop & MO_SIZE) {
2274 /* This is handled with constraints on INDEX_op_qemu_st8_i32. */
2275 tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || datalo < 4);
2276 tcg_out_modrm_sib_offset(s, OPC_MOVB_EvGv + P_REXB_R + seg,
2277 datalo, base, index, 0, ofs);
2280 tcg_out_modrm_sib_offset(s, movop + P_DATA16 + seg, datalo,
2281 base, index, 0, ofs);
2284 tcg_out_modrm_sib_offset(s, movop + seg, datalo, base, index, 0, ofs);
2287 if (TCG_TARGET_REG_BITS == 64) {
2288 tcg_out_modrm_sib_offset(s, movop + P_REXW + seg, datalo,
2289 base, index, 0, ofs);
2296 tcg_out_modrm_sib_offset(s, movop + seg, datalo,
2297 base, index, 0, ofs);
2298 tcg_out_modrm_sib_offset(s, movop + seg, datahi,
2299 base, index, 0, ofs + 4);
2303 g_assert_not_reached();
2307 static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
2309 TCGReg datalo, datahi, addrlo;
2310 TCGReg addrhi __attribute__((unused));
2313 #if defined(CONFIG_SOFTMMU)
2315 tcg_insn_unit *label_ptr[2];
2321 datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
2323 addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
2325 opc = get_memop(oi);
2327 #if defined(CONFIG_SOFTMMU)
2328 mem_index = get_mmuidx(oi);
2330 tcg_out_tlb_load(s, addrlo, addrhi, mem_index, opc,
2331 label_ptr, offsetof(CPUTLBEntry, addr_write));
2334 tcg_out_qemu_st_direct(s, datalo, datahi, TCG_REG_L1, -1, 0, 0, opc);
/* Record the current context of a store into the ldst label. */
2337 add_qemu_ldst_label(s, false, is64, oi, datalo, datahi, addrlo, addrhi,
2338 s->code_ptr, label_ptr);
2340 a_bits = get_alignment_bits(opc);
2342 tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits);
2345 tcg_out_qemu_st_direct(s, datalo, datahi, addrlo, x86_guest_base_index,
2346 x86_guest_base_offset, x86_guest_base_seg, opc);
2350 static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
2351 const TCGArg args[TCG_MAX_OP_ARGS],
2352 const int const_args[TCG_MAX_OP_ARGS])
2355 int c, const_a2, vexop, rexw = 0;
2357 #if TCG_TARGET_REG_BITS == 64
2358 # define OP_32_64(x) \
2359 case glue(glue(INDEX_op_, x), _i64): \
2360 rexw = P_REXW; /* FALLTHRU */ \
2361 case glue(glue(INDEX_op_, x), _i32)
2363 # define OP_32_64(x) \
2364 case glue(glue(INDEX_op_, x), _i32)
2367 /* Hoist the loads of the most common arguments. */
2371 const_a2 = const_args[2];
2374 case INDEX_op_exit_tb:
2375 /* Reuse the zeroing that exists for goto_ptr. */
2377 tcg_out_jmp(s, tcg_code_gen_epilogue);
2379 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_EAX, a0);
2380 tcg_out_jmp(s, tb_ret_addr);
2383 case INDEX_op_goto_tb:
2384 if (s->tb_jmp_insn_offset) {
2385 /* direct jump method */
/* The jump displacement must be aligned for atomic patching;
 * see if we need to add extra nops before the jump.
 */
2390 gap = QEMU_ALIGN_PTR_UP(s->code_ptr + 1, 4) - s->code_ptr;
2392 tcg_out_nopn(s, gap - 1);
2394 tcg_out8(s, OPC_JMP_long); /* jmp im */
2395 s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
2398 /* indirect jump method */
2399 tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, -1,
2400 (intptr_t)(s->tb_jmp_target_addr + a0));
2402 set_jmp_reset_offset(s, a0);
2404 case INDEX_op_goto_ptr:
2405 /* jmp to the given host address (could be epilogue) */
2406 tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, a0);
2409 tcg_out_jxx(s, JCC_JMP, arg_label(a0), 0);
2412 /* Note that we can ignore REXW for the zero-extend to 64-bit. */
2413 tcg_out_modrm_offset(s, OPC_MOVZBL, a0, a1, a2);
2416 tcg_out_modrm_offset(s, OPC_MOVSBL + rexw, a0, a1, a2);
2419 /* Note that we can ignore REXW for the zero-extend to 64-bit. */
2420 tcg_out_modrm_offset(s, OPC_MOVZWL, a0, a1, a2);
2423 tcg_out_modrm_offset(s, OPC_MOVSWL + rexw, a0, a1, a2);
2425 #if TCG_TARGET_REG_BITS == 64
2426 case INDEX_op_ld32u_i64:
2428 case INDEX_op_ld_i32:
2429 tcg_out_ld(s, TCG_TYPE_I32, a0, a1, a2);
2433 if (const_args[0]) {
2434 tcg_out_modrm_offset(s, OPC_MOVB_EvIz, 0, a1, a2);
2437 tcg_out_modrm_offset(s, OPC_MOVB_EvGv | P_REXB_R, a0, a1, a2);
2441 if (const_args[0]) {
2442 tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_DATA16, 0, a1, a2);
2445 tcg_out_modrm_offset(s, OPC_MOVL_EvGv | P_DATA16, a0, a1, a2);
2448 #if TCG_TARGET_REG_BITS == 64
2449 case INDEX_op_st32_i64:
2451 case INDEX_op_st_i32:
2452 if (const_args[0]) {
2453 tcg_out_modrm_offset(s, OPC_MOVL_EvIz, 0, a1, a2);
2456 tcg_out_st(s, TCG_TYPE_I32, a0, a1, a2);
2461 /* For 3-operand addition, use LEA. */
2466 } else if (a0 == a2) {
2467 /* Watch out for dest = src + dest, since we've removed
2468 the matching constraint on the add. */
2469 tgen_arithr(s, ARITH_ADD + rexw, a0, a1);
2473 tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a2, 0, c3);
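/* For example, "add_i32 a0, a1, a2" with three distinct registers
   becomes a single "lea (%a1,%a2), %a0", and a constant addend folds
   into the displacement as "lea imm(%a1), %a0"; no mov + add pair is
   needed. */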
2492 tgen_arithi(s, c + rexw, a0, a2, 0);
2494 tgen_arithr(s, c + rexw, a0, a2);
2500 tcg_out_mov(s, rexw ? TCG_TYPE_I64 : TCG_TYPE_I32, a0, a1);
2501 tgen_arithi(s, ARITH_AND + rexw, a0, ~a2, 0);
2503 tcg_out_vex_modrm(s, OPC_ANDN + rexw, a0, a2, a1);
2511 if (val == (int8_t)val) {
2512 tcg_out_modrm(s, OPC_IMUL_GvEvIb + rexw, a0, a0);
2515 tcg_out_modrm(s, OPC_IMUL_GvEvIz + rexw, a0, a0);
2519 tcg_out_modrm(s, OPC_IMUL_GvEv + rexw, a0, a2);
2524 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IDIV, args[4]);
2527 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_DIV, args[4]);
2531 /* For small constant 3-operand shift, use LEA. */
2532 if (const_a2 && a0 != a1 && (a2 - 1) < 3) {
2534 /* shl $1,a1,a0 -> lea (a1,a1),a0 */
2535 tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a1, 0, 0);
/* shl $n,a1,a0 -> lea 0(,a1,2^n),a0 */
2538 tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, -1, a1, a2, 0);
2544 goto gen_shift_maybe_vex;
2548 goto gen_shift_maybe_vex;
2552 goto gen_shift_maybe_vex;
2559 gen_shift_maybe_vex:
2562 tcg_out_vex_modrm(s, vexop + rexw, a0, a2, a1);
2565 tcg_out_mov(s, rexw ? TCG_TYPE_I64 : TCG_TYPE_I32, a0, a1);
2570 tcg_out_shifti(s, c + rexw, a0, a2);
2572 tcg_out_modrm(s, OPC_SHIFT_cl + rexw, c, a0);
2577 tcg_out_ctz(s, rexw, args[0], args[1], args[2], const_args[2]);
2580 tcg_out_clz(s, rexw, args[0], args[1], args[2], const_args[2]);
2583 tcg_out_modrm(s, OPC_POPCNT + rexw, a0, a1);
2586 case INDEX_op_brcond_i32:
2587 tcg_out_brcond32(s, a2, a0, a1, const_args[1], arg_label(args[3]), 0);
2589 case INDEX_op_setcond_i32:
2590 tcg_out_setcond32(s, args[3], a0, a1, a2, const_a2);
2592 case INDEX_op_movcond_i32:
2593 tcg_out_movcond32(s, args[5], a0, a1, a2, const_a2, args[3]);
2597 if (a2 & TCG_BSWAP_OS) {
2598 /* Output must be sign-extended. */
2600 tcg_out_bswap64(s, a0);
2601 tcg_out_shifti(s, SHIFT_SAR + rexw, a0, 48);
2603 tcg_out_bswap32(s, a0);
2604 tcg_out_shifti(s, SHIFT_SAR, a0, 16);
2606 } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
2607 /* Output must be zero-extended, but input isn't. */
2608 tcg_out_bswap32(s, a0);
2609 tcg_out_shifti(s, SHIFT_SHR, a0, 16);
2611 tcg_out_rolw_8(s, a0);
2615 tcg_out_bswap32(s, a0);
2616 if (rexw && (a2 & TCG_BSWAP_OS)) {
2617 tcg_out_ext32s(s, a0, a0);
2622 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NEG, a0);
2625 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, a0);
2629 tcg_out_ext8s(s, a0, a1, rexw);
2632 tcg_out_ext16s(s, a0, a1, rexw);
2635 tcg_out_ext8u(s, a0, a1);
2638 tcg_out_ext16u(s, a0, a1);
2641 case INDEX_op_qemu_ld_i32:
2642 tcg_out_qemu_ld(s, args, 0);
2644 case INDEX_op_qemu_ld_i64:
2645 tcg_out_qemu_ld(s, args, 1);
2647 case INDEX_op_qemu_st_i32:
2648 case INDEX_op_qemu_st8_i32:
2649 tcg_out_qemu_st(s, args, 0);
2651 case INDEX_op_qemu_st_i64:
2652 tcg_out_qemu_st(s, args, 1);
2656 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_MUL, args[3]);
2659 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IMUL, args[3]);
2662 if (const_args[4]) {
2663 tgen_arithi(s, ARITH_ADD + rexw, a0, args[4], 1);
2665 tgen_arithr(s, ARITH_ADD + rexw, a0, args[4]);
2667 if (const_args[5]) {
2668 tgen_arithi(s, ARITH_ADC + rexw, a1, args[5], 1);
2670 tgen_arithr(s, ARITH_ADC + rexw, a1, args[5]);
2674 if (const_args[4]) {
2675 tgen_arithi(s, ARITH_SUB + rexw, a0, args[4], 1);
2677 tgen_arithr(s, ARITH_SUB + rexw, a0, args[4]);
2679 if (const_args[5]) {
2680 tgen_arithi(s, ARITH_SBB + rexw, a1, args[5], 1);
2682 tgen_arithr(s, ARITH_SBB + rexw, a1, args[5]);
2686 #if TCG_TARGET_REG_BITS == 32
2687 case INDEX_op_brcond2_i32:
2688 tcg_out_brcond2(s, args, const_args, 0);
2690 case INDEX_op_setcond2_i32:
2691 tcg_out_setcond2(s, args, const_args);
2693 #else /* TCG_TARGET_REG_BITS == 64 */
2694 case INDEX_op_ld32s_i64:
2695 tcg_out_modrm_offset(s, OPC_MOVSLQ, a0, a1, a2);
2697 case INDEX_op_ld_i64:
2698 tcg_out_ld(s, TCG_TYPE_I64, a0, a1, a2);
2700 case INDEX_op_st_i64:
2701 if (const_args[0]) {
2702 tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_REXW, 0, a1, a2);
2705 tcg_out_st(s, TCG_TYPE_I64, a0, a1, a2);
2709 case INDEX_op_brcond_i64:
2710 tcg_out_brcond64(s, a2, a0, a1, const_args[1], arg_label(args[3]), 0);
2712 case INDEX_op_setcond_i64:
2713 tcg_out_setcond64(s, args[3], a0, a1, a2, const_a2);
2715 case INDEX_op_movcond_i64:
2716 tcg_out_movcond64(s, args[5], a0, a1, a2, const_a2, args[3]);
2719 case INDEX_op_bswap64_i64:
2720 tcg_out_bswap64(s, a0);
2722 case INDEX_op_extu_i32_i64:
2723 case INDEX_op_ext32u_i64:
2724 case INDEX_op_extrl_i64_i32:
2725 tcg_out_ext32u(s, a0, a1);
2727 case INDEX_op_ext_i32_i64:
2728 case INDEX_op_ext32s_i64:
2729 tcg_out_ext32s(s, a0, a1);
2731 case INDEX_op_extrh_i64_i32:
2732 tcg_out_shifti(s, SHIFT_SHR + P_REXW, a0, 32);
2737 if (args[3] == 0 && args[4] == 8) {
2738 /* load bits 0..7 */
2739 tcg_out_modrm(s, OPC_MOVB_EvGv | P_REXB_R | P_REXB_RM, a2, a0);
2740 } else if (args[3] == 8 && args[4] == 8) {
2741 /* load bits 8..15 */
2742 tcg_out_modrm(s, OPC_MOVB_EvGv, a2, a0 + 4);
2743 } else if (args[3] == 0 && args[4] == 16) {
2744 /* load bits 0..15 */
2745 tcg_out_modrm(s, OPC_MOVL_EvGv | P_DATA16, a2, a0);
2751 case INDEX_op_extract_i64:
2752 if (a2 + args[3] == 32) {
2753 /* This is a 32-bit zero-extending right shift. */
2754 tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
2755 tcg_out_shifti(s, SHIFT_SHR, a0, a2);
2759 case INDEX_op_extract_i32:
/* Use the high-byte registers when we can; otherwise emit the same
   ext16 + shift pattern that we would have gotten from the normal
   tcg-op.c expansion. */
2763 tcg_debug_assert(a2 == 8 && args[3] == 8);
2764 if (a1 < 4 && a0 < 8) {
2765 tcg_out_modrm(s, OPC_MOVZBL, a0, a1 + 4);
2767 tcg_out_ext16u(s, a0, a1);
2768 tcg_out_shifti(s, SHIFT_SHR, a0, 8);
2772 case INDEX_op_sextract_i32:
2773 /* We don't implement sextract_i64, as we cannot sign-extend to
2774 64-bits without using the REX prefix that explicitly excludes
2775 access to the high-byte registers. */
2776 tcg_debug_assert(a2 == 8 && args[3] == 8);
2777 if (a1 < 4 && a0 < 8) {
2778 tcg_out_modrm(s, OPC_MOVSBL, a0, a1 + 4);
2780 tcg_out_ext16s(s, a0, a1, 0);
2781 tcg_out_shifti(s, SHIFT_SAR, a0, 8);
2786 /* Note that SHRD outputs to the r/m operand. */
2787 tcg_out_modrm(s, OPC_SHRD_Ib + rexw, a2, a0);
2788 tcg_out8(s, args[3]);
2794 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
2795 case INDEX_op_mov_i64:
2796 case INDEX_op_call: /* Always emitted via tcg_out_call. */
2804 static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
2805 unsigned vecl, unsigned vece,
2806 const TCGArg args[TCG_MAX_OP_ARGS],
2807 const int const_args[TCG_MAX_OP_ARGS])
2809 static int const add_insn[4] = {
2810 OPC_PADDB, OPC_PADDW, OPC_PADDD, OPC_PADDQ
2812 static int const ssadd_insn[4] = {
2813 OPC_PADDSB, OPC_PADDSW, OPC_UD2, OPC_UD2
2815 static int const usadd_insn[4] = {
2816 OPC_PADDUB, OPC_PADDUW, OPC_UD2, OPC_UD2
2818 static int const sub_insn[4] = {
2819 OPC_PSUBB, OPC_PSUBW, OPC_PSUBD, OPC_PSUBQ
2821 static int const sssub_insn[4] = {
2822 OPC_PSUBSB, OPC_PSUBSW, OPC_UD2, OPC_UD2
2824 static int const ussub_insn[4] = {
2825 OPC_PSUBUB, OPC_PSUBUW, OPC_UD2, OPC_UD2
2827 static int const mul_insn[4] = {
2828 OPC_UD2, OPC_PMULLW, OPC_PMULLD, OPC_VPMULLQ
2830 static int const shift_imm_insn[4] = {
2831 OPC_UD2, OPC_PSHIFTW_Ib, OPC_PSHIFTD_Ib, OPC_PSHIFTQ_Ib
2833 static int const cmpeq_insn[4] = {
2834 OPC_PCMPEQB, OPC_PCMPEQW, OPC_PCMPEQD, OPC_PCMPEQQ
2836 static int const cmpgt_insn[4] = {
2837 OPC_PCMPGTB, OPC_PCMPGTW, OPC_PCMPGTD, OPC_PCMPGTQ
2839 static int const punpckl_insn[4] = {
2840 OPC_PUNPCKLBW, OPC_PUNPCKLWD, OPC_PUNPCKLDQ, OPC_PUNPCKLQDQ
2842 static int const punpckh_insn[4] = {
2843 OPC_PUNPCKHBW, OPC_PUNPCKHWD, OPC_PUNPCKHDQ, OPC_PUNPCKHQDQ
2845 static int const packss_insn[4] = {
2846 OPC_PACKSSWB, OPC_PACKSSDW, OPC_UD2, OPC_UD2
2848 static int const packus_insn[4] = {
2849 OPC_PACKUSWB, OPC_PACKUSDW, OPC_UD2, OPC_UD2
2851 static int const smin_insn[4] = {
2852 OPC_PMINSB, OPC_PMINSW, OPC_PMINSD, OPC_VPMINSQ
2854 static int const smax_insn[4] = {
2855 OPC_PMAXSB, OPC_PMAXSW, OPC_PMAXSD, OPC_VPMAXSQ
2857 static int const umin_insn[4] = {
2858 OPC_PMINUB, OPC_PMINUW, OPC_PMINUD, OPC_VPMINUQ
2860 static int const umax_insn[4] = {
2861 OPC_PMAXUB, OPC_PMAXUW, OPC_PMAXUD, OPC_VPMAXUQ
2863 static int const rotlv_insn[4] = {
2864 OPC_UD2, OPC_UD2, OPC_VPROLVD, OPC_VPROLVQ
2866 static int const rotrv_insn[4] = {
2867 OPC_UD2, OPC_UD2, OPC_VPRORVD, OPC_VPRORVQ
2869 static int const shlv_insn[4] = {
2870 OPC_UD2, OPC_VPSLLVW, OPC_VPSLLVD, OPC_VPSLLVQ
2872 static int const shrv_insn[4] = {
2873 OPC_UD2, OPC_VPSRLVW, OPC_VPSRLVD, OPC_VPSRLVQ
2875 static int const sarv_insn[4] = {
2876 OPC_UD2, OPC_VPSRAVW, OPC_VPSRAVD, OPC_VPSRAVQ
2878 static int const shls_insn[4] = {
2879 OPC_UD2, OPC_PSLLW, OPC_PSLLD, OPC_PSLLQ
2881 static int const shrs_insn[4] = {
2882 OPC_UD2, OPC_PSRLW, OPC_PSRLD, OPC_PSRLQ
2884 static int const sars_insn[4] = {
2885 OPC_UD2, OPC_PSRAW, OPC_PSRAD, OPC_VPSRAQ
2887 static int const vpshldi_insn[4] = {
2888 OPC_UD2, OPC_VPSHLDW, OPC_VPSHLDD, OPC_VPSHLDQ
2890 static int const vpshldv_insn[4] = {
2891 OPC_UD2, OPC_VPSHLDVW, OPC_VPSHLDVD, OPC_VPSHLDVQ
2893 static int const vpshrdv_insn[4] = {
2894 OPC_UD2, OPC_VPSHRDVW, OPC_VPSHRDVD, OPC_VPSHRDVQ
2896 static int const abs_insn[4] = {
2897 OPC_PABSB, OPC_PABSW, OPC_PABSD, OPC_VPABSQ
2900 TCGType type = vecl + TCG_TYPE_V64;
2902 TCGArg a0, a1, a2, a3;
2909 case INDEX_op_add_vec:
2910 insn = add_insn[vece];
2912 case INDEX_op_ssadd_vec:
2913 insn = ssadd_insn[vece];
2915 case INDEX_op_usadd_vec:
2916 insn = usadd_insn[vece];
2918 case INDEX_op_sub_vec:
2919 insn = sub_insn[vece];
2921 case INDEX_op_sssub_vec:
2922 insn = sssub_insn[vece];
2924 case INDEX_op_ussub_vec:
2925 insn = ussub_insn[vece];
2927 case INDEX_op_mul_vec:
2928 insn = mul_insn[vece];
2930 case INDEX_op_and_vec:
2933 case INDEX_op_or_vec:
2936 case INDEX_op_xor_vec:
2939 case INDEX_op_smin_vec:
2940 insn = smin_insn[vece];
2942 case INDEX_op_umin_vec:
2943 insn = umin_insn[vece];
2945 case INDEX_op_smax_vec:
2946 insn = smax_insn[vece];
2948 case INDEX_op_umax_vec:
2949 insn = umax_insn[vece];
2951 case INDEX_op_shlv_vec:
2952 insn = shlv_insn[vece];
2954 case INDEX_op_shrv_vec:
2955 insn = shrv_insn[vece];
2957 case INDEX_op_sarv_vec:
2958 insn = sarv_insn[vece];
2960 case INDEX_op_rotlv_vec:
2961 insn = rotlv_insn[vece];
2963 case INDEX_op_rotrv_vec:
2964 insn = rotrv_insn[vece];
2966 case INDEX_op_shls_vec:
2967 insn = shls_insn[vece];
2969 case INDEX_op_shrs_vec:
2970 insn = shrs_insn[vece];
2972 case INDEX_op_sars_vec:
2973 insn = sars_insn[vece];
2975 case INDEX_op_x86_punpckl_vec:
2976 insn = punpckl_insn[vece];
2978 case INDEX_op_x86_punpckh_vec:
2979 insn = punpckh_insn[vece];
2981 case INDEX_op_x86_packss_vec:
2982 insn = packss_insn[vece];
2984 case INDEX_op_x86_packus_vec:
2985 insn = packus_insn[vece];
2987 case INDEX_op_x86_vpshldv_vec:
2988 insn = vpshldv_insn[vece];
2992 case INDEX_op_x86_vpshrdv_vec:
2993 insn = vpshrdv_insn[vece];
2997 #if TCG_TARGET_REG_BITS == 32
2998 case INDEX_op_dup2_vec:
2999 /* First merge the two 32-bit inputs to a single 64-bit element. */
3000 tcg_out_vex_modrm(s, OPC_PUNPCKLDQ, a0, a1, a2);
3001 /* Then replicate the 64-bit elements across the rest of the vector. */
3002 if (type != TCG_TYPE_V64) {
3003 tcg_out_dup_vec(s, type, MO_64, a0, a0);
3007 case INDEX_op_abs_vec:
3008 insn = abs_insn[vece];
3013 tcg_debug_assert(insn != OPC_UD2);
3014 if (type == TCG_TYPE_V256) {
3017 tcg_out_vex_modrm(s, insn, a0, a1, a2);
3020 case INDEX_op_cmp_vec:
3022 if (sub == TCG_COND_EQ) {
3023 insn = cmpeq_insn[vece];
3024 } else if (sub == TCG_COND_GT) {
3025 insn = cmpgt_insn[vece];
3027 g_assert_not_reached();
3031 case INDEX_op_andc_vec:
3033 if (type == TCG_TYPE_V256) {
3036 tcg_out_vex_modrm(s, insn, a0, a2, a1);
3039 case INDEX_op_shli_vec:
3040 insn = shift_imm_insn[vece];
3043 case INDEX_op_shri_vec:
3044 insn = shift_imm_insn[vece];
3047 case INDEX_op_sari_vec:
3048 if (vece == MO_64) {
3049 insn = OPC_PSHIFTD_Ib | P_VEXW | P_EVEX;
3051 insn = shift_imm_insn[vece];
3055 case INDEX_op_rotli_vec:
3056 insn = OPC_PSHIFTD_Ib | P_EVEX; /* VPROL[DQ] */
3057 if (vece == MO_64) {
3063 tcg_debug_assert(vece != MO_8);
3064 if (type == TCG_TYPE_V256) {
3067 tcg_out_vex_modrm(s, insn, sub, a0, a1);
3071 case INDEX_op_ld_vec:
3072 tcg_out_ld(s, type, a0, a1, a2);
3074 case INDEX_op_st_vec:
3075 tcg_out_st(s, type, a0, a1, a2);
3077 case INDEX_op_dupm_vec:
3078 tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
3081 case INDEX_op_x86_shufps_vec:
3085 case INDEX_op_x86_blend_vec:
3086 if (vece == MO_16) {
3088 } else if (vece == MO_32) {
3089 insn = (have_avx2 ? OPC_VPBLENDD : OPC_BLENDPS);
3091 g_assert_not_reached();
3095 case INDEX_op_x86_vperm2i128_vec:
3096 insn = OPC_VPERM2I128;
3099 case INDEX_op_x86_vpshldi_vec:
3100 insn = vpshldi_insn[vece];
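/*
 * The AVX512 VPTERNLOG immediates used below are truth tables over the
 * three source operands, evaluated bitwise with A = 0xf0, B = 0xcc and
 * C = 0xaa.  For example:
 *     ~B                 = 0x33   (not)
 *     ~(B | C)           = 0x11   (nor)
 *     ~(B & C)           = 0x77   (nand)
 *     ~(B ^ C)           = 0x99   (eqv)
 *     B | ~C             = 0xdd   (orc)
 *     (A & B) | (~A & C) = 0xca   (bitsel, A ? B : C)
 */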
3104 case INDEX_op_not_vec:
3105 insn = OPC_VPTERNLOGQ;
3107 sub = 0x33; /* !B */
3109 case INDEX_op_nor_vec:
3110 insn = OPC_VPTERNLOGQ;
3111 sub = 0x11; /* norCB */
3113 case INDEX_op_nand_vec:
3114 insn = OPC_VPTERNLOGQ;
3115 sub = 0x77; /* nandCB */
3117 case INDEX_op_eqv_vec:
3118 insn = OPC_VPTERNLOGQ;
3119 sub = 0x99; /* xnorCB */
3121 case INDEX_op_orc_vec:
3122 insn = OPC_VPTERNLOGQ;
3123 sub = 0xdd; /* orB!C */
3126 case INDEX_op_bitsel_vec:
3127 insn = OPC_VPTERNLOGQ;
3132 sub = 0xca; /* A?B:C */
3133 } else if (a0 == a2) {
3135 sub = 0xe2; /* B?A:C */
3137 tcg_out_mov(s, type, a0, a3);
3138 sub = 0xb8; /* B?C:A */
3143 tcg_debug_assert(insn != OPC_UD2);
3144 if (type == TCG_TYPE_V256) {
3147 tcg_out_vex_modrm(s, insn, a0, a1, a2);
3151 case INDEX_op_x86_vpblendvb_vec:
3152 insn = OPC_VPBLENDVB;
3153 if (type == TCG_TYPE_V256) {
3156 tcg_out_vex_modrm(s, insn, a0, a1, a2);
3157 tcg_out8(s, args[3] << 4);
3160 case INDEX_op_x86_psrldq_vec:
3161 tcg_out_vex_modrm(s, OPC_GRP14, 3, a0, a1);
3165 case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */
3166 case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. */
3168 g_assert_not_reached();
3172 static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
3175 case INDEX_op_goto_ptr:
3178 case INDEX_op_ld8u_i32:
3179 case INDEX_op_ld8u_i64:
3180 case INDEX_op_ld8s_i32:
3181 case INDEX_op_ld8s_i64:
3182 case INDEX_op_ld16u_i32:
3183 case INDEX_op_ld16u_i64:
3184 case INDEX_op_ld16s_i32:
3185 case INDEX_op_ld16s_i64:
3186 case INDEX_op_ld_i32:
3187 case INDEX_op_ld32u_i64:
3188 case INDEX_op_ld32s_i64:
3189 case INDEX_op_ld_i64:
3190 return C_O1_I1(r, r);
3192 case INDEX_op_st8_i32:
3193 case INDEX_op_st8_i64:
3194 return C_O0_I2(qi, r);
3196 case INDEX_op_st16_i32:
3197 case INDEX_op_st16_i64:
3198 case INDEX_op_st_i32:
3199 case INDEX_op_st32_i64:
3200 return C_O0_I2(ri, r);
3202 case INDEX_op_st_i64:
3203 return C_O0_I2(re, r);
3205 case INDEX_op_add_i32:
3206 case INDEX_op_add_i64:
3207 return C_O1_I2(r, r, re);
3209 case INDEX_op_sub_i32:
3210 case INDEX_op_sub_i64:
3211 case INDEX_op_mul_i32:
3212 case INDEX_op_mul_i64:
3213 case INDEX_op_or_i32:
3214 case INDEX_op_or_i64:
3215 case INDEX_op_xor_i32:
3216 case INDEX_op_xor_i64:
3217 return C_O1_I2(r, 0, re);
3219 case INDEX_op_and_i32:
3220 case INDEX_op_and_i64:
3221 return C_O1_I2(r, 0, reZ);
3223 case INDEX_op_andc_i32:
3224 case INDEX_op_andc_i64:
3225 return C_O1_I2(r, r, rI);
3227 case INDEX_op_shl_i32:
3228 case INDEX_op_shl_i64:
3229 case INDEX_op_shr_i32:
3230 case INDEX_op_shr_i64:
3231 case INDEX_op_sar_i32:
3232 case INDEX_op_sar_i64:
3233 return have_bmi2 ? C_O1_I2(r, r, ri) : C_O1_I2(r, 0, ci);
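/* With BMI2 we can use SHLX/SHRX/SARX, which accept the shift count in
   any register, so any "ri" second operand works; the legacy encodings
   need the count in %cl, hence the "ci" constraint and the matching
   destination. */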
3235 case INDEX_op_rotl_i32:
3236 case INDEX_op_rotl_i64:
3237 case INDEX_op_rotr_i32:
3238 case INDEX_op_rotr_i64:
3239 return C_O1_I2(r, 0, ci);
3241 case INDEX_op_brcond_i32:
3242 case INDEX_op_brcond_i64:
3243 return C_O0_I2(r, re);
3245 case INDEX_op_bswap16_i32:
3246 case INDEX_op_bswap16_i64:
3247 case INDEX_op_bswap32_i32:
3248 case INDEX_op_bswap32_i64:
3249 case INDEX_op_bswap64_i64:
3250 case INDEX_op_neg_i32:
3251 case INDEX_op_neg_i64:
3252 case INDEX_op_not_i32:
3253 case INDEX_op_not_i64:
3254 case INDEX_op_extrh_i64_i32:
3255 return C_O1_I1(r, 0);
3257 case INDEX_op_ext8s_i32:
3258 case INDEX_op_ext8s_i64:
3259 case INDEX_op_ext8u_i32:
3260 case INDEX_op_ext8u_i64:
3261 return C_O1_I1(r, q);
3263 case INDEX_op_ext16s_i32:
3264 case INDEX_op_ext16s_i64:
3265 case INDEX_op_ext16u_i32:
3266 case INDEX_op_ext16u_i64:
3267 case INDEX_op_ext32s_i64:
3268 case INDEX_op_ext32u_i64:
3269 case INDEX_op_ext_i32_i64:
3270 case INDEX_op_extu_i32_i64:
3271 case INDEX_op_extrl_i64_i32:
3272 case INDEX_op_extract_i32:
3273 case INDEX_op_extract_i64:
3274 case INDEX_op_sextract_i32:
3275 case INDEX_op_ctpop_i32:
3276 case INDEX_op_ctpop_i64:
3277 return C_O1_I1(r, r);
3279 case INDEX_op_extract2_i32:
3280 case INDEX_op_extract2_i64:
3281 return C_O1_I2(r, 0, r);
3283 case INDEX_op_deposit_i32:
3284 case INDEX_op_deposit_i64:
3285 return C_O1_I2(Q, 0, Q);
3287 case INDEX_op_setcond_i32:
3288 case INDEX_op_setcond_i64:
3289 return C_O1_I2(q, r, re);
3291 case INDEX_op_movcond_i32:
3292 case INDEX_op_movcond_i64:
3293 return C_O1_I4(r, r, re, r, 0);
3295 case INDEX_op_div2_i32:
3296 case INDEX_op_div2_i64:
3297 case INDEX_op_divu2_i32:
3298 case INDEX_op_divu2_i64:
3299 return C_O2_I3(a, d, 0, 1, r);
3301 case INDEX_op_mulu2_i32:
3302 case INDEX_op_mulu2_i64:
3303 case INDEX_op_muls2_i32:
3304 case INDEX_op_muls2_i64:
3305 return C_O2_I2(a, d, a, r);
3307 case INDEX_op_add2_i32:
3308 case INDEX_op_add2_i64:
3309 case INDEX_op_sub2_i32:
3310 case INDEX_op_sub2_i64:
3311 return C_O2_I4(r, r, 0, 1, re, re);
3313 case INDEX_op_ctz_i32:
3314 case INDEX_op_ctz_i64:
3315 return have_bmi1 ? C_N1_I2(r, r, rW) : C_N1_I2(r, r, r);
3317 case INDEX_op_clz_i32:
3318 case INDEX_op_clz_i64:
3319 return have_lzcnt ? C_N1_I2(r, r, rW) : C_N1_I2(r, r, r);
3321 case INDEX_op_qemu_ld_i32:
3322 return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
3323 ? C_O1_I1(r, L) : C_O1_I2(r, L, L));
3325 case INDEX_op_qemu_st_i32:
3326 return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
3327 ? C_O0_I2(L, L) : C_O0_I3(L, L, L));
3328 case INDEX_op_qemu_st8_i32:
3329 return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
3330 ? C_O0_I2(s, L) : C_O0_I3(s, L, L));
3332 case INDEX_op_qemu_ld_i64:
3333 return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L)
3334 : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O2_I1(r, r, L)
3335 : C_O2_I2(r, r, L, L));
3337 case INDEX_op_qemu_st_i64:
3338 return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(L, L)
3339 : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O0_I3(L, L, L)
3340 : C_O0_I4(L, L, L, L));
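/* The extra operands in the 32-bit-host cases above come from value and
   address splitting: an i64 value needs two registers, and a guest
   address wider than the host word needs two as well, so an i64 store
   with 64-bit guest addresses on a 32-bit host consumes four inputs. */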
3342 case INDEX_op_brcond2_i32:
3343 return C_O0_I4(r, r, ri, ri);
3345 case INDEX_op_setcond2_i32:
3346 return C_O1_I4(r, r, r, ri, ri);
3348 case INDEX_op_ld_vec:
3349 case INDEX_op_dupm_vec:
3350 return C_O1_I1(x, r);
3352 case INDEX_op_st_vec:
3353 return C_O0_I2(x, r);
3355 case INDEX_op_add_vec:
3356 case INDEX_op_sub_vec:
3357 case INDEX_op_mul_vec:
3358 case INDEX_op_and_vec:
3359 case INDEX_op_or_vec:
3360 case INDEX_op_xor_vec:
3361 case INDEX_op_andc_vec:
3362 case INDEX_op_orc_vec:
3363 case INDEX_op_nand_vec:
3364 case INDEX_op_nor_vec:
3365 case INDEX_op_eqv_vec:
3366 case INDEX_op_ssadd_vec:
3367 case INDEX_op_usadd_vec:
3368 case INDEX_op_sssub_vec:
3369 case INDEX_op_ussub_vec:
3370 case INDEX_op_smin_vec:
3371 case INDEX_op_umin_vec:
3372 case INDEX_op_smax_vec:
3373 case INDEX_op_umax_vec:
3374 case INDEX_op_shlv_vec:
3375 case INDEX_op_shrv_vec:
3376 case INDEX_op_sarv_vec:
3377 case INDEX_op_rotlv_vec:
3378 case INDEX_op_rotrv_vec:
3379 case INDEX_op_shls_vec:
3380 case INDEX_op_shrs_vec:
3381 case INDEX_op_sars_vec:
3382 case INDEX_op_cmp_vec:
3383 case INDEX_op_x86_shufps_vec:
3384 case INDEX_op_x86_blend_vec:
3385 case INDEX_op_x86_packss_vec:
3386 case INDEX_op_x86_packus_vec:
3387 case INDEX_op_x86_vperm2i128_vec:
3388 case INDEX_op_x86_punpckl_vec:
3389 case INDEX_op_x86_punpckh_vec:
3390 case INDEX_op_x86_vpshldi_vec:
3391 #if TCG_TARGET_REG_BITS == 32
3392 case INDEX_op_dup2_vec:
3394 return C_O1_I2(x, x, x);
3396 case INDEX_op_abs_vec:
3397 case INDEX_op_dup_vec:
3398 case INDEX_op_not_vec:
3399 case INDEX_op_shli_vec:
3400 case INDEX_op_shri_vec:
3401 case INDEX_op_sari_vec:
3402 case INDEX_op_rotli_vec:
3403 case INDEX_op_x86_psrldq_vec:
3404 return C_O1_I1(x, x);
3406 case INDEX_op_x86_vpshldv_vec:
3407 case INDEX_op_x86_vpshrdv_vec:
3408 return C_O1_I3(x, 0, x, x);
3410 case INDEX_op_bitsel_vec:
3411 case INDEX_op_x86_vpblendvb_vec:
3412 return C_O1_I3(x, x, x, x);
3415 g_assert_not_reached();
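/*
 * Per the TCG convention for this hook, the return values below mean:
 * 1 -- the operation is supported directly; 0 -- not supported at all;
 * -1 -- not supported as-is, but tcg_expand_vec_op below can expand it
 * into supported operations.
 */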
3419 int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
3422 case INDEX_op_add_vec:
3423 case INDEX_op_sub_vec:
3424 case INDEX_op_and_vec:
3425 case INDEX_op_or_vec:
3426 case INDEX_op_xor_vec:
3427 case INDEX_op_andc_vec:
3428 case INDEX_op_orc_vec:
3429 case INDEX_op_nand_vec:
3430 case INDEX_op_nor_vec:
3431 case INDEX_op_eqv_vec:
3432 case INDEX_op_not_vec:
3433 case INDEX_op_bitsel_vec:
3435 case INDEX_op_cmp_vec:
3436 case INDEX_op_cmpsel_vec:
3439 case INDEX_op_rotli_vec:
3440 return have_avx512vl && vece >= MO_32 ? 1 : -1;
3442 case INDEX_op_shli_vec:
3443 case INDEX_op_shri_vec:
3444 /* We must expand the operation for MO_8. */
3445 return vece == MO_8 ? -1 : 1;
3447 case INDEX_op_sari_vec:
3455 if (have_avx512vl) {
3459 * We can emulate this for MO_64, but it does not pay off
3460 * unless we're producing at least 4 values.
3462 return type >= TCG_TYPE_V256 ? -1 : 0;
3466 case INDEX_op_shls_vec:
3467 case INDEX_op_shrs_vec:
3468 return vece >= MO_16;
3469 case INDEX_op_sars_vec:
3475 return have_avx512vl;
3478 case INDEX_op_rotls_vec:
3479 return vece >= MO_16 ? -1 : 0;
3481 case INDEX_op_shlv_vec:
3482 case INDEX_op_shrv_vec:
3485 return have_avx512bw;
3491 case INDEX_op_sarv_vec:
3494 return have_avx512bw;
3498 return have_avx512vl;
3501 case INDEX_op_rotlv_vec:
3502 case INDEX_op_rotrv_vec:
3505 return have_avx512vbmi2 ? -1 : 0;
3508 return have_avx512vl ? 1 : have_avx2 ? -1 : 0;
3512 case INDEX_op_mul_vec:
3517 return have_avx512dq;
3521 case INDEX_op_ssadd_vec:
3522 case INDEX_op_usadd_vec:
3523 case INDEX_op_sssub_vec:
3524 case INDEX_op_ussub_vec:
3525 return vece <= MO_16;
3526 case INDEX_op_smin_vec:
3527 case INDEX_op_smax_vec:
3528 case INDEX_op_umin_vec:
3529 case INDEX_op_umax_vec:
3530 case INDEX_op_abs_vec:
3531 return vece <= MO_32 || have_avx512vl;
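/*
 * Worked example of the expansion below, for a byte right-shift by 3:
 * punpcklbw x,x turns the byte 0xab into the 16-bit lane 0xabab;
 * shifting that lane right by 3 + 8 = 11 gives 0x0015, i.e. 0xab >> 3
 * with a zero high byte, which PACKUSWB then packs back to 0x15.
 */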
3538 static void expand_vec_shi(TCGType type, unsigned vece, TCGOpcode opc,
3539 TCGv_vec v0, TCGv_vec v1, TCGArg imm)
3543 tcg_debug_assert(vece == MO_8);
3545 t1 = tcg_temp_new_vec(type);
3546 t2 = tcg_temp_new_vec(type);
/*
 * Unpack to W, shift, and repack.  Tricky bits:
 * (1) Use punpck*bw x,x to produce DDCCBBAA,
 *     i.e. duplicate in other half of the 16-bit lane.
 * (2) For right-shift, add 8 so that the high half of the lane
 *     becomes zero.  For left-shift and left-rotate, we must
 *     shift up and down again.
 * (3) Step 2 leaves the high half zero such that PACKUSWB
 *     (pack with unsigned saturation) does not modify
 *     the quantity.
 */
3559 vec_gen_3(INDEX_op_x86_punpckl_vec, type, MO_8,
3560 tcgv_vec_arg(t1), tcgv_vec_arg(v1), tcgv_vec_arg(v1));
3561 vec_gen_3(INDEX_op_x86_punpckh_vec, type, MO_8,
3562 tcgv_vec_arg(t2), tcgv_vec_arg(v1), tcgv_vec_arg(v1));
3564 if (opc != INDEX_op_rotli_vec) {
3567 if (opc == INDEX_op_shri_vec) {
3568 tcg_gen_shri_vec(MO_16, t1, t1, imm);
3569 tcg_gen_shri_vec(MO_16, t2, t2, imm);
3571 tcg_gen_shli_vec(MO_16, t1, t1, imm);
3572 tcg_gen_shli_vec(MO_16, t2, t2, imm);
3573 tcg_gen_shri_vec(MO_16, t1, t1, 8);
3574 tcg_gen_shri_vec(MO_16, t2, t2, 8);
3577 vec_gen_3(INDEX_op_x86_packus_vec, type, MO_8,
3578 tcgv_vec_arg(v0), tcgv_vec_arg(t1), tcgv_vec_arg(t2));
3579 tcg_temp_free_vec(t1);
3580 tcg_temp_free_vec(t2);
3583 static void expand_vec_sari(TCGType type, unsigned vece,
3584 TCGv_vec v0, TCGv_vec v1, TCGArg imm)
3590 /* Unpack to W, shift, and repack, as in expand_vec_shi. */
3591 t1 = tcg_temp_new_vec(type);
3592 t2 = tcg_temp_new_vec(type);
3593 vec_gen_3(INDEX_op_x86_punpckl_vec, type, MO_8,
3594 tcgv_vec_arg(t1), tcgv_vec_arg(v1), tcgv_vec_arg(v1));
3595 vec_gen_3(INDEX_op_x86_punpckh_vec, type, MO_8,
3596 tcgv_vec_arg(t2), tcgv_vec_arg(v1), tcgv_vec_arg(v1));
3597 tcg_gen_sari_vec(MO_16, t1, t1, imm + 8);
3598 tcg_gen_sari_vec(MO_16, t2, t2, imm + 8);
3599 vec_gen_3(INDEX_op_x86_packss_vec, type, MO_8,
3600 tcgv_vec_arg(v0), tcgv_vec_arg(t1), tcgv_vec_arg(t2));
3601 tcg_temp_free_vec(t1);
3602 tcg_temp_free_vec(t2);
/*
 * We can emulate a small sign extend by performing an arithmetic
 * 32-bit shift and overwriting the high half of a 64-bit logical
 * shift.  Note that the ISA says shift of 32 is valid, but TCG
 * does not, so we have to bound the smaller shift -- we get the
 * same result in the high half either way.
 */
3614 t1 = tcg_temp_new_vec(type);
3615 tcg_gen_sari_vec(MO_32, t1, v1, MIN(imm, 31));
3616 tcg_gen_shri_vec(MO_64, v0, v1, imm);
3617 vec_gen_4(INDEX_op_x86_blend_vec, type, MO_32,
3618 tcgv_vec_arg(v0), tcgv_vec_arg(v0),
3619 tcgv_vec_arg(t1), 0xaa);
3620 tcg_temp_free_vec(t1);
/* Otherwise we will need to use a compare vs 0 to produce
 * the sign-extend, shift and merge.
 */
3625 t1 = tcg_const_zeros_vec(type);
3626 tcg_gen_cmp_vec(TCG_COND_GT, MO_64, t1, t1, v1);
3627 tcg_gen_shri_vec(MO_64, v0, v1, imm);
3628 tcg_gen_shli_vec(MO_64, t1, t1, 64 - imm);
3629 tcg_gen_or_vec(MO_64, v0, v0, t1);
3630 tcg_temp_free_vec(t1);
3635 g_assert_not_reached();
3639 static void expand_vec_rotli(TCGType type, unsigned vece,
3640 TCGv_vec v0, TCGv_vec v1, TCGArg imm)
3645 expand_vec_shi(type, vece, INDEX_op_rotli_vec, v0, v1, imm);
3649 if (have_avx512vbmi2) {
3650 vec_gen_4(INDEX_op_x86_vpshldi_vec, type, vece,
3651 tcgv_vec_arg(v0), tcgv_vec_arg(v1), tcgv_vec_arg(v1), imm);
3655 t = tcg_temp_new_vec(type);
3656 tcg_gen_shli_vec(vece, t, v1, imm);
3657 tcg_gen_shri_vec(vece, v0, v1, (8 << vece) - imm);
3658 tcg_gen_or_vec(vece, v0, v0, t);
3659 tcg_temp_free_vec(t);
3662 static void expand_vec_rotv(TCGType type, unsigned vece, TCGv_vec v0,
3663 TCGv_vec v1, TCGv_vec sh, bool right)
3667 if (have_avx512vbmi2) {
3668 vec_gen_4(right ? INDEX_op_x86_vpshrdv_vec : INDEX_op_x86_vpshldv_vec,
3669 type, vece, tcgv_vec_arg(v0), tcgv_vec_arg(v1),
3670 tcgv_vec_arg(v1), tcgv_vec_arg(sh));
3674 t = tcg_temp_new_vec(type);
3675 tcg_gen_dupi_vec(vece, t, 8 << vece);
3676 tcg_gen_sub_vec(vece, t, t, sh);
3678 tcg_gen_shlv_vec(vece, t, v1, t);
3679 tcg_gen_shrv_vec(vece, v0, v1, sh);
3681 tcg_gen_shrv_vec(vece, t, v1, t);
3682 tcg_gen_shlv_vec(vece, v0, v1, sh);
3684 tcg_gen_or_vec(vece, v0, v0, t);
3685 tcg_temp_free_vec(t);
3688 static void expand_vec_rotls(TCGType type, unsigned vece,
3689 TCGv_vec v0, TCGv_vec v1, TCGv_i32 lsh)
3691 TCGv_vec t = tcg_temp_new_vec(type);
3693 tcg_debug_assert(vece != MO_8);
3695 if (vece >= MO_32 ? have_avx512vl : have_avx512vbmi2) {
3696 tcg_gen_dup_i32_vec(vece, t, lsh);
3697 if (vece >= MO_32) {
3698 tcg_gen_rotlv_vec(vece, v0, v1, t);
3700 expand_vec_rotv(type, vece, v0, v1, t, false);
3703 TCGv_i32 rsh = tcg_temp_new_i32();
3705 tcg_gen_neg_i32(rsh, lsh);
3706 tcg_gen_andi_i32(rsh, rsh, (8 << vece) - 1);
3707 tcg_gen_shls_vec(vece, t, v1, lsh);
3708 tcg_gen_shrs_vec(vece, v0, v1, rsh);
3709 tcg_gen_or_vec(vece, v0, v0, t);
3711 tcg_temp_free_i32(rsh);
3714 tcg_temp_free_vec(t);
3717 static void expand_vec_mul(TCGType type, unsigned vece,
3718 TCGv_vec v0, TCGv_vec v1, TCGv_vec v2)
3720 TCGv_vec t1, t2, t3, t4, zero;
3722 tcg_debug_assert(vece == MO_8);
/*
 * Unpack v1 bytes to words, 0 | x.
 * Unpack v2 bytes to words, y | 0.
 * This leaves the 8-bit result, x * y, with 8 bits of right padding.
 * Shift logical right by 8 bits to clear the high 8 bits before
 * using an unsigned saturated pack.
 *
 * The difference between the V64, V128 and V256 cases is merely how
 * we distribute the expansion between temporaries.
 */
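/*
 * For example, multiplying the bytes x = 3 and y = 5: the unpacked
 * lanes are 0x0003 and 0x0500, their 16-bit product is 0x0f00, the
 * logical shift right by 8 leaves 0x000f, and the saturating pack
 * stores the byte 0x0f.
 */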
3736 t1 = tcg_temp_new_vec(TCG_TYPE_V128);
3737 t2 = tcg_temp_new_vec(TCG_TYPE_V128);
3738 zero = tcg_constant_vec(TCG_TYPE_V128, MO_8, 0);
3739 vec_gen_3(INDEX_op_x86_punpckl_vec, TCG_TYPE_V128, MO_8,
3740 tcgv_vec_arg(t1), tcgv_vec_arg(v1), tcgv_vec_arg(zero));
3741 vec_gen_3(INDEX_op_x86_punpckl_vec, TCG_TYPE_V128, MO_8,
3742 tcgv_vec_arg(t2), tcgv_vec_arg(zero), tcgv_vec_arg(v2));
3743 tcg_gen_mul_vec(MO_16, t1, t1, t2);
3744 tcg_gen_shri_vec(MO_16, t1, t1, 8);
3745 vec_gen_3(INDEX_op_x86_packus_vec, TCG_TYPE_V128, MO_8,
3746 tcgv_vec_arg(v0), tcgv_vec_arg(t1), tcgv_vec_arg(t1));
3747 tcg_temp_free_vec(t1);
3748 tcg_temp_free_vec(t2);
3753 t1 = tcg_temp_new_vec(type);
3754 t2 = tcg_temp_new_vec(type);
3755 t3 = tcg_temp_new_vec(type);
3756 t4 = tcg_temp_new_vec(type);
3757 zero = tcg_constant_vec(TCG_TYPE_V128, MO_8, 0);
3758 vec_gen_3(INDEX_op_x86_punpckl_vec, type, MO_8,
3759 tcgv_vec_arg(t1), tcgv_vec_arg(v1), tcgv_vec_arg(zero));
3760 vec_gen_3(INDEX_op_x86_punpckl_vec, type, MO_8,
3761 tcgv_vec_arg(t2), tcgv_vec_arg(zero), tcgv_vec_arg(v2));
3762 vec_gen_3(INDEX_op_x86_punpckh_vec, type, MO_8,
3763 tcgv_vec_arg(t3), tcgv_vec_arg(v1), tcgv_vec_arg(zero));
3764 vec_gen_3(INDEX_op_x86_punpckh_vec, type, MO_8,
3765 tcgv_vec_arg(t4), tcgv_vec_arg(zero), tcgv_vec_arg(v2));
3766 tcg_gen_mul_vec(MO_16, t1, t1, t2);
3767 tcg_gen_mul_vec(MO_16, t3, t3, t4);
3768 tcg_gen_shri_vec(MO_16, t1, t1, 8);
3769 tcg_gen_shri_vec(MO_16, t3, t3, 8);
3770 vec_gen_3(INDEX_op_x86_packus_vec, type, MO_8,
3771 tcgv_vec_arg(v0), tcgv_vec_arg(t1), tcgv_vec_arg(t3));
3772 tcg_temp_free_vec(t1);
3773 tcg_temp_free_vec(t2);
3774 tcg_temp_free_vec(t3);
3775 tcg_temp_free_vec(t4);
3779 g_assert_not_reached();
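/*
 * x86 only provides vector EQ and signed GT comparisons, so everything
 * else is fixed up below: NE/LE/GE invert or swap into those, and the
 * unsigned orders either bias both operands by the sign bit (turning an
 * unsigned compare into a signed one) or, when umin/umax are available,
 * test e.g. v1 <=u v2 as v1 == umin(v1, v2).  The return value tells
 * the caller whether the result still needs inverting.
 */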
3783 static bool expand_vec_cmp_noinv(TCGType type, unsigned vece, TCGv_vec v0,
3784 TCGv_vec v1, TCGv_vec v2, TCGCond cond)
3793 TCGv_vec t1, t2, t3;
3809 fixup = NEED_SWAP | NEED_INV;
3812 if (tcg_can_emit_vec_op(INDEX_op_umin_vec, type, vece)) {
3815 fixup = NEED_BIAS | NEED_INV;
3819 if (tcg_can_emit_vec_op(INDEX_op_umin_vec, type, vece)) {
3820 fixup = NEED_UMIN | NEED_INV;
3826 if (tcg_can_emit_vec_op(INDEX_op_umax_vec, type, vece)) {
3829 fixup = NEED_BIAS | NEED_SWAP | NEED_INV;
3833 if (tcg_can_emit_vec_op(INDEX_op_umax_vec, type, vece)) {
3834 fixup = NEED_UMAX | NEED_INV;
3836 fixup = NEED_BIAS | NEED_SWAP;
3840 g_assert_not_reached();
3843 if (fixup & NEED_INV) {
3844 cond = tcg_invert_cond(cond);
3846 if (fixup & NEED_SWAP) {
3847 t1 = v1, v1 = v2, v2 = t1;
3848 cond = tcg_swap_cond(cond);
3852 if (fixup & (NEED_UMIN | NEED_UMAX)) {
3853 t1 = tcg_temp_new_vec(type);
3854 if (fixup & NEED_UMIN) {
3855 tcg_gen_umin_vec(vece, t1, v1, v2);
3857 tcg_gen_umax_vec(vece, t1, v1, v2);
3861 } else if (fixup & NEED_BIAS) {
3862 t1 = tcg_temp_new_vec(type);
3863 t2 = tcg_temp_new_vec(type);
3864 t3 = tcg_constant_vec(type, vece, 1ull << ((8 << vece) - 1));
3865 tcg_gen_sub_vec(vece, t1, v1, t3);
3866 tcg_gen_sub_vec(vece, t2, v2, t3);
3869 cond = tcg_signed_cond(cond);
3872 tcg_debug_assert(cond == TCG_COND_EQ || cond == TCG_COND_GT);
3873 /* Expand directly; do not recurse. */
3874 vec_gen_4(INDEX_op_cmp_vec, type, vece,
3875 tcgv_vec_arg(v0), tcgv_vec_arg(v1), tcgv_vec_arg(v2), cond);
3878 tcg_temp_free_vec(t1);
3880 tcg_temp_free_vec(t2);
3883 return fixup & NEED_INV;
3886 static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0,
3887 TCGv_vec v1, TCGv_vec v2, TCGCond cond)
3889 if (expand_vec_cmp_noinv(type, vece, v0, v1, v2, cond)) {
3890 tcg_gen_not_vec(vece, v0, v0);
3894 static void expand_vec_cmpsel(TCGType type, unsigned vece, TCGv_vec v0,
3895 TCGv_vec c1, TCGv_vec c2,
3896 TCGv_vec v3, TCGv_vec v4, TCGCond cond)
3898 TCGv_vec t = tcg_temp_new_vec(type);
3900 if (expand_vec_cmp_noinv(type, vece, t, c1, c2, cond)) {
3901 /* Invert the sense of the compare by swapping arguments. */
3903 x = v3, v3 = v4, v4 = x;
3905 vec_gen_4(INDEX_op_x86_vpblendvb_vec, type, vece,
3906 tcgv_vec_arg(v0), tcgv_vec_arg(v4),
3907 tcgv_vec_arg(v3), tcgv_vec_arg(t));
3908 tcg_temp_free_vec(t);
3911 void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
3916 TCGv_vec v0, v1, v2, v3, v4;
3919 v0 = temp_tcgv_vec(arg_temp(a0));
3920 v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
3921 a2 = va_arg(va, TCGArg);
3924 case INDEX_op_shli_vec:
3925 case INDEX_op_shri_vec:
3926 expand_vec_shi(type, vece, opc, v0, v1, a2);
3929 case INDEX_op_sari_vec:
3930 expand_vec_sari(type, vece, v0, v1, a2);
3933 case INDEX_op_rotli_vec:
3934 expand_vec_rotli(type, vece, v0, v1, a2);
3937 case INDEX_op_rotls_vec:
3938 expand_vec_rotls(type, vece, v0, v1, temp_tcgv_i32(arg_temp(a2)));
3941 case INDEX_op_rotlv_vec:
3942 v2 = temp_tcgv_vec(arg_temp(a2));
3943 expand_vec_rotv(type, vece, v0, v1, v2, false);
3945 case INDEX_op_rotrv_vec:
3946 v2 = temp_tcgv_vec(arg_temp(a2));
3947 expand_vec_rotv(type, vece, v0, v1, v2, true);
3950 case INDEX_op_mul_vec:
3951 v2 = temp_tcgv_vec(arg_temp(a2));
3952 expand_vec_mul(type, vece, v0, v1, v2);
3955 case INDEX_op_cmp_vec:
3956 v2 = temp_tcgv_vec(arg_temp(a2));
3957 expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg));
3960 case INDEX_op_cmpsel_vec:
3961 v2 = temp_tcgv_vec(arg_temp(a2));
3962 v3 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
3963 v4 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
3964 expand_vec_cmpsel(type, vece, v0, v1, v2, v3, v4, va_arg(va, TCGArg));
3974 static const int tcg_target_callee_save_regs[] = {
3975 #if TCG_TARGET_REG_BITS == 64
3984 TCG_REG_R14, /* Currently used for the global env. */
3987 TCG_REG_EBP, /* Currently used for the global env. */
3994 /* Compute frame size via macros, to share between tcg_target_qemu_prologue
3995 and tcg_register_jit. */
3998 ((1 + ARRAY_SIZE(tcg_target_callee_save_regs)) \
3999 * (TCG_TARGET_REG_BITS / 8))
4001 #define FRAME_SIZE \
4003 + TCG_STATIC_CALL_ARGS_SIZE \
4004 + CPU_TEMP_BUF_NLONGS * sizeof(long) \
4005 + TCG_TARGET_STACK_ALIGN - 1) \
4006 & ~(TCG_TARGET_STACK_ALIGN - 1))
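/*
 * For instance, on a non-Windows x86_64 host six registers are saved,
 * so PUSH_SIZE is (1 + 6) * 8 = 56 bytes (return address plus pushes);
 * assuming the usual TCG_STATIC_CALL_ARGS_SIZE of 128 bytes and
 * CPU_TEMP_BUF_NLONGS of 128 longs, FRAME_SIZE rounds 56 + 128 + 1024
 * up to the 16-byte stack alignment, i.e. 1216 bytes.
 */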
4008 /* Generate global QEMU prologue and epilogue code */
4009 static void tcg_target_qemu_prologue(TCGContext *s)
4011 int i, stack_addend;
4015 /* Reserve some stack space, also for TCG temps. */
4016 stack_addend = FRAME_SIZE - PUSH_SIZE;
4017 tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
4018 CPU_TEMP_BUF_NLONGS * sizeof(long));
4020 /* Save all callee saved registers. */
4021 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
4022 tcg_out_push(s, tcg_target_callee_save_regs[i]);
4025 #if TCG_TARGET_REG_BITS == 32
4026 tcg_out_ld(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP,
4027 (ARRAY_SIZE(tcg_target_callee_save_regs) + 1) * 4);
4028 tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
4030 tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, TCG_REG_ESP,
4031 (ARRAY_SIZE(tcg_target_callee_save_regs) + 2) * 4
4034 # if !defined(CONFIG_SOFTMMU) && TCG_TARGET_REG_BITS == 64
4036 int seg = setup_guest_base_seg();
4038 x86_guest_base_seg = seg;
4039 } else if (guest_base == (int32_t)guest_base) {
4040 x86_guest_base_offset = guest_base;
4042 /* Choose R12 because, as a base, it requires a SIB byte. */
4043 x86_guest_base_index = TCG_REG_R12;
4044 tcg_out_movi(s, TCG_TYPE_PTR, x86_guest_base_index, guest_base);
4045 tcg_regset_set_reg(s->reserved_regs, x86_guest_base_index);
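/* The three strategies above, in order of preference: keep guest_base
   in a segment base register (free on every access), fold it into the
   32-bit displacement when it fits, or dedicate the reserved R12 as an
   index register holding it. */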
4049 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
4050 tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
4052 tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, tcg_target_call_iarg_regs[1]);
/*
 * Return path for goto_ptr.  Set return value to 0, a la exit_tb,
 * and fall through to the rest of the epilogue.
 */
4059 tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
4060 tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_EAX, 0);
4063 tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
4065 tcg_out_addi(s, TCG_REG_CALL_STACK, stack_addend);
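/* If AVX has been used, clear the upper YMM state before returning so
   that the caller's legacy SSE code does not pay the AVX/SSE
   state-transition penalty. */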
4068 tcg_out_vex_opc(s, OPC_VZEROUPPER, 0, 0, 0, 0);
4070 for (i = ARRAY_SIZE(tcg_target_callee_save_regs) - 1; i >= 0; i--) {
4071 tcg_out_pop(s, tcg_target_callee_save_regs[i]);
4073 tcg_out_opc(s, OPC_RET, 0, 0, 0);
4076 static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
4078 memset(p, 0x90, count);
4081 static void tcg_target_init(TCGContext *s)
4083 #ifdef CONFIG_CPUID_H
4084 unsigned a, b, c, d, b7 = 0, c7 = 0;
4085 unsigned max = __get_cpuid_max(0, 0);
4088 /* BMI1 is available on AMD Piledriver and Intel Haswell CPUs. */
4089 __cpuid_count(7, 0, a, b7, c7, d);
4090 have_bmi1 = (b7 & bit_BMI) != 0;
4091 have_bmi2 = (b7 & bit_BMI2) != 0;
4095 __cpuid(1, a, b, c, d);
4097 /* For 32-bit, 99% certainty that we're running on hardware that
4098 supports cmov, but we still need to check. In case cmov is not
4099 available, we'll use a small forward branch. */
4100 have_cmov = (d & bit_CMOV) != 0;
4103 /* MOVBE is only available on Intel Atom and Haswell CPUs, so we
4104 need to probe for it. */
4105 have_movbe = (c & bit_MOVBE) != 0;
4106 have_popcnt = (c & bit_POPCNT) != 0;
4108 /* There are a number of things we must check before we can be
4109 sure of not hitting invalid opcode. */
4110 if (c & bit_OSXSAVE) {
4111 unsigned xcrl, xcrh;
/* The xgetbv instruction is not available to older versions of
 * the assembler, so we encode the instruction manually.
 */
4115 asm(".byte 0x0f, 0x01, 0xd0" : "=a" (xcrl), "=d" (xcrh) : "c" (0));
4116 if ((xcrl & 6) == 6) {
4117 have_avx1 = (c & bit_AVX) != 0;
4118 have_avx2 = (b7 & bit_AVX2) != 0;
/*
 * There are interesting instructions in AVX512, so long
 * as we have AVX512VL, which indicates support for EVEX
 * on sizes smaller than 512 bits.  We are required to
 * check that OPMASK and all extended ZMM state are enabled
 * even if we're not using them -- the insns will fault.
 */
4127 if ((xcrl & 0xe0) == 0xe0
4128 && (b7 & bit_AVX512F)
4129 && (b7 & bit_AVX512VL)) {
4130 have_avx512vl = true;
4131 have_avx512bw = (b7 & bit_AVX512BW) != 0;
4132 have_avx512dq = (b7 & bit_AVX512DQ) != 0;
4133 have_avx512vbmi2 = (c7 & bit_AVX512VBMI2) != 0;
max = __get_cpuid_max(0x80000000, 0);
4141 __cpuid(0x80000001, a, b, c, d);
4142 /* LZCNT was introduced with AMD Barcelona and Intel Haswell CPUs. */
4143 have_lzcnt = (c & bit_LZCNT) != 0;
4145 #endif /* CONFIG_CPUID_H */
4147 tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
4148 if (TCG_TARGET_REG_BITS == 64) {
4149 tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;
4152 tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS;
4153 tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
4156 tcg_target_available_regs[TCG_TYPE_V256] = ALL_VECTOR_REGS;
4159 tcg_target_call_clobber_regs = ALL_VECTOR_REGS;
4160 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EAX);
4161 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EDX);
4162 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_ECX);
4163 if (TCG_TARGET_REG_BITS == 64) {
4164 #if !defined(_WIN64)
4165 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RDI);
4166 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RSI);
4168 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R8);
4169 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R9);
4170 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R10);
4171 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R11);
4174 s->reserved_regs = 0;
4175 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
4180 uint8_t fde_def_cfa[4];
4181 uint8_t fde_reg_ofs[14];
4184 /* We're expecting a 2 byte uleb128 encoded value. */
4185 QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
4187 #if !defined(__ELF__)
4188 /* Host machine without ELF. */
4189 #elif TCG_TARGET_REG_BITS == 64
4190 #define ELF_HOST_MACHINE EM_X86_64
4191 static const DebugFrame debug_frame = {
4192 .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
4195 .h.cie.code_align = 1,
4196 .h.cie.data_align = 0x78, /* sleb128 -8 */
4197 .h.cie.return_column = 16,
4199 /* Total FDE size does not include the "len" member. */
4200 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
4203 12, 7, /* DW_CFA_def_cfa %rsp, ... */
4204 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
4208 0x90, 1, /* DW_CFA_offset, %rip, -8 */
4209 /* The following ordering must match tcg_target_callee_save_regs. */
4210 0x86, 2, /* DW_CFA_offset, %rbp, -16 */
4211 0x83, 3, /* DW_CFA_offset, %rbx, -24 */
4212 0x8c, 4, /* DW_CFA_offset, %r12, -32 */
4213 0x8d, 5, /* DW_CFA_offset, %r13, -40 */
4214 0x8e, 6, /* DW_CFA_offset, %r14, -48 */
4215 0x8f, 7, /* DW_CFA_offset, %r15, -56 */
4219 #define ELF_HOST_MACHINE EM_386
4220 static const DebugFrame debug_frame = {
4221 .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
4224 .h.cie.code_align = 1,
4225 .h.cie.data_align = 0x7c, /* sleb128 -4 */
4226 .h.cie.return_column = 8,
4228 /* Total FDE size does not include the "len" member. */
4229 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
4232 12, 4, /* DW_CFA_def_cfa %esp, ... */
4233 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
4237 0x88, 1, /* DW_CFA_offset, %eip, -4 */
4238 /* The following ordering must match tcg_target_callee_save_regs. */
4239 0x85, 2, /* DW_CFA_offset, %ebp, -8 */
4240 0x83, 3, /* DW_CFA_offset, %ebx, -12 */
4241 0x86, 4, /* DW_CFA_offset, %esi, -16 */
4242 0x87, 5, /* DW_CFA_offset, %edi, -20 */
4247 #if defined(ELF_HOST_MACHINE)
4248 void tcg_register_jit(const void *buf, size_t buf_size)
4250 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));