* THE SOFTWARE.
*/
+#include "tcg-be-ldst.h"
+
#ifndef NDEBUG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
#if TCG_TARGET_REG_BITS == 64
#endif
};
+/* Constants we accept. */
+#define TCG_CT_CONST_S32 0x100
+#define TCG_CT_CONST_U32 0x200
+#define TCG_CT_CONST_I32 0x400
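+/* S32/U32 match sign- and zero-extended 32-bit immediates; I32 matches a
+   value whose bitwise complement fits in 32 bits, which lets andc apply
+   the inverted constant as a 32-bit immediate (see
+   tcg_target_const_match).  */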
+
/* Registers used with L constraint, which are the first argument
registers on x86_64, and two random call clobbered registers on
i386. */
# define TCG_REG_L1 TCG_REG_EDX
#endif
+/* The host compiler should supply <cpuid.h> to enable runtime feature
+   detection, as we're not going to go so far as our own inline assembly.
+   If not available, default values will be assumed.  */
+#if defined(CONFIG_CPUID_H)
+#include <cpuid.h>
+#endif
+
/* For 32-bit, we are going to attempt to determine at runtime whether cmov
- is available. However, the host compiler must supply <cpuid.h>, as we're
- not going to go so far as our own inline assembly. */
+ is available. */
#if TCG_TARGET_REG_BITS == 64
# define have_cmov 1
-#elif defined(CONFIG_CPUID_H)
-#include <cpuid.h>
+#elif defined(CONFIG_CPUID_H) && defined(bit_CMOV)
static bool have_cmov;
#else
# define have_cmov 0
#endif
-static uint8_t *tb_ret_addr;
+/* If bit_MOVBE is defined in cpuid.h (added in GCC version 4.6), we are
+ going to attempt to determine at runtime whether movbe is available. */
+#if defined(CONFIG_CPUID_H) && defined(bit_MOVBE)
+static bool have_movbe;
+#else
+# define have_movbe 0
+#endif
+
+/* We need this symbol in tcg-target.h, and we can't properly conditionalize
+ it there. Therefore we always define the variable. */
+bool have_bmi1;
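+/* (tcg-target.h is expected to key TCG_TARGET_HAS_andc_* off this, e.g.
+   "#define TCG_TARGET_HAS_andc_i32 have_bmi1".)  */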
+
+#if defined(CONFIG_CPUID_H) && defined(bit_BMI2)
+static bool have_bmi2;
+#else
+# define have_bmi2 0
+#endif
+
+static tcg_insn_unit *tb_ret_addr;
-static void patch_reloc(uint8_t *code_ptr, int type,
+static void patch_reloc(tcg_insn_unit *code_ptr, int type,
intptr_t value, intptr_t addend)
{
value += addend;
if (value != (int32_t)value) {
tcg_abort();
}
- *(uint32_t *)code_ptr = value;
+ tcg_patch32(code_ptr, value);
break;
case R_386_PC8:
value -= (uintptr_t)code_ptr;
if (value != (int8_t)value) {
tcg_abort();
}
- *(uint8_t *)code_ptr = value;
+ tcg_patch8(code_ptr, value);
break;
default:
tcg_abort();
tcg_regset_set_reg(ct->u.regs, TCG_REG_EBX);
break;
case 'c':
+ case_c:
ct->ct |= TCG_CT_REG;
tcg_regset_set_reg(ct->u.regs, TCG_REG_ECX);
break;
tcg_regset_set32(ct->u.regs, 0, 0xf);
break;
case 'r':
+ case_r:
ct->ct |= TCG_CT_REG;
if (TCG_TARGET_REG_BITS == 64) {
tcg_regset_set32(ct->u.regs, 0, 0xffff);
tcg_regset_set32(ct->u.regs, 0, 0xff);
}
break;
+ case 'C':
+        /* With SHRX et al, we need not use ECX as the shift count register. */
+ if (have_bmi2) {
+ goto case_r;
+ } else {
+ goto case_c;
+ }
/* qemu_ld/st address constraint */
case 'L':
case 'Z':
ct->ct |= TCG_CT_CONST_U32;
break;
+ case 'I':
+ ct->ct |= TCG_CT_CONST_I32;
+ break;
default:
return -1;
}
/* test if a constant matches the constraint */
-static inline int tcg_target_const_match(tcg_target_long val,
+static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
const TCGArgConstraint *arg_ct)
{
int ct = arg_ct->ct;
if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
return 1;
}
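+    /* TCG_CT_CONST_I32 ("I"): the complement fits in 32 bits, e.g.
+       val = 0xfffffffffffffff0 has ~val == 0xf; andc applies such a
+       constant by and-ing with the inverted immediate.  */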
+ if ((ct & TCG_CT_CONST_I32) && ~val == (int32_t)~val) {
+ return 1;
+ }
return 0;
}
#endif
#define P_EXT 0x100 /* 0x0f opcode prefix */
-#define P_DATA16 0x200 /* 0x66 opcode prefix */
+#define P_EXT38 0x200 /* 0x0f 0x38 opcode prefix */
+#define P_DATA16 0x400 /* 0x66 opcode prefix */
#if TCG_TARGET_REG_BITS == 64
-# define P_ADDR32 0x400 /* 0x67 opcode prefix */
-# define P_REXW 0x800 /* Set REX.W = 1 */
-# define P_REXB_R 0x1000 /* REG field as byte register */
-# define P_REXB_RM 0x2000 /* R/M field as byte register */
-# define P_GS 0x4000 /* gs segment override */
+# define P_ADDR32 0x800 /* 0x67 opcode prefix */
+# define P_REXW 0x1000 /* Set REX.W = 1 */
+# define P_REXB_R 0x2000 /* REG field as byte register */
+# define P_REXB_RM 0x4000 /* R/M field as byte register */
+# define P_GS 0x8000 /* gs segment override */
#else
# define P_ADDR32 0
# define P_REXW 0
# define P_REXB_RM 0
# define P_GS 0
#endif
+#define P_SIMDF3 0x10000 /* 0xf3 opcode prefix */
+#define P_SIMDF2 0x20000 /* 0xf2 opcode prefix */
#define OPC_ARITH_EvIz (0x81)
#define OPC_ARITH_EvIb (0x83)
#define OPC_ARITH_GvEv (0x03) /* ... plus (ARITH_FOO << 3) */
+#define OPC_ANDN (0xf2 | P_EXT38)
#define OPC_ADD_GvEv (OPC_ARITH_GvEv | (ARITH_ADD << 3))
#define OPC_BSWAP (0xc8 | P_EXT)
#define OPC_CALL_Jz (0xe8)
#define OPC_MOVB_EvIz (0xc6)
#define OPC_MOVL_EvIz (0xc7)
#define OPC_MOVL_Iv (0xb8)
+#define OPC_MOVBE_GyMy (0xf0 | P_EXT38)
+#define OPC_MOVBE_MyGy (0xf1 | P_EXT38)
#define OPC_MOVSBL (0xbe | P_EXT)
#define OPC_MOVSWL (0xbf | P_EXT)
#define OPC_MOVSLQ (0x63 | P_REXW)
#define OPC_SHIFT_1 (0xd1)
#define OPC_SHIFT_Ib (0xc1)
#define OPC_SHIFT_cl (0xd3)
+#define OPC_SARX (0xf7 | P_EXT38 | P_SIMDF3)
+#define OPC_SHLX (0xf7 | P_EXT38 | P_DATA16)
+#define OPC_SHRX (0xf7 | P_EXT38 | P_SIMDF2)
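+/* SARX/SHLX/SHRX share opcode 0xf7 in the 0f 38 space and are told apart
+   only by the mandatory prefix encoded in VEX.pp (f3/66/f2).  */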
#define OPC_TESTL (0x85)
#define OPC_XCHG_ax_r32 (0x90)
}
rex = 0;
- rex |= (opc & P_REXW) >> 8; /* REX.W */
- rex |= (r & 8) >> 1; /* REX.R */
- rex |= (x & 8) >> 2; /* REX.X */
- rex |= (rm & 8) >> 3; /* REX.B */
+ rex |= (opc & P_REXW) ? 0x8 : 0x0; /* REX.W */
+ rex |= (r & 8) >> 1; /* REX.R */
+ rex |= (x & 8) >> 2; /* REX.X */
+ rex |= (rm & 8) >> 3; /* REX.B */
/* P_REXB_{R,RM} indicates that the given register is the low byte.
For %[abcd]l we need no REX prefix, but for %{si,di,bp,sp}l we do,
tcg_out8(s, (uint8_t)(rex | 0x40));
}
- if (opc & P_EXT) {
+ if (opc & (P_EXT | P_EXT38)) {
tcg_out8(s, 0x0f);
+ if (opc & P_EXT38) {
+ tcg_out8(s, 0x38);
+ }
}
+
tcg_out8(s, opc);
}
#else
if (opc & P_DATA16) {
tcg_out8(s, 0x66);
}
- if (opc & P_EXT) {
+ if (opc & (P_EXT | P_EXT38)) {
tcg_out8(s, 0x0f);
+ if (opc & P_EXT38) {
+ tcg_out8(s, 0x38);
+ }
}
tcg_out8(s, opc);
}
tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
}
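+/* Output a VEX-encoded opcode with "reg, vvvv, rm" register operands.
+   A worked example, assuming 32-bit operands: andc ecx = ebx & ~eax uses
+   OPC_ANDN with r=ECX, v=EAX, rm=EBX; P_EXT38 selects the three-byte
+   form, producing
+       c4 e2 78 f2 cb          andn %ebx, %eax, %ecx
+   where 0xe2 holds the inverted R/X/B bits plus m-mmmm = 2 (0f 38), and
+   0x78 is ~vvvv << 3 with W = L = pp = 0.  */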
+static void tcg_out_vex_modrm(TCGContext *s, int opc, int r, int v, int rm)
+{
+ int tmp;
+
+ if ((opc & (P_REXW | P_EXT | P_EXT38)) || (rm & 8)) {
+ /* Three byte VEX prefix. */
+ tcg_out8(s, 0xc4);
+
+ /* VEX.m-mmmm */
+ if (opc & P_EXT38) {
+ tmp = 2;
+ } else if (opc & P_EXT) {
+ tmp = 1;
+ } else {
+ tcg_abort();
+ }
+ tmp |= 0x40; /* VEX.X */
+ tmp |= (r & 8 ? 0 : 0x80); /* VEX.R */
+ tmp |= (rm & 8 ? 0 : 0x20); /* VEX.B */
+ tcg_out8(s, tmp);
+
+ tmp = (opc & P_REXW ? 0x80 : 0); /* VEX.W */
+ } else {
+ /* Two byte VEX prefix. */
+ tcg_out8(s, 0xc5);
+
+ tmp = (r & 8 ? 0 : 0x80); /* VEX.R */
+ }
+ /* VEX.pp */
+ if (opc & P_DATA16) {
+ tmp |= 1; /* 0x66 */
+ } else if (opc & P_SIMDF3) {
+ tmp |= 2; /* 0xf3 */
+ } else if (opc & P_SIMDF2) {
+ tmp |= 3; /* 0xf2 */
+ }
+ tmp |= (~v & 15) << 3; /* VEX.vvvv */
+ tcg_out8(s, tmp);
+ tcg_out8(s, opc);
+ tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
+}
+
/* Output an opcode with a full "rm + (index<<shift) + offset" address mode.
We handle either RM or INDEX missing with a negative value. In 64-bit
mode for absolute addresses, ~RM is the size of the immediate operand
}
/* Use SMALL != 0 to force a short forward branch. */
-static void tcg_out_jxx(TCGContext *s, int opc, int label_index, int small)
+static void tcg_out_jxx(TCGContext *s, int opc, TCGLabel *l, int small)
{
int32_t val, val1;
- TCGLabel *l = &s->labels[label_index];
if (l->has_value) {
- val = l->u.value - (intptr_t)s->code_ptr;
+ val = tcg_pcrel_diff(s, l->u.value_ptr);
val1 = val - 2;
if ((int8_t)val1 == val1) {
if (opc == -1) {
} else {
tcg_out8(s, OPC_JCC_short + opc);
}
- tcg_out_reloc(s, s->code_ptr, R_386_PC8, label_index, -1);
+ tcg_out_reloc(s, s->code_ptr, R_386_PC8, l, -1);
s->code_ptr += 1;
} else {
if (opc == -1) {
} else {
tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0);
}
- tcg_out_reloc(s, s->code_ptr, R_386_PC32, label_index, -4);
+ tcg_out_reloc(s, s->code_ptr, R_386_PC32, l, -4);
s->code_ptr += 4;
}
}
static void tcg_out_brcond32(TCGContext *s, TCGCond cond,
TCGArg arg1, TCGArg arg2, int const_arg2,
- int label_index, int small)
+ TCGLabel *label, int small)
{
tcg_out_cmp(s, arg1, arg2, const_arg2, 0);
- tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index, small);
+ tcg_out_jxx(s, tcg_cond_to_jcc[cond], label, small);
}
#if TCG_TARGET_REG_BITS == 64
static void tcg_out_brcond64(TCGContext *s, TCGCond cond,
TCGArg arg1, TCGArg arg2, int const_arg2,
- int label_index, int small)
+ TCGLabel *label, int small)
{
tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW);
- tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index, small);
+ tcg_out_jxx(s, tcg_cond_to_jcc[cond], label, small);
}
#else
/* XXX: we implement it at the target level to avoid having to
static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
const int *const_args, int small)
{
- int label_next;
- label_next = gen_new_label();
+ TCGLabel *label_next = gen_new_label();
+ TCGLabel *label_this = arg_label(args[5]);
+
switch(args[4]) {
case TCG_COND_EQ:
tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2],
label_next, 1);
tcg_out_brcond32(s, TCG_COND_EQ, args[1], args[3], const_args[3],
- args[5], small);
+ label_this, small);
break;
case TCG_COND_NE:
tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2],
- args[5], small);
+ label_this, small);
tcg_out_brcond32(s, TCG_COND_NE, args[1], args[3], const_args[3],
- args[5], small);
+ label_this, small);
break;
case TCG_COND_LT:
tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3],
- args[5], small);
+ label_this, small);
tcg_out_jxx(s, JCC_JNE, label_next, 1);
tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2],
- args[5], small);
+ label_this, small);
break;
case TCG_COND_LE:
tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3],
- args[5], small);
+ label_this, small);
tcg_out_jxx(s, JCC_JNE, label_next, 1);
tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2],
- args[5], small);
+ label_this, small);
break;
case TCG_COND_GT:
tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3],
- args[5], small);
+ label_this, small);
tcg_out_jxx(s, JCC_JNE, label_next, 1);
tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2],
- args[5], small);
+ label_this, small);
break;
case TCG_COND_GE:
tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3],
- args[5], small);
+ label_this, small);
tcg_out_jxx(s, JCC_JNE, label_next, 1);
tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2],
- args[5], small);
+ label_this, small);
break;
case TCG_COND_LTU:
tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3],
- args[5], small);
+ label_this, small);
tcg_out_jxx(s, JCC_JNE, label_next, 1);
tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2],
- args[5], small);
+ label_this, small);
break;
case TCG_COND_LEU:
tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3],
- args[5], small);
+ label_this, small);
tcg_out_jxx(s, JCC_JNE, label_next, 1);
tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2],
- args[5], small);
+ label_this, small);
break;
case TCG_COND_GTU:
tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3],
- args[5], small);
+ label_this, small);
tcg_out_jxx(s, JCC_JNE, label_next, 1);
tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2],
- args[5], small);
+ label_this, small);
break;
case TCG_COND_GEU:
tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3],
- args[5], small);
+ label_this, small);
tcg_out_jxx(s, JCC_JNE, label_next, 1);
tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2],
- args[5], small);
+ label_this, small);
break;
default:
tcg_abort();
const int *const_args)
{
TCGArg new_args[6];
- int label_true, label_over;
+ TCGLabel *label_true, *label_over;
memcpy(new_args, args+1, 5*sizeof(TCGArg));
label_true = gen_new_label();
label_over = gen_new_label();
- new_args[5] = label_true;
+ new_args[5] = label_arg(label_true);
tcg_out_brcond2(s, new_args, const_args+1, 1);
tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);
label_over = gen_new_label();
new_args[4] = tcg_invert_cond(new_args[4]);
- new_args[5] = label_over;
+ new_args[5] = label_arg(label_over);
tcg_out_brcond2(s, new_args, const_args+1, 1);
tgen_arithi(s, ARITH_ADD, args[0], 1, 0);
if (have_cmov) {
tcg_out_modrm(s, OPC_CMOVCC | tcg_cond_to_jcc[cond], dest, v1);
} else {
- int over = gen_new_label();
+ TCGLabel *over = gen_new_label();
tcg_out_jxx(s, tcg_cond_to_jcc[tcg_invert_cond(cond)], over, 1);
tcg_out_mov(s, TCG_TYPE_I32, dest, v1);
tcg_out_label(s, over, s->code_ptr);
}
#endif
-static void tcg_out_branch(TCGContext *s, int call, uintptr_t dest)
+static void tcg_out_branch(TCGContext *s, int call, tcg_insn_unit *dest)
{
- intptr_t disp = dest - (intptr_t)s->code_ptr - 5;
+ intptr_t disp = tcg_pcrel_diff(s, dest) - 5;
if (disp == (int32_t)disp) {
tcg_out_opc(s, call ? OPC_CALL_Jz : OPC_JMP_long, 0, 0, 0);
tcg_out32(s, disp);
} else {
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R10, dest);
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R10, (uintptr_t)dest);
tcg_out_modrm(s, OPC_GRP5,
call ? EXT5_CALLN_Ev : EXT5_JMPN_Ev, TCG_REG_R10);
}
}
-static inline void tcg_out_calli(TCGContext *s, uintptr_t dest)
+static inline void tcg_out_call(TCGContext *s, tcg_insn_unit *dest)
{
tcg_out_branch(s, 1, dest);
}
-static void tcg_out_jmp(TCGContext *s, uintptr_t dest)
+static void tcg_out_jmp(TCGContext *s, tcg_insn_unit *dest)
{
tcg_out_branch(s, 0, dest);
}
#if defined(CONFIG_SOFTMMU)
-
-#include "exec/softmmu_defs.h"
-
-/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
- * int mmu_idx, uintptr_t ra)
- */
+/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
+ * TCGMemOpIdx oi, uintptr_t ra)
+ */
-static const void * const qemu_ld_helpers[4] = {
- helper_ret_ldb_mmu,
- helper_ret_ldw_mmu,
- helper_ret_ldl_mmu,
- helper_ret_ldq_mmu,
+static void * const qemu_ld_helpers[16] = {
+ [MO_UB] = helper_ret_ldub_mmu,
+ [MO_LEUW] = helper_le_lduw_mmu,
+ [MO_LEUL] = helper_le_ldul_mmu,
+ [MO_LEQ] = helper_le_ldq_mmu,
+ [MO_BEUW] = helper_be_lduw_mmu,
+ [MO_BEUL] = helper_be_ldul_mmu,
+ [MO_BEQ] = helper_be_ldq_mmu,
};
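+/* The tables are indexed by TCGMemOp; signed loads reuse the unsigned
+   helpers, with sign extension done inline in the slow path (note the
+   "opc & ~MO_SIGN" lookup and the MO_SSIZE switch in
+   tcg_out_qemu_ld_slow_path below).  */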
-/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
- * uintxx_t val, int mmu_idx, uintptr_t ra)
- */
+/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
+ * uintxx_t val, TCGMemOpIdx oi, uintptr_t ra)
+ */
-static const void * const qemu_st_helpers[4] = {
- helper_ret_stb_mmu,
- helper_ret_stw_mmu,
- helper_ret_stl_mmu,
- helper_ret_stq_mmu,
+static void * const qemu_st_helpers[16] = {
+ [MO_UB] = helper_ret_stb_mmu,
+ [MO_LEUW] = helper_le_stw_mmu,
+ [MO_LEUL] = helper_le_stl_mmu,
+ [MO_LEQ] = helper_le_stq_mmu,
+ [MO_BEUW] = helper_be_stw_mmu,
+ [MO_BEUL] = helper_be_stl_mmu,
+ [MO_BEQ] = helper_be_stq_mmu,
};
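+/* A single byte has no byte order, so MO_UB needs no LE/BE variants.  */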
-static void add_qemu_ldst_label(TCGContext *s,
- int is_ld,
- int opc,
- int data_reg,
- int data_reg2,
- int addrlo_reg,
- int addrhi_reg,
- int mem_index,
- uint8_t *raddr,
- uint8_t **label_ptr);
-
/* Perform the TLB load and compare.
Inputs:
- ADDRLO_IDX contains the index into ARGS of the low part of the
- address; the high part of the address is at ADDR_LOW_IDX+1.
+   ADDRLO and ADDRHI contain the low and high parts of the address.
MEM_INDEX and S_BITS are the memory context and log2 size of the load.
First argument register is clobbered. */
-static inline void tcg_out_tlb_load(TCGContext *s, int addrlo_idx,
- int mem_index, int s_bits,
- const TCGArg *args,
- uint8_t **label_ptr, int which)
+static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
+ int mem_index, TCGMemOp s_bits,
+ tcg_insn_unit **label_ptr, int which)
{
- const int addrlo = args[addrlo_idx];
- const int r0 = TCG_REG_L0;
- const int r1 = TCG_REG_L1;
+ const TCGReg r0 = TCG_REG_L0;
+ const TCGReg r1 = TCG_REG_L1;
TCGType ttype = TCG_TYPE_I32;
TCGType htype = TCG_TYPE_I32;
int trexw = 0, hrexw = 0;
if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
/* cmp 4(r0), addrhi */
- tcg_out_modrm_offset(s, OPC_CMP_GvEv, args[addrlo_idx+1], r0, 4);
+ tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi, r0, 4);
/* jne slow_path */
tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, r1, r0,
offsetof(CPUTLBEntry, addend) - which);
}
+
+/*
+ * Record the context of a call to the out-of-line helper code for the slow
+ * path of a load or store, so that we can later generate the correct
+ * helper code.
+ */
+static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
+ TCGReg datalo, TCGReg datahi,
+ TCGReg addrlo, TCGReg addrhi,
+ tcg_insn_unit *raddr,
+ tcg_insn_unit **label_ptr)
+{
+ TCGLabelQemuLdst *label = new_ldst_label(s);
+
+ label->is_ld = is_ld;
+ label->oi = oi;
+ label->datalo_reg = datalo;
+ label->datahi_reg = datahi;
+ label->addrlo_reg = addrlo;
+ label->addrhi_reg = addrhi;
+ label->raddr = raddr;
+ label->label_ptr[0] = label_ptr[0];
+ if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
+ label->label_ptr[1] = label_ptr[1];
+ }
+}
+
+/*
+ * Generate code for the slow path of a load at the end of the block.
+ */
+static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+{
+ TCGMemOpIdx oi = l->oi;
+ TCGMemOp opc = get_memop(oi);
+ TCGReg data_reg;
+ tcg_insn_unit **label_ptr = &l->label_ptr[0];
+
+ /* resolve label address */
+ tcg_patch32(label_ptr[0], s->code_ptr - label_ptr[0] - 4);
+ if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
+ tcg_patch32(label_ptr[1], s->code_ptr - label_ptr[1] - 4);
+ }
+
+ if (TCG_TARGET_REG_BITS == 32) {
+ int ofs = 0;
+
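+        /* On i386 all helper arguments go on the stack; build the
+           (env, addrlo[, addrhi], oi, retaddr) frame in place.  */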
+ tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
+ ofs += 4;
+
+ tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
+ ofs += 4;
+
+ if (TARGET_LONG_BITS == 64) {
+ tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
+ ofs += 4;
+ }
+
+ tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, oi);
+ ofs += 4;
+
+ tcg_out_sti(s, TCG_TYPE_PTR, TCG_REG_ESP, ofs, (uintptr_t)l->raddr);
+ } else {
+ tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
+ /* The second argument is already loaded with addrlo. */
+ tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2], oi);
+ tcg_out_movi(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[3],
+ (uintptr_t)l->raddr);
+ }
+
+ tcg_out_call(s, qemu_ld_helpers[opc & ~MO_SIGN]);
+
+ data_reg = l->datalo_reg;
+ switch (opc & MO_SSIZE) {
+ case MO_SB:
+ tcg_out_ext8s(s, data_reg, TCG_REG_EAX, P_REXW);
+ break;
+ case MO_SW:
+ tcg_out_ext16s(s, data_reg, TCG_REG_EAX, P_REXW);
+ break;
+#if TCG_TARGET_REG_BITS == 64
+ case MO_SL:
+ tcg_out_ext32s(s, data_reg, TCG_REG_EAX);
+ break;
+#endif
+ case MO_UB:
+ case MO_UW:
+ /* Note that the helpers have zero-extended to tcg_target_long. */
+ case MO_UL:
+ tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
+ break;
+ case MO_Q:
+ if (TCG_TARGET_REG_BITS == 64) {
+ tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_RAX);
+ } else if (data_reg == TCG_REG_EDX) {
+ /* xchg %edx, %eax */
+ tcg_out_opc(s, OPC_XCHG_ax_r32 + TCG_REG_EDX, 0, 0, 0);
+ tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EAX);
+ } else {
+ tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
+ tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EDX);
+ }
+ break;
+ default:
+ tcg_abort();
+ }
+
+    /* Jump to the code corresponding to the next IR of qemu_ld */
+ tcg_out_jmp(s, l->raddr);
+}
+
+/*
+ * Generate code for the slow path of a store at the end of the block.
+ */
+static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+{
+ TCGMemOpIdx oi = l->oi;
+ TCGMemOp opc = get_memop(oi);
+ TCGMemOp s_bits = opc & MO_SIZE;
+ tcg_insn_unit **label_ptr = &l->label_ptr[0];
+ TCGReg retaddr;
+
+ /* resolve label address */
+ tcg_patch32(label_ptr[0], s->code_ptr - label_ptr[0] - 4);
+ if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
+ tcg_patch32(label_ptr[1], s->code_ptr - label_ptr[1] - 4);
+ }
+
+ if (TCG_TARGET_REG_BITS == 32) {
+ int ofs = 0;
+
+ tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
+ ofs += 4;
+
+ tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
+ ofs += 4;
+
+ if (TARGET_LONG_BITS == 64) {
+ tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
+ ofs += 4;
+ }
+
+ tcg_out_st(s, TCG_TYPE_I32, l->datalo_reg, TCG_REG_ESP, ofs);
+ ofs += 4;
+
+ if (s_bits == MO_64) {
+ tcg_out_st(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_ESP, ofs);
+ ofs += 4;
+ }
+
+ tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, oi);
+ ofs += 4;
+
+ retaddr = TCG_REG_EAX;
+ tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
+ tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP, ofs);
+ } else {
+ tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
+ /* The second argument is already loaded with addrlo. */
+ tcg_out_mov(s, (s_bits == MO_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
+ tcg_target_call_iarg_regs[2], l->datalo_reg);
+ tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3], oi);
+
+ if (ARRAY_SIZE(tcg_target_call_iarg_regs) > 4) {
+ retaddr = tcg_target_call_iarg_regs[4];
+ tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
+ } else {
+ retaddr = TCG_REG_RAX;
+ tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
+ tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP,
+ TCG_TARGET_CALL_STACK_OFFSET);
+ }
+ }
+
+ /* "Tail call" to the helper, with the return address back inline. */
+ tcg_out_push(s, retaddr);
+ tcg_out_jmp(s, qemu_st_helpers[opc]);
+}
#elif defined(__x86_64__) && defined(__linux__)
# include <asm/prctl.h>
# include <sys/prctl.h>
static inline void setup_guest_base_seg(void) { }
#endif /* SOFTMMU */
-static void tcg_out_qemu_ld_direct(TCGContext *s, int datalo, int datahi,
- int base, intptr_t ofs, int seg, int sizeop)
+static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
+ TCGReg base, intptr_t ofs, int seg,
+ TCGMemOp memop)
{
-#ifdef TARGET_WORDS_BIGENDIAN
- const int bswap = 1;
-#else
- const int bswap = 0;
-#endif
- switch (sizeop) {
- case 0:
+ const TCGMemOp real_bswap = memop & MO_BSWAP;
+ TCGMemOp bswap = real_bswap;
+ int movop = OPC_MOVL_GvEv;
+
+ if (have_movbe && real_bswap) {
+ bswap = 0;
+ movop = OPC_MOVBE_GyMy;
+ }
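+    /* MOVBE performs the load and the byte swap in one instruction, so
+       the explicit bswap below is suppressed; real_bswap still guards
+       the cases MOVBE does not handle alone (e.g. MO_UW keeps the
+       movzwl + rolw sequence for the zero extension).  */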
+
+ switch (memop & MO_SSIZE) {
+ case MO_UB:
tcg_out_modrm_offset(s, OPC_MOVZBL + seg, datalo, base, ofs);
break;
- case 0 | 4:
+ case MO_SB:
tcg_out_modrm_offset(s, OPC_MOVSBL + P_REXW + seg, datalo, base, ofs);
break;
- case 1:
+ case MO_UW:
tcg_out_modrm_offset(s, OPC_MOVZWL + seg, datalo, base, ofs);
- if (bswap) {
+ if (real_bswap) {
tcg_out_rolw_8(s, datalo);
}
break;
- case 1 | 4:
- if (bswap) {
- tcg_out_modrm_offset(s, OPC_MOVZWL + seg, datalo, base, ofs);
- tcg_out_rolw_8(s, datalo);
+ case MO_SW:
+ if (real_bswap) {
+ if (have_movbe) {
+ tcg_out_modrm_offset(s, OPC_MOVBE_GyMy + P_DATA16 + seg,
+ datalo, base, ofs);
+ } else {
+ tcg_out_modrm_offset(s, OPC_MOVZWL + seg, datalo, base, ofs);
+ tcg_out_rolw_8(s, datalo);
+ }
tcg_out_modrm(s, OPC_MOVSWL + P_REXW, datalo, datalo);
} else {
tcg_out_modrm_offset(s, OPC_MOVSWL + P_REXW + seg,
datalo, base, ofs);
}
break;
- case 2:
- tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg, datalo, base, ofs);
+ case MO_UL:
+ tcg_out_modrm_offset(s, movop + seg, datalo, base, ofs);
if (bswap) {
tcg_out_bswap32(s, datalo);
}
break;
#if TCG_TARGET_REG_BITS == 64
- case 2 | 4:
- if (bswap) {
- tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg, datalo, base, ofs);
- tcg_out_bswap32(s, datalo);
+ case MO_SL:
+ if (real_bswap) {
+ tcg_out_modrm_offset(s, movop + seg, datalo, base, ofs);
+ if (bswap) {
+ tcg_out_bswap32(s, datalo);
+ }
tcg_out_ext32s(s, datalo, datalo);
} else {
tcg_out_modrm_offset(s, OPC_MOVSLQ + seg, datalo, base, ofs);
}
break;
#endif
- case 3:
+ case MO_Q:
if (TCG_TARGET_REG_BITS == 64) {
- tcg_out_modrm_offset(s, OPC_MOVL_GvEv + P_REXW + seg,
- datalo, base, ofs);
+ tcg_out_modrm_offset(s, movop + P_REXW + seg, datalo, base, ofs);
if (bswap) {
tcg_out_bswap64(s, datalo);
}
} else {
- if (bswap) {
+ if (real_bswap) {
int t = datalo;
datalo = datahi;
datahi = t;
}
if (base != datalo) {
- tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg,
- datalo, base, ofs);
- tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg,
- datahi, base, ofs + 4);
+ tcg_out_modrm_offset(s, movop + seg, datalo, base, ofs);
+ tcg_out_modrm_offset(s, movop + seg, datahi, base, ofs + 4);
} else {
- tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg,
- datahi, base, ofs + 4);
- tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg,
- datalo, base, ofs);
+ tcg_out_modrm_offset(s, movop + seg, datahi, base, ofs + 4);
+ tcg_out_modrm_offset(s, movop + seg, datalo, base, ofs);
}
if (bswap) {
tcg_out_bswap32(s, datalo);
/* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and
   EAX. It will be useful once fixed-register globals are less
common. */
-static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
- int opc)
+static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
{
- int data_reg, data_reg2 = 0;
- int addrlo_idx;
+ TCGReg datalo, datahi, addrlo;
+ TCGReg addrhi __attribute__((unused));
+ TCGMemOpIdx oi;
+ TCGMemOp opc;
#if defined(CONFIG_SOFTMMU)
- int mem_index, s_bits;
- uint8_t *label_ptr[2];
+ int mem_index;
+ TCGMemOp s_bits;
+ tcg_insn_unit *label_ptr[2];
#endif
- data_reg = args[0];
- addrlo_idx = 1;
- if (TCG_TARGET_REG_BITS == 32 && opc == 3) {
- data_reg2 = args[1];
- addrlo_idx = 2;
- }
+ datalo = *args++;
+ datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
+ addrlo = *args++;
+ addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
+ oi = *args++;
+ opc = get_memop(oi);
#if defined(CONFIG_SOFTMMU)
- mem_index = args[addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS)];
- s_bits = opc & 3;
+ mem_index = get_mmuidx(oi);
+ s_bits = opc & MO_SIZE;
- tcg_out_tlb_load(s, addrlo_idx, mem_index, s_bits, args,
+ tcg_out_tlb_load(s, addrlo, addrhi, mem_index, s_bits,
label_ptr, offsetof(CPUTLBEntry, addr_read));
/* TLB Hit. */
- tcg_out_qemu_ld_direct(s, data_reg, data_reg2, TCG_REG_L1, 0, 0, opc);
+ tcg_out_qemu_ld_direct(s, datalo, datahi, TCG_REG_L1, 0, 0, opc);
/* Record the current context of a load into ldst label */
- add_qemu_ldst_label(s,
- 1,
- opc,
- data_reg,
- data_reg2,
- args[addrlo_idx],
- args[addrlo_idx + 1],
- mem_index,
- s->code_ptr,
- label_ptr);
+ add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
+ s->code_ptr, label_ptr);
#else
{
int32_t offset = GUEST_BASE;
- int base = args[addrlo_idx];
+ TCGReg base = addrlo;
int seg = 0;
/* ??? We assume all operations have left us with register contents
offset = 0;
}
- tcg_out_qemu_ld_direct(s, data_reg, data_reg2, base, offset, seg, opc);
+ tcg_out_qemu_ld_direct(s, datalo, datahi, base, offset, seg, opc);
}
#endif
}
-static void tcg_out_qemu_st_direct(TCGContext *s, int datalo, int datahi,
- int base, intptr_t ofs, int seg,
- int sizeop)
+static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
+ TCGReg base, intptr_t ofs, int seg,
+ TCGMemOp memop)
{
-#ifdef TARGET_WORDS_BIGENDIAN
- const int bswap = 1;
-#else
- const int bswap = 0;
-#endif
/* ??? Ideally we wouldn't need a scratch register. For user-only,
we could perform the bswap twice to restore the original value
instead of moving to the scratch. But as it is, the L constraint
means that TCG_REG_L0 is definitely free here. */
- const int scratch = TCG_REG_L0;
+ const TCGReg scratch = TCG_REG_L0;
+ const TCGMemOp real_bswap = memop & MO_BSWAP;
+ TCGMemOp bswap = real_bswap;
+ int movop = OPC_MOVL_EvGv;
+
+ if (have_movbe && real_bswap) {
+ bswap = 0;
+ movop = OPC_MOVBE_MyGy;
+ }
- switch (sizeop) {
- case 0:
+ switch (memop & MO_SIZE) {
+ case MO_8:
+ /* In 32-bit mode, 8-bit stores can only happen from [abcd]x.
+ Use the scratch register if necessary. */
+ if (TCG_TARGET_REG_BITS == 32 && datalo >= 4) {
+ tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
+ datalo = scratch;
+ }
tcg_out_modrm_offset(s, OPC_MOVB_EvGv + P_REXB_R + seg,
datalo, base, ofs);
break;
- case 1:
+ case MO_16:
if (bswap) {
tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
tcg_out_rolw_8(s, scratch);
datalo = scratch;
}
- tcg_out_modrm_offset(s, OPC_MOVL_EvGv + P_DATA16 + seg,
- datalo, base, ofs);
+ tcg_out_modrm_offset(s, movop + P_DATA16 + seg, datalo, base, ofs);
break;
- case 2:
+ case MO_32:
if (bswap) {
tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
tcg_out_bswap32(s, scratch);
datalo = scratch;
}
- tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, datalo, base, ofs);
+ tcg_out_modrm_offset(s, movop + seg, datalo, base, ofs);
break;
- case 3:
+ case MO_64:
if (TCG_TARGET_REG_BITS == 64) {
if (bswap) {
tcg_out_mov(s, TCG_TYPE_I64, scratch, datalo);
tcg_out_bswap64(s, scratch);
datalo = scratch;
}
- tcg_out_modrm_offset(s, OPC_MOVL_EvGv + P_REXW + seg,
- datalo, base, ofs);
+ tcg_out_modrm_offset(s, movop + P_REXW + seg, datalo, base, ofs);
} else if (bswap) {
tcg_out_mov(s, TCG_TYPE_I32, scratch, datahi);
tcg_out_bswap32(s, scratch);
tcg_out_bswap32(s, scratch);
tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, scratch, base, ofs+4);
} else {
- tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, datalo, base, ofs);
- tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, datahi, base, ofs+4);
+ if (real_bswap) {
+ int t = datalo;
+ datalo = datahi;
+ datahi = t;
+ }
+ tcg_out_modrm_offset(s, movop + seg, datalo, base, ofs);
+ tcg_out_modrm_offset(s, movop + seg, datahi, base, ofs+4);
}
break;
default:
}
}
-static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
- int opc)
+static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
{
- int data_reg, data_reg2 = 0;
- int addrlo_idx;
+ TCGReg datalo, datahi, addrlo;
+ TCGReg addrhi __attribute__((unused));
+ TCGMemOpIdx oi;
+ TCGMemOp opc;
#if defined(CONFIG_SOFTMMU)
- int mem_index, s_bits;
- uint8_t *label_ptr[2];
+ int mem_index;
+ TCGMemOp s_bits;
+ tcg_insn_unit *label_ptr[2];
#endif
- data_reg = args[0];
- addrlo_idx = 1;
- if (TCG_TARGET_REG_BITS == 32 && opc == 3) {
- data_reg2 = args[1];
- addrlo_idx = 2;
- }
+ datalo = *args++;
+ datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
+ addrlo = *args++;
+ addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
+ oi = *args++;
+ opc = get_memop(oi);
#if defined(CONFIG_SOFTMMU)
- mem_index = args[addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS)];
- s_bits = opc;
+ mem_index = get_mmuidx(oi);
+ s_bits = opc & MO_SIZE;
- tcg_out_tlb_load(s, addrlo_idx, mem_index, s_bits, args,
+ tcg_out_tlb_load(s, addrlo, addrhi, mem_index, s_bits,
label_ptr, offsetof(CPUTLBEntry, addr_write));
/* TLB Hit. */
- tcg_out_qemu_st_direct(s, data_reg, data_reg2, TCG_REG_L1, 0, 0, opc);
+ tcg_out_qemu_st_direct(s, datalo, datahi, TCG_REG_L1, 0, 0, opc);
/* Record the current context of a store into ldst label */
- add_qemu_ldst_label(s,
- 0,
- opc,
- data_reg,
- data_reg2,
- args[addrlo_idx],
- args[addrlo_idx + 1],
- mem_index,
- s->code_ptr,
- label_ptr);
+ add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
+ s->code_ptr, label_ptr);
#else
{
int32_t offset = GUEST_BASE;
- int base = args[addrlo_idx];
+ TCGReg base = addrlo;
int seg = 0;
/* ??? We assume all operations have left us with register contents
offset = 0;
}
- tcg_out_qemu_st_direct(s, data_reg, data_reg2, base, offset, seg, opc);
- }
-#endif
-}
-
-#if defined(CONFIG_SOFTMMU)
-/*
- * Record the context of a call to the out of line helper code for the slow path
- * for a load or store, so that we can later generate the correct helper code
- */
-static void add_qemu_ldst_label(TCGContext *s,
- int is_ld,
- int opc,
- int data_reg,
- int data_reg2,
- int addrlo_reg,
- int addrhi_reg,
- int mem_index,
- uint8_t *raddr,
- uint8_t **label_ptr)
-{
- int idx;
- TCGLabelQemuLdst *label;
-
- if (s->nb_qemu_ldst_labels >= TCG_MAX_QEMU_LDST) {
- tcg_abort();
- }
-
- idx = s->nb_qemu_ldst_labels++;
- label = (TCGLabelQemuLdst *)&s->qemu_ldst_labels[idx];
- label->is_ld = is_ld;
- label->opc = opc;
- label->datalo_reg = data_reg;
- label->datahi_reg = data_reg2;
- label->addrlo_reg = addrlo_reg;
- label->addrhi_reg = addrhi_reg;
- label->mem_index = mem_index;
- label->raddr = raddr;
- label->label_ptr[0] = label_ptr[0];
- if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
- label->label_ptr[1] = label_ptr[1];
- }
-}
-
-/*
- * Generate code for the slow path for a load at the end of block
- */
-static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
-{
- int opc = l->opc;
- int s_bits = opc & 3;
- TCGReg data_reg;
- uint8_t **label_ptr = &l->label_ptr[0];
-
- /* resolve label address */
- *(uint32_t *)label_ptr[0] = (uint32_t)(s->code_ptr - label_ptr[0] - 4);
- if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
- *(uint32_t *)label_ptr[1] = (uint32_t)(s->code_ptr - label_ptr[1] - 4);
- }
-
- if (TCG_TARGET_REG_BITS == 32) {
- int ofs = 0;
-
- tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
- ofs += 4;
-
- tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
- ofs += 4;
-
- if (TARGET_LONG_BITS == 64) {
- tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
- ofs += 4;
- }
-
- tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, l->mem_index);
- ofs += 4;
-
- tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, (uintptr_t)l->raddr);
- } else {
- tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
- /* The second argument is already loaded with addrlo. */
- tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2],
- l->mem_index);
- tcg_out_movi(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[3],
- (uintptr_t)l->raddr);
+ tcg_out_qemu_st_direct(s, datalo, datahi, base, offset, seg, opc);
}
-
- tcg_out_calli(s, (uintptr_t)qemu_ld_helpers[s_bits]);
-
- data_reg = l->datalo_reg;
- switch(opc) {
- case 0 | 4:
- tcg_out_ext8s(s, data_reg, TCG_REG_EAX, P_REXW);
- break;
- case 1 | 4:
- tcg_out_ext16s(s, data_reg, TCG_REG_EAX, P_REXW);
- break;
- case 0:
- tcg_out_ext8u(s, data_reg, TCG_REG_EAX);
- break;
- case 1:
- tcg_out_ext16u(s, data_reg, TCG_REG_EAX);
- break;
- case 2:
- tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
- break;
-#if TCG_TARGET_REG_BITS == 64
- case 2 | 4:
- tcg_out_ext32s(s, data_reg, TCG_REG_EAX);
- break;
#endif
- case 3:
- if (TCG_TARGET_REG_BITS == 64) {
- tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_RAX);
- } else if (data_reg == TCG_REG_EDX) {
- /* xchg %edx, %eax */
- tcg_out_opc(s, OPC_XCHG_ax_r32 + TCG_REG_EDX, 0, 0, 0);
- tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EAX);
- } else {
- tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
- tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EDX);
- }
- break;
- default:
- tcg_abort();
- }
-
- /* Jump to the code corresponding to next IR of qemu_st */
- tcg_out_jmp(s, (uintptr_t)l->raddr);
}
-/*
- * Generate code for the slow path for a store at the end of block
- */
-static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
-{
- int opc = l->opc;
- int s_bits = opc & 3;
- uint8_t **label_ptr = &l->label_ptr[0];
- TCGReg retaddr;
-
- /* resolve label address */
- *(uint32_t *)label_ptr[0] = (uint32_t)(s->code_ptr - label_ptr[0] - 4);
- if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
- *(uint32_t *)label_ptr[1] = (uint32_t)(s->code_ptr - label_ptr[1] - 4);
- }
-
- if (TCG_TARGET_REG_BITS == 32) {
- int ofs = 0;
-
- tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
- ofs += 4;
-
- tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
- ofs += 4;
-
- if (TARGET_LONG_BITS == 64) {
- tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
- ofs += 4;
- }
-
- tcg_out_st(s, TCG_TYPE_I32, l->datalo_reg, TCG_REG_ESP, ofs);
- ofs += 4;
-
- if (opc == 3) {
- tcg_out_st(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_ESP, ofs);
- ofs += 4;
- }
-
- tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, l->mem_index);
- ofs += 4;
-
- retaddr = TCG_REG_EAX;
- tcg_out_movi(s, TCG_TYPE_I32, retaddr, (uintptr_t)l->raddr);
- tcg_out_st(s, TCG_TYPE_I32, retaddr, TCG_REG_ESP, ofs);
- } else {
- tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
- /* The second argument is already loaded with addrlo. */
- tcg_out_mov(s, (opc == 3 ? TCG_TYPE_I64 : TCG_TYPE_I32),
- tcg_target_call_iarg_regs[2], l->datalo_reg);
- tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3],
- l->mem_index);
-
- if (ARRAY_SIZE(tcg_target_call_iarg_regs) > 4) {
- retaddr = tcg_target_call_iarg_regs[4];
- tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
- } else {
- retaddr = TCG_REG_RAX;
- tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
- tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP, 0);
- }
- }
-
- /* "Tail call" to the helper, with the return address back inline. */
- tcg_out_push(s, retaddr);
- tcg_out_jmp(s, (uintptr_t)qemu_st_helpers[s_bits]);
-}
-
-/*
- * Generate TB finalization at the end of block
- */
-void tcg_out_tb_finalize(TCGContext *s)
-{
- int i;
- TCGLabelQemuLdst *label;
-
- /* qemu_ld/st slow paths */
- for (i = 0; i < s->nb_qemu_ldst_labels; i++) {
- label = (TCGLabelQemuLdst *)&s->qemu_ldst_labels[i];
- if (label->is_ld) {
- tcg_out_qemu_ld_slow_path(s, label);
- } else {
- tcg_out_qemu_st_slow_path(s, label);
- }
- }
-}
-#endif /* CONFIG_SOFTMMU */
-
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg *args, const int *const_args)
{
- int c, rexw = 0;
+ int c, vexop, rexw = 0;
#if TCG_TARGET_REG_BITS == 64
# define OP_32_64(x) \
switch(opc) {
case INDEX_op_exit_tb:
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_EAX, args[0]);
- tcg_out_jmp(s, (uintptr_t)tb_ret_addr);
+ tcg_out_jmp(s, tb_ret_addr);
break;
case INDEX_op_goto_tb:
if (s->tb_jmp_offset) {
/* direct jump method */
tcg_out8(s, OPC_JMP_long); /* jmp im */
- s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
+ s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
tcg_out32(s, 0);
} else {
/* indirect jump method */
tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, -1,
(intptr_t)(s->tb_next + args[0]));
}
- s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
- break;
- case INDEX_op_call:
- if (const_args[0]) {
- tcg_out_calli(s, args[0]);
- } else {
- /* call *reg */
- tcg_out_modrm(s, OPC_GRP5, EXT5_CALLN_Ev, args[0]);
- }
+ s->tb_next_offset[args[0]] = tcg_current_code_size(s);
break;
case INDEX_op_br:
- tcg_out_jxx(s, JCC_JMP, args[0], 0);
- break;
- case INDEX_op_movi_i32:
- tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1]);
+ tcg_out_jxx(s, JCC_JMP, arg_label(args[0]), 0);
break;
OP_32_64(ld8u):
/* Note that we can ignore REXW for the zero-extend to 64-bit. */
}
break;
+ OP_32_64(andc):
+ if (const_args[2]) {
+ tcg_out_mov(s, rexw ? TCG_TYPE_I64 : TCG_TYPE_I32,
+ args[0], args[1]);
+ tgen_arithi(s, ARITH_AND + rexw, args[0], ~args[2], 0);
+ } else {
+ tcg_out_vex_modrm(s, OPC_ANDN + rexw, args[0], args[2], args[1]);
+ }
+ break;
+
OP_32_64(mul):
if (const_args[2]) {
int32_t val;
OP_32_64(shl):
c = SHIFT_SHL;
- goto gen_shift;
+ vexop = OPC_SHLX;
+ goto gen_shift_maybe_vex;
OP_32_64(shr):
c = SHIFT_SHR;
- goto gen_shift;
+ vexop = OPC_SHRX;
+ goto gen_shift_maybe_vex;
OP_32_64(sar):
c = SHIFT_SAR;
- goto gen_shift;
+ vexop = OPC_SARX;
+ goto gen_shift_maybe_vex;
OP_32_64(rotl):
c = SHIFT_ROL;
goto gen_shift;
OP_32_64(rotr):
c = SHIFT_ROR;
goto gen_shift;
+ gen_shift_maybe_vex:
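+        /* SHLX/SHRX/SARX take the count only in a register; constant
+           counts still use the classic shift encodings below.  */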
+ if (have_bmi2 && !const_args[2]) {
+ tcg_out_vex_modrm(s, vexop + rexw, args[0], args[2], args[1]);
+ break;
+ }
+ /* FALLTHRU */
gen_shift:
if (const_args[2]) {
tcg_out_shifti(s, c + rexw, args[0], args[2]);
case INDEX_op_brcond_i32:
tcg_out_brcond32(s, args[2], args[0], args[1], const_args[1],
- args[3], 0);
+ arg_label(args[3]), 0);
break;
case INDEX_op_setcond_i32:
tcg_out_setcond32(s, args[3], args[0], args[1],
tcg_out_ext16u(s, args[0], args[1]);
break;
- case INDEX_op_qemu_ld8u:
+ case INDEX_op_qemu_ld_i32:
tcg_out_qemu_ld(s, args, 0);
break;
- case INDEX_op_qemu_ld8s:
- tcg_out_qemu_ld(s, args, 0 | 4);
- break;
- case INDEX_op_qemu_ld16u:
+ case INDEX_op_qemu_ld_i64:
tcg_out_qemu_ld(s, args, 1);
break;
- case INDEX_op_qemu_ld16s:
- tcg_out_qemu_ld(s, args, 1 | 4);
- break;
-#if TCG_TARGET_REG_BITS == 64
- case INDEX_op_qemu_ld32u:
-#endif
- case INDEX_op_qemu_ld32:
- tcg_out_qemu_ld(s, args, 2);
- break;
- case INDEX_op_qemu_ld64:
- tcg_out_qemu_ld(s, args, 3);
- break;
-
- case INDEX_op_qemu_st8:
+ case INDEX_op_qemu_st_i32:
tcg_out_qemu_st(s, args, 0);
break;
- case INDEX_op_qemu_st16:
+ case INDEX_op_qemu_st_i64:
tcg_out_qemu_st(s, args, 1);
break;
- case INDEX_op_qemu_st32:
- tcg_out_qemu_st(s, args, 2);
- break;
- case INDEX_op_qemu_st64:
- tcg_out_qemu_st(s, args, 3);
- break;
OP_32_64(mulu2):
tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_MUL, args[3]);
tcg_out_setcond2(s, args, const_args);
break;
#else /* TCG_TARGET_REG_BITS == 64 */
- case INDEX_op_movi_i64:
- tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
- break;
case INDEX_op_ld32s_i64:
tcg_out_modrm_offset(s, OPC_MOVSLQ, args[0], args[1], args[2]);
break;
tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
}
break;
- case INDEX_op_qemu_ld32s:
- tcg_out_qemu_ld(s, args, 2 | 4);
- break;
case INDEX_op_brcond_i64:
tcg_out_brcond64(s, args[2], args[0], args[1], const_args[1],
- args[3], 0);
+ arg_label(args[3]), 0);
break;
case INDEX_op_setcond_i64:
tcg_out_setcond64(s, args[3], args[0], args[1],
}
break;
+ case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
+ case INDEX_op_mov_i64:
+ case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */
+ case INDEX_op_movi_i64:
+ case INDEX_op_call: /* Always emitted via tcg_out_call. */
default:
tcg_abort();
}
static const TCGTargetOpDef x86_op_defs[] = {
{ INDEX_op_exit_tb, { } },
{ INDEX_op_goto_tb, { } },
- { INDEX_op_call, { "ri" } },
{ INDEX_op_br, { } },
- { INDEX_op_mov_i32, { "r", "r" } },
- { INDEX_op_movi_i32, { "r" } },
{ INDEX_op_ld8u_i32, { "r", "r" } },
{ INDEX_op_ld8s_i32, { "r", "r" } },
{ INDEX_op_ld16u_i32, { "r", "r" } },
{ INDEX_op_and_i32, { "r", "0", "ri" } },
{ INDEX_op_or_i32, { "r", "0", "ri" } },
{ INDEX_op_xor_i32, { "r", "0", "ri" } },
+ { INDEX_op_andc_i32, { "r", "r", "ri" } },
- { INDEX_op_shl_i32, { "r", "0", "ci" } },
- { INDEX_op_shr_i32, { "r", "0", "ci" } },
- { INDEX_op_sar_i32, { "r", "0", "ci" } },
+ { INDEX_op_shl_i32, { "r", "0", "Ci" } },
+ { INDEX_op_shr_i32, { "r", "0", "Ci" } },
+ { INDEX_op_sar_i32, { "r", "0", "Ci" } },
{ INDEX_op_rotl_i32, { "r", "0", "ci" } },
{ INDEX_op_rotr_i32, { "r", "0", "ci" } },
{ INDEX_op_setcond_i32, { "q", "r", "ri" } },
{ INDEX_op_deposit_i32, { "Q", "0", "Q" } },
-#if TCG_TARGET_HAS_movcond_i32
{ INDEX_op_movcond_i32, { "r", "r", "ri", "r", "0" } },
-#endif
{ INDEX_op_mulu2_i32, { "a", "d", "a", "r" } },
{ INDEX_op_muls2_i32, { "a", "d", "a", "r" } },
{ INDEX_op_brcond2_i32, { "r", "r", "ri", "ri" } },
{ INDEX_op_setcond2_i32, { "r", "r", "r", "ri", "ri" } },
#else
- { INDEX_op_mov_i64, { "r", "r" } },
- { INDEX_op_movi_i64, { "r" } },
{ INDEX_op_ld8u_i64, { "r", "r" } },
{ INDEX_op_ld8s_i64, { "r", "r" } },
{ INDEX_op_ld16u_i64, { "r", "r" } },
{ INDEX_op_and_i64, { "r", "0", "reZ" } },
{ INDEX_op_or_i64, { "r", "0", "re" } },
{ INDEX_op_xor_i64, { "r", "0", "re" } },
+ { INDEX_op_andc_i64, { "r", "r", "rI" } },
- { INDEX_op_shl_i64, { "r", "0", "ci" } },
- { INDEX_op_shr_i64, { "r", "0", "ci" } },
- { INDEX_op_sar_i64, { "r", "0", "ci" } },
+ { INDEX_op_shl_i64, { "r", "0", "Ci" } },
+ { INDEX_op_shr_i64, { "r", "0", "Ci" } },
+ { INDEX_op_sar_i64, { "r", "0", "Ci" } },
{ INDEX_op_rotl_i64, { "r", "0", "ci" } },
{ INDEX_op_rotr_i64, { "r", "0", "ci" } },
#endif
#if TCG_TARGET_REG_BITS == 64
- { INDEX_op_qemu_ld8u, { "r", "L" } },
- { INDEX_op_qemu_ld8s, { "r", "L" } },
- { INDEX_op_qemu_ld16u, { "r", "L" } },
- { INDEX_op_qemu_ld16s, { "r", "L" } },
- { INDEX_op_qemu_ld32, { "r", "L" } },
- { INDEX_op_qemu_ld32u, { "r", "L" } },
- { INDEX_op_qemu_ld32s, { "r", "L" } },
- { INDEX_op_qemu_ld64, { "r", "L" } },
-
- { INDEX_op_qemu_st8, { "L", "L" } },
- { INDEX_op_qemu_st16, { "L", "L" } },
- { INDEX_op_qemu_st32, { "L", "L" } },
- { INDEX_op_qemu_st64, { "L", "L" } },
+ { INDEX_op_qemu_ld_i32, { "r", "L" } },
+ { INDEX_op_qemu_st_i32, { "L", "L" } },
+ { INDEX_op_qemu_ld_i64, { "r", "L" } },
+ { INDEX_op_qemu_st_i64, { "L", "L" } },
#elif TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
- { INDEX_op_qemu_ld8u, { "r", "L" } },
- { INDEX_op_qemu_ld8s, { "r", "L" } },
- { INDEX_op_qemu_ld16u, { "r", "L" } },
- { INDEX_op_qemu_ld16s, { "r", "L" } },
- { INDEX_op_qemu_ld32, { "r", "L" } },
- { INDEX_op_qemu_ld64, { "r", "r", "L" } },
-
- { INDEX_op_qemu_st8, { "cb", "L" } },
- { INDEX_op_qemu_st16, { "L", "L" } },
- { INDEX_op_qemu_st32, { "L", "L" } },
- { INDEX_op_qemu_st64, { "L", "L", "L" } },
+ { INDEX_op_qemu_ld_i32, { "r", "L" } },
+ { INDEX_op_qemu_st_i32, { "L", "L" } },
+ { INDEX_op_qemu_ld_i64, { "r", "r", "L" } },
+ { INDEX_op_qemu_st_i64, { "L", "L", "L" } },
#else
- { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
- { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
- { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
- { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
- { INDEX_op_qemu_ld32, { "r", "L", "L" } },
- { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } },
-
- { INDEX_op_qemu_st8, { "cb", "L", "L" } },
- { INDEX_op_qemu_st16, { "L", "L", "L" } },
- { INDEX_op_qemu_st32, { "L", "L", "L" } },
- { INDEX_op_qemu_st64, { "L", "L", "L", "L" } },
+ { INDEX_op_qemu_ld_i32, { "r", "L", "L" } },
+ { INDEX_op_qemu_st_i32, { "L", "L", "L" } },
+ { INDEX_op_qemu_ld_i64, { "r", "r", "L", "L" } },
+ { INDEX_op_qemu_st_i64, { "L", "L", "L", "L" } },
#endif
{ -1 },
};
static void tcg_target_init(TCGContext *s)
{
- /* For 32-bit, 99% certainty that we're running on hardware that supports
- cmov, but we still need to check. In case cmov is not available, we'll
- use a small forward branch. */
+#ifdef CONFIG_CPUID_H
+ unsigned a, b, c, d;
+ int max = __get_cpuid_max(0, 0);
+
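+    /* Where a feature is known at compile time, have_X is a macro and
+       the matching #ifndef below compiles the runtime probe away.  */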
+ if (max >= 1) {
+ __cpuid(1, a, b, c, d);
#ifndef have_cmov
- {
- unsigned a, b, c, d;
- have_cmov = (__get_cpuid(1, &a, &b, &c, &d) && (d & bit_CMOV));
+ /* For 32-bit, 99% certainty that we're running on hardware that
+ supports cmov, but we still need to check. In case cmov is not
+ available, we'll use a small forward branch. */
+ have_cmov = (d & bit_CMOV) != 0;
+#endif
+#ifndef have_movbe
+ /* MOVBE is only available on Intel Atom and Haswell CPUs, so we
+ need to probe for it. */
+ have_movbe = (c & bit_MOVBE) != 0;
+#endif
+ }
+
+ if (max >= 7) {
+ /* BMI1 is available on AMD Piledriver and Intel Haswell CPUs. */
+ __cpuid_count(7, 0, a, b, c, d);
+#ifdef bit_BMI
+ have_bmi1 = (b & bit_BMI) != 0;
+#endif
+#ifndef have_bmi2
+ have_bmi2 = (b & bit_BMI2) != 0;
+#endif
}
#endif
}
typedef struct {
- DebugFrameCIE cie;
- DebugFrameFDEHeader fde;
+ DebugFrameHeader h;
uint8_t fde_def_cfa[4];
uint8_t fde_reg_ofs[14];
} DebugFrame;
/* Host machine without ELF. */
#elif TCG_TARGET_REG_BITS == 64
#define ELF_HOST_MACHINE EM_X86_64
-static DebugFrame debug_frame = {
- .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
- .cie.id = -1,
- .cie.version = 1,
- .cie.code_align = 1,
- .cie.data_align = 0x78, /* sleb128 -8 */
- .cie.return_column = 16,
+static const DebugFrame debug_frame = {
+ .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
+ .h.cie.id = -1,
+ .h.cie.version = 1,
+ .h.cie.code_align = 1,
+ .h.cie.data_align = 0x78, /* sleb128 -8 */
+ .h.cie.return_column = 16,
/* Total FDE size does not include the "len" member. */
- .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),
+ .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
.fde_def_cfa = {
12, 7, /* DW_CFA_def_cfa %rsp, ... */
};
#else
#define ELF_HOST_MACHINE EM_386
-static DebugFrame debug_frame = {
- .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
- .cie.id = -1,
- .cie.version = 1,
- .cie.code_align = 1,
- .cie.data_align = 0x7c, /* sleb128 -4 */
- .cie.return_column = 8,
+static const DebugFrame debug_frame = {
+ .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
+ .h.cie.id = -1,
+ .h.cie.version = 1,
+ .h.cie.code_align = 1,
+ .h.cie.data_align = 0x7c, /* sleb128 -4 */
+ .h.cie.return_column = 8,
/* Total FDE size does not include the "len" member. */
- .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),
+ .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
.fde_def_cfa = {
12, 4, /* DW_CFA_def_cfa %esp, ... */
#if defined(ELF_HOST_MACHINE)
void tcg_register_jit(void *buf, size_t buf_size)
{
- debug_frame.fde.func_start = (uintptr_t)buf;
- debug_frame.fde.func_len = buf_size;
-
tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}
#endif