* THE SOFTWARE.
*/
+#include "tcg-be-ldst.h"
+
#ifndef NDEBUG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
#if TCG_TARGET_REG_BITS == 64
# define TCG_REG_L1 TCG_REG_EDX
#endif
+/* For 32-bit, we are going to attempt to determine at runtime whether cmov
+   is available.  However, the host compiler must supply <cpuid.h>, as we're
+   not going to go so far as to write our own inline assembly.  */
+#if TCG_TARGET_REG_BITS == 64
+# define have_cmov 1
+#elif defined(CONFIG_CPUID_H)
+#include <cpuid.h>
+static bool have_cmov;
+#else
+# define have_cmov 0
+#endif
+
static uint8_t *tb_ret_addr;
static void patch_reloc(uint8_t *code_ptr, int type,
- tcg_target_long value, tcg_target_long addend)
+ intptr_t value, intptr_t addend)
{
value += addend;
switch(type) {
/* qemu_ld/st address constraint */
case 'L':
ct->ct |= TCG_CT_REG;
-#if TCG_TARGET_REG_BITS == 64
+ if (TCG_TARGET_REG_BITS == 64) {
tcg_regset_set32(ct->u.regs, 0, 0xffff);
-#else
+ } else {
tcg_regset_set32(ct->u.regs, 0, 0xff);
-#endif
+ }
tcg_regset_reset_reg(ct->u.regs, TCG_REG_L0);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_L1);
break;
that will follow the instruction. */
static void tcg_out_modrm_sib_offset(TCGContext *s, int opc, int r, int rm,
- int index, int shift,
- tcg_target_long offset)
+ int index, int shift, intptr_t offset)
{
int mod, len;
if (TCG_TARGET_REG_BITS == 64) {
/* Try for a rip-relative addressing mode. This has replaced
the 32-bit-mode absolute addressing encoding. */
- tcg_target_long pc = (tcg_target_long)s->code_ptr + 5 + ~rm;
- tcg_target_long disp = offset - pc;
+ intptr_t pc = (intptr_t)s->code_ptr + 5 + ~rm;
+ intptr_t disp = offset - pc;
if (disp == (int32_t)disp) {
tcg_out_opc(s, opc, r, 0, 0);
tcg_out8(s, (LOWREGMASK(r) << 3) | 5);
/* A simplification of the above with no index or shift. */
static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r,
- int rm, tcg_target_long offset)
+ int rm, intptr_t offset)
{
tcg_out_modrm_sib_offset(s, opc, r, rm, -1, 0, offset);
}
static void tcg_out_movi(TCGContext *s, TCGType type,
TCGReg ret, tcg_target_long arg)
{
+ tcg_target_long diff;
+
if (arg == 0) {
tgen_arithr(s, ARITH_XOR, ret, ret);
return;
- } else if (arg == (uint32_t)arg || type == TCG_TYPE_I32) {
+ }
+ if (arg == (uint32_t)arg || type == TCG_TYPE_I32) {
tcg_out_opc(s, OPC_MOVL_Iv + LOWREGMASK(ret), 0, ret, 0);
tcg_out32(s, arg);
- } else if (arg == (int32_t)arg) {
+ return;
+ }
+ if (arg == (int32_t)arg) {
tcg_out_modrm(s, OPC_MOVL_EvIz + P_REXW, 0, ret);
tcg_out32(s, arg);
- } else {
- tcg_out_opc(s, OPC_MOVL_Iv + P_REXW + LOWREGMASK(ret), 0, ret, 0);
- tcg_out32(s, arg);
- tcg_out32(s, arg >> 31 >> 1);
+ return;
}
+
+ /* Try a 7 byte pc-relative lea before the 10 byte movq. */
+ diff = arg - ((uintptr_t)s->code_ptr + 7);
+ if (diff == (int32_t)diff) {
+ tcg_out_opc(s, OPC_LEA | P_REXW, ret, 0, 0);
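+        /* ModRM byte: mod=00, reg=ret, r/m=101, i.e. a RIP-relative disp32. */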
+ tcg_out8(s, (LOWREGMASK(ret) << 3) | 5);
+ tcg_out32(s, diff);
+ return;
+ }
+
+ tcg_out_opc(s, OPC_MOVL_Iv + P_REXW + LOWREGMASK(ret), 0, ret, 0);
+ tcg_out64(s, arg);
}
static inline void tcg_out_pushi(TCGContext *s, tcg_target_long val)
}
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
- TCGReg arg1, tcg_target_long arg2)
+ TCGReg arg1, intptr_t arg2)
{
int opc = OPC_MOVL_GvEv + (type == TCG_TYPE_I64 ? P_REXW : 0);
tcg_out_modrm_offset(s, opc, ret, arg1, arg2);
}
static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
- TCGReg arg1, tcg_target_long arg2)
+ TCGReg arg1, intptr_t arg2)
{
int opc = OPC_MOVL_EvGv + (type == TCG_TYPE_I64 ? P_REXW : 0);
tcg_out_modrm_offset(s, opc, arg, arg1, arg2);
}
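+/* Store the 32-bit immediate VAL at BASE+OFS; for TCG_TYPE_I64 the immediate
+   is sign-extended to 64 bits by the MOV r/m64, imm32 encoding.  */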
+static inline void tcg_out_sti(TCGContext *s, TCGType type, TCGReg base,
+ tcg_target_long ofs, tcg_target_long val)
+{
+ int opc = OPC_MOVL_EvIz + (type == TCG_TYPE_I64 ? P_REXW : 0);
+ tcg_out_modrm_offset(s, opc, 0, base, ofs);
+ tcg_out32(s, val);
+}
+
static void tcg_out_shifti(TCGContext *s, int subopc, int reg, int count)
{
/* Propagate an opcode prefix, such as P_DATA16. */
TCGLabel *l = &s->labels[label_index];
if (l->has_value) {
- val = l->u.value - (tcg_target_long)s->code_ptr;
+ val = l->u.value - (intptr_t)s->code_ptr;
val1 = val - 2;
if ((int8_t)val1 == val1) {
if (opc == -1) {
TCGArg v1)
{
tcg_out_cmp(s, c1, c2, const_c2, 0);
- tcg_out_modrm(s, OPC_CMOVCC | tcg_cond_to_jcc[cond], dest, v1);
+ if (have_cmov) {
+ tcg_out_modrm(s, OPC_CMOVCC | tcg_cond_to_jcc[cond], dest, v1);
+ } else {
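+        /* Without cmov, branch over the move when the condition is false. */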
+ int over = gen_new_label();
+ tcg_out_jxx(s, tcg_cond_to_jcc[tcg_invert_cond(cond)], over, 1);
+ tcg_out_mov(s, TCG_TYPE_I32, dest, v1);
+ tcg_out_label(s, over, s->code_ptr);
+ }
}
#if TCG_TARGET_REG_BITS == 64
}
#endif
-static void tcg_out_branch(TCGContext *s, int call, tcg_target_long dest)
+static void tcg_out_branch(TCGContext *s, int call, uintptr_t dest)
{
- tcg_target_long disp = dest - (tcg_target_long)s->code_ptr - 5;
+ intptr_t disp = dest - (intptr_t)s->code_ptr - 5;
if (disp == (int32_t)disp) {
tcg_out_opc(s, call ? OPC_CALL_Jz : OPC_JMP_long, 0, 0, 0);
}
}
-static inline void tcg_out_calli(TCGContext *s, tcg_target_long dest)
+static inline void tcg_out_calli(TCGContext *s, uintptr_t dest)
{
tcg_out_branch(s, 1, dest);
}
-static void tcg_out_jmp(TCGContext *s, tcg_target_long dest)
+static void tcg_out_jmp(TCGContext *s, uintptr_t dest)
{
tcg_out_branch(s, 0, dest);
}
#if defined(CONFIG_SOFTMMU)
-
-#include "../../softmmu_defs.h"
-
-/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
- int mmu_idx) */
-static const void *qemu_ld_helpers[4] = {
- helper_ldb_mmu,
- helper_ldw_mmu,
- helper_ldl_mmu,
- helper_ldq_mmu,
+/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
+ * int mmu_idx, uintptr_t ra)
+ */
+static const void * const qemu_ld_helpers[4] = {
+ helper_ret_ldub_mmu,
+ helper_ret_lduw_mmu,
+ helper_ret_ldul_mmu,
+ helper_ret_ldq_mmu,
};
-/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
- uintxx_t val, int mmu_idx) */
-static const void *qemu_st_helpers[4] = {
- helper_stb_mmu,
- helper_stw_mmu,
- helper_stl_mmu,
- helper_stq_mmu,
+/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
+ * uintxx_t val, int mmu_idx, uintptr_t ra)
+ */
+static const void * const qemu_st_helpers[4] = {
+ helper_ret_stb_mmu,
+ helper_ret_stw_mmu,
+ helper_ret_stl_mmu,
+ helper_ret_stq_mmu,
};
-static void add_qemu_ldst_label(TCGContext *s,
- int is_ld,
- int opc,
- int data_reg,
- int data_reg2,
- int addrlo_reg,
- int addrhi_reg,
- int mem_index,
- uint8_t *raddr,
- uint8_t **label_ptr);
-
/* Perform the TLB load and compare.
Inputs:
- ADDRLO_IDX contains the index into ARGS of the low part of the
- address; the high part of the address is at ADDR_LOW_IDX+1.
+ ADDRLO and ADDRHI contain the low and high part of the address.
MEM_INDEX and S_BITS are the memory context and log2 size of the load.
First argument register is clobbered. */
-static inline void tcg_out_tlb_load(TCGContext *s, int addrlo_idx,
- int mem_index, int s_bits,
- const TCGArg *args,
+static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
+ int mem_index, TCGMemOp s_bits,
uint8_t **label_ptr, int which)
{
- const int addrlo = args[addrlo_idx];
- const int r0 = TCG_REG_L0;
- const int r1 = TCG_REG_L1;
- TCGType type = TCG_TYPE_I32;
- int rexw = 0;
+ const TCGReg r0 = TCG_REG_L0;
+ const TCGReg r1 = TCG_REG_L1;
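+    /* ttype/trexw track the guest address size (TARGET_LONG_BITS), while
+       htype/hrexw track the host pointer size (TCG_TYPE_PTR); the two can
+       differ, e.g. for a 32-bit guest on an x86_64 host.  */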
+ TCGType ttype = TCG_TYPE_I32;
+ TCGType htype = TCG_TYPE_I32;
+ int trexw = 0, hrexw = 0;
- if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 64) {
- type = TCG_TYPE_I64;
- rexw = P_REXW;
+ if (TCG_TARGET_REG_BITS == 64) {
+ if (TARGET_LONG_BITS == 64) {
+ ttype = TCG_TYPE_I64;
+ trexw = P_REXW;
+ }
+ if (TCG_TYPE_PTR == TCG_TYPE_I64) {
+ htype = TCG_TYPE_I64;
+ hrexw = P_REXW;
+ }
}
- tcg_out_mov(s, type, r0, addrlo);
- tcg_out_mov(s, type, r1, addrlo);
+ tcg_out_mov(s, htype, r0, addrlo);
+ tcg_out_mov(s, ttype, r1, addrlo);
- tcg_out_shifti(s, SHIFT_SHR + rexw, r0,
+ tcg_out_shifti(s, SHIFT_SHR + hrexw, r0,
TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
- tgen_arithi(s, ARITH_AND + rexw, r1,
+ tgen_arithi(s, ARITH_AND + trexw, r1,
TARGET_PAGE_MASK | ((1 << s_bits) - 1), 0);
- tgen_arithi(s, ARITH_AND + rexw, r0,
+ tgen_arithi(s, ARITH_AND + hrexw, r0,
(CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0);
- tcg_out_modrm_sib_offset(s, OPC_LEA + P_REXW, r0, TCG_AREG0, r0, 0,
+ tcg_out_modrm_sib_offset(s, OPC_LEA + hrexw, r0, TCG_AREG0, r0, 0,
offsetof(CPUArchState, tlb_table[mem_index][0])
+ which);
/* cmp 0(r0), r1 */
- tcg_out_modrm_offset(s, OPC_CMP_GvEv + rexw, r1, r0, 0);
+ tcg_out_modrm_offset(s, OPC_CMP_GvEv + trexw, r1, r0, 0);
- tcg_out_mov(s, type, r1, addrlo);
+ /* Prepare for both the fast path add of the tlb addend, and the slow
+       path function argument setup.  There are two cases worth noting:
+ For 32-bit guest and x86_64 host, MOVL zero-extends the guest address
+ before the fastpath ADDQ below. For 64-bit guest and x32 host, MOVQ
+ copies the entire guest address for the slow path, while truncation
+ for the 32-bit host happens with the fastpath ADDL below. */
+ tcg_out_mov(s, ttype, r1, addrlo);
/* jne slow_path */
tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
/* cmp 4(r0), addrhi */
- tcg_out_modrm_offset(s, OPC_CMP_GvEv, args[addrlo_idx+1], r0, 4);
+ tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi, r0, 4);
/* jne slow_path */
tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
/* TLB Hit. */
/* add addend(r0), r1 */
- tcg_out_modrm_offset(s, OPC_ADD_GvEv + P_REXW, r1, r0,
+ tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, r1, r0,
offsetof(CPUTLBEntry, addend) - which);
}
+
+/*
+ * Record the context of a call to the out-of-line helper code for the slow
+ * path for a load or store, so that we can later generate the correct
+ * helper code.
+ */
+static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOp opc,
+ TCGReg datalo, TCGReg datahi,
+ TCGReg addrlo, TCGReg addrhi,
+ int mem_index, uint8_t *raddr,
+ uint8_t **label_ptr)
+{
+ TCGLabelQemuLdst *label = new_ldst_label(s);
+
+ label->is_ld = is_ld;
+ label->opc = opc;
+ label->datalo_reg = datalo;
+ label->datahi_reg = datahi;
+ label->addrlo_reg = addrlo;
+ label->addrhi_reg = addrhi;
+ label->mem_index = mem_index;
+ label->raddr = raddr;
+ label->label_ptr[0] = label_ptr[0];
+ if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
+ label->label_ptr[1] = label_ptr[1];
+ }
+}
+
+/*
+ * Generate code for the slow path for a load at the end of the block
+ */
+static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+{
+ TCGMemOp opc = l->opc;
+ TCGMemOp s_bits = opc & MO_SIZE;
+ TCGReg data_reg;
+ uint8_t **label_ptr = &l->label_ptr[0];
+
+ /* resolve label address */
+ *(uint32_t *)label_ptr[0] = (uint32_t)(s->code_ptr - label_ptr[0] - 4);
+ if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
+ *(uint32_t *)label_ptr[1] = (uint32_t)(s->code_ptr - label_ptr[1] - 4);
+ }
+
+ if (TCG_TARGET_REG_BITS == 32) {
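+        /* The 32-bit host passes every helper argument on the stack:
+           env, addrlo[, addrhi], mem_index, return address.  */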
+ int ofs = 0;
+
+ tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
+ ofs += 4;
+
+ tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
+ ofs += 4;
+
+ if (TARGET_LONG_BITS == 64) {
+ tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
+ ofs += 4;
+ }
+
+ tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, l->mem_index);
+ ofs += 4;
+
+ tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, (uintptr_t)l->raddr);
+ } else {
+ tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
+ /* The second argument is already loaded with addrlo. */
+ tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2],
+ l->mem_index);
+ tcg_out_movi(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[3],
+ (uintptr_t)l->raddr);
+ }
+
+ tcg_out_calli(s, (uintptr_t)qemu_ld_helpers[s_bits]);
+
+ data_reg = l->datalo_reg;
+ switch (opc & MO_SSIZE) {
+ case MO_SB:
+ tcg_out_ext8s(s, data_reg, TCG_REG_EAX, P_REXW);
+ break;
+ case MO_SW:
+ tcg_out_ext16s(s, data_reg, TCG_REG_EAX, P_REXW);
+ break;
+#if TCG_TARGET_REG_BITS == 64
+ case MO_SL:
+ tcg_out_ext32s(s, data_reg, TCG_REG_EAX);
+ break;
+#endif
+ case MO_UB:
+ case MO_UW:
+ /* Note that the helpers have zero-extended to tcg_target_long. */
+ case MO_UL:
+ tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
+ break;
+ case MO_Q:
+ if (TCG_TARGET_REG_BITS == 64) {
+ tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_RAX);
+ } else if (data_reg == TCG_REG_EDX) {
+ /* xchg %edx, %eax */
+ tcg_out_opc(s, OPC_XCHG_ax_r32 + TCG_REG_EDX, 0, 0, 0);
+ tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EAX);
+ } else {
+ tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
+ tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EDX);
+ }
+ break;
+ default:
+ tcg_abort();
+ }
+
+    /* Jump to the code corresponding to next IR of qemu_ld */
+ tcg_out_jmp(s, (uintptr_t)l->raddr);
+}
+
+/*
+ * Generate code for the slow path for a store at the end of the block
+ */
+static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+{
+ TCGMemOp opc = l->opc;
+ TCGMemOp s_bits = opc & MO_SIZE;
+ uint8_t **label_ptr = &l->label_ptr[0];
+ TCGReg retaddr;
+
+ /* resolve label address */
+ *(uint32_t *)label_ptr[0] = (uint32_t)(s->code_ptr - label_ptr[0] - 4);
+ if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
+ *(uint32_t *)label_ptr[1] = (uint32_t)(s->code_ptr - label_ptr[1] - 4);
+ }
+
+ if (TCG_TARGET_REG_BITS == 32) {
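+        /* As in the load slow path, all arguments go on the stack: env,
+           addrlo[, addrhi], datalo[, datahi], mem_index, return address.  */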
+ int ofs = 0;
+
+ tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
+ ofs += 4;
+
+ tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
+ ofs += 4;
+
+ if (TARGET_LONG_BITS == 64) {
+ tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
+ ofs += 4;
+ }
+
+ tcg_out_st(s, TCG_TYPE_I32, l->datalo_reg, TCG_REG_ESP, ofs);
+ ofs += 4;
+
+ if (s_bits == MO_64) {
+ tcg_out_st(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_ESP, ofs);
+ ofs += 4;
+ }
+
+ tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, l->mem_index);
+ ofs += 4;
+
+ retaddr = TCG_REG_EAX;
+ tcg_out_movi(s, TCG_TYPE_I32, retaddr, (uintptr_t)l->raddr);
+ tcg_out_st(s, TCG_TYPE_I32, retaddr, TCG_REG_ESP, ofs);
+ } else {
+ tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
+ /* The second argument is already loaded with addrlo. */
+ tcg_out_mov(s, (s_bits == MO_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
+ tcg_target_call_iarg_regs[2], l->datalo_reg);
+ tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3],
+ l->mem_index);
+
+ if (ARRAY_SIZE(tcg_target_call_iarg_regs) > 4) {
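+            /* The ABI provides a fifth argument register (e.g. SysV x86_64). */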
+ retaddr = tcg_target_call_iarg_regs[4];
+ tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
+ } else {
+ retaddr = TCG_REG_RAX;
+ tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
+ tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP, 0);
+ }
+ }
+
+ /* "Tail call" to the helper, with the return address back inline. */
+ tcg_out_push(s, retaddr);
+ tcg_out_jmp(s, (uintptr_t)qemu_st_helpers[s_bits]);
+}
#elif defined(__x86_64__) && defined(__linux__)
# include <asm/prctl.h>
# include <sys/prctl.h>
static inline void setup_guest_base_seg(void) { }
#endif /* SOFTMMU */
-static void tcg_out_qemu_ld_direct(TCGContext *s, int datalo, int datahi,
- int base, tcg_target_long ofs, int seg,
- int sizeop)
+static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
+ TCGReg base, intptr_t ofs, int seg,
+ TCGMemOp memop)
{
-#ifdef TARGET_WORDS_BIGENDIAN
- const int bswap = 1;
-#else
- const int bswap = 0;
-#endif
- switch (sizeop) {
- case 0:
+ const TCGMemOp bswap = memop & MO_BSWAP;
+
+ switch (memop & MO_SSIZE) {
+ case MO_UB:
tcg_out_modrm_offset(s, OPC_MOVZBL + seg, datalo, base, ofs);
break;
- case 0 | 4:
+ case MO_SB:
tcg_out_modrm_offset(s, OPC_MOVSBL + P_REXW + seg, datalo, base, ofs);
break;
- case 1:
+ case MO_UW:
tcg_out_modrm_offset(s, OPC_MOVZWL + seg, datalo, base, ofs);
if (bswap) {
tcg_out_rolw_8(s, datalo);
}
break;
- case 1 | 4:
+ case MO_SW:
if (bswap) {
tcg_out_modrm_offset(s, OPC_MOVZWL + seg, datalo, base, ofs);
tcg_out_rolw_8(s, datalo);
datalo, base, ofs);
}
break;
- case 2:
+ case MO_UL:
tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg, datalo, base, ofs);
if (bswap) {
tcg_out_bswap32(s, datalo);
}
break;
#if TCG_TARGET_REG_BITS == 64
- case 2 | 4:
+ case MO_SL:
if (bswap) {
tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg, datalo, base, ofs);
tcg_out_bswap32(s, datalo);
}
break;
#endif
- case 3:
+ case MO_Q:
if (TCG_TARGET_REG_BITS == 64) {
tcg_out_modrm_offset(s, OPC_MOVL_GvEv + P_REXW + seg,
datalo, base, ofs);
/* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and
EAX. It will be useful once fixed registers globals are less
common. */
-static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
- int opc)
+static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, TCGMemOp opc)
{
- int data_reg, data_reg2 = 0;
- int addrlo_idx;
+ TCGReg datalo, datahi, addrlo;
#if defined(CONFIG_SOFTMMU)
- int mem_index, s_bits;
+ TCGReg addrhi;
+ int mem_index;
+ TCGMemOp s_bits;
uint8_t *label_ptr[2];
#endif
- data_reg = args[0];
- addrlo_idx = 1;
- if (TCG_TARGET_REG_BITS == 32 && opc == 3) {
- data_reg2 = args[1];
- addrlo_idx = 2;
- }
+ datalo = *args++;
+    datahi = (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64
+              ? *args++ : 0);
+ addrlo = *args++;
#if defined(CONFIG_SOFTMMU)
- mem_index = args[addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS)];
- s_bits = opc & 3;
+ addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
+ mem_index = *args++;
+ s_bits = opc & MO_SIZE;
- tcg_out_tlb_load(s, addrlo_idx, mem_index, s_bits, args,
+ tcg_out_tlb_load(s, addrlo, addrhi, mem_index, s_bits,
label_ptr, offsetof(CPUTLBEntry, addr_read));
/* TLB Hit. */
- tcg_out_qemu_ld_direct(s, data_reg, data_reg2, TCG_REG_L1, 0, 0, opc);
+ tcg_out_qemu_ld_direct(s, datalo, datahi, TCG_REG_L1, 0, 0, opc);
/* Record the current context of a load into ldst label */
- add_qemu_ldst_label(s,
- 1,
- opc,
- data_reg,
- data_reg2,
- args[addrlo_idx],
- args[addrlo_idx + 1],
- mem_index,
- s->code_ptr,
- label_ptr);
+ add_qemu_ldst_label(s, 1, opc, datalo, datahi, addrlo, addrhi,
+ mem_index, s->code_ptr, label_ptr);
#else
{
int32_t offset = GUEST_BASE;
- int base = args[addrlo_idx];
+ TCGReg base = addrlo;
int seg = 0;
/* ??? We assume all operations have left us with register contents
offset = 0;
}
- tcg_out_qemu_ld_direct(s, data_reg, data_reg2, base, offset, seg, opc);
+ tcg_out_qemu_ld_direct(s, datalo, datahi, base, offset, seg, opc);
}
#endif
}
-static void tcg_out_qemu_st_direct(TCGContext *s, int datalo, int datahi,
- int base, tcg_target_long ofs, int seg,
- int sizeop)
+static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
+ TCGReg base, intptr_t ofs, int seg,
+ TCGMemOp memop)
{
-#ifdef TARGET_WORDS_BIGENDIAN
- const int bswap = 1;
-#else
- const int bswap = 0;
-#endif
+ const TCGMemOp bswap = memop & MO_BSWAP;
+
/* ??? Ideally we wouldn't need a scratch register. For user-only,
we could perform the bswap twice to restore the original value
instead of moving to the scratch. But as it is, the L constraint
means that TCG_REG_L0 is definitely free here. */
- const int scratch = TCG_REG_L0;
+ const TCGReg scratch = TCG_REG_L0;
- switch (sizeop) {
- case 0:
+ switch (memop & MO_SIZE) {
+ case MO_8:
+        /* In 32-bit mode, 8-bit stores can only happen from [abcd]x.
+ Use the scratch register if necessary. */
+ if (TCG_TARGET_REG_BITS == 32 && datalo >= 4) {
+ tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
+ datalo = scratch;
+ }
tcg_out_modrm_offset(s, OPC_MOVB_EvGv + P_REXB_R + seg,
datalo, base, ofs);
break;
- case 1:
+ case MO_16:
if (bswap) {
tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
tcg_out_rolw_8(s, scratch);
tcg_out_modrm_offset(s, OPC_MOVL_EvGv + P_DATA16 + seg,
datalo, base, ofs);
break;
- case 2:
+ case MO_32:
if (bswap) {
tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
tcg_out_bswap32(s, scratch);
}
tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, datalo, base, ofs);
break;
- case 3:
+ case MO_64:
if (TCG_TARGET_REG_BITS == 64) {
if (bswap) {
tcg_out_mov(s, TCG_TYPE_I64, scratch, datalo);
}
}
-static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
- int opc)
+static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, TCGMemOp opc)
{
- int data_reg, data_reg2 = 0;
- int addrlo_idx;
+ TCGReg datalo, datahi, addrlo;
#if defined(CONFIG_SOFTMMU)
- int mem_index, s_bits;
+ TCGReg addrhi;
+ int mem_index;
+ TCGMemOp s_bits;
uint8_t *label_ptr[2];
#endif
- data_reg = args[0];
- addrlo_idx = 1;
- if (TCG_TARGET_REG_BITS == 32 && opc == 3) {
- data_reg2 = args[1];
- addrlo_idx = 2;
- }
+ datalo = *args++;
+    datahi = (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64
+              ? *args++ : 0);
+ addrlo = *args++;
#if defined(CONFIG_SOFTMMU)
- mem_index = args[addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS)];
- s_bits = opc;
+ addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
+ mem_index = *args++;
+ s_bits = opc & MO_SIZE;
- tcg_out_tlb_load(s, addrlo_idx, mem_index, s_bits, args,
+ tcg_out_tlb_load(s, addrlo, addrhi, mem_index, s_bits,
label_ptr, offsetof(CPUTLBEntry, addr_write));
/* TLB Hit. */
- tcg_out_qemu_st_direct(s, data_reg, data_reg2, TCG_REG_L1, 0, 0, opc);
+ tcg_out_qemu_st_direct(s, datalo, datahi, TCG_REG_L1, 0, 0, opc);
/* Record the current context of a store into ldst label */
- add_qemu_ldst_label(s,
- 0,
- opc,
- data_reg,
- data_reg2,
- args[addrlo_idx],
- args[addrlo_idx + 1],
- mem_index,
- s->code_ptr,
- label_ptr);
+ add_qemu_ldst_label(s, 0, opc, datalo, datahi, addrlo, addrhi,
+ mem_index, s->code_ptr, label_ptr);
#else
{
int32_t offset = GUEST_BASE;
- int base = args[addrlo_idx];
+ TCGReg base = addrlo;
int seg = 0;
/* ??? We assume all operations have left us with register contents
offset = 0;
}
- tcg_out_qemu_st_direct(s, data_reg, data_reg2, base, offset, seg, opc);
- }
-#endif
-}
-
-#if defined(CONFIG_SOFTMMU)
-/*
- * Record the context of a call to the out of line helper code for the slow path
- * for a load or store, so that we can later generate the correct helper code
- */
-static void add_qemu_ldst_label(TCGContext *s,
- int is_ld,
- int opc,
- int data_reg,
- int data_reg2,
- int addrlo_reg,
- int addrhi_reg,
- int mem_index,
- uint8_t *raddr,
- uint8_t **label_ptr)
-{
- int idx;
- TCGLabelQemuLdst *label;
-
- if (s->nb_qemu_ldst_labels >= TCG_MAX_QEMU_LDST) {
- tcg_abort();
- }
-
- idx = s->nb_qemu_ldst_labels++;
- label = (TCGLabelQemuLdst *)&s->qemu_ldst_labels[idx];
- label->is_ld = is_ld;
- label->opc = opc;
- label->datalo_reg = data_reg;
- label->datahi_reg = data_reg2;
- label->addrlo_reg = addrlo_reg;
- label->addrhi_reg = addrhi_reg;
- label->mem_index = mem_index;
- label->raddr = raddr;
- label->label_ptr[0] = label_ptr[0];
- if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
- label->label_ptr[1] = label_ptr[1];
- }
-}
-
-/*
- * Generate code for the slow path for a load at the end of block
- */
-static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *label)
-{
- int s_bits;
- int opc = label->opc;
- int mem_index = label->mem_index;
-#if TCG_TARGET_REG_BITS == 32
- int stack_adjust;
- int addrlo_reg = label->addrlo_reg;
- int addrhi_reg = label->addrhi_reg;
-#endif
- int data_reg = label->datalo_reg;
- int data_reg2 = label->datahi_reg;
- uint8_t *raddr = label->raddr;
- uint8_t **label_ptr = &label->label_ptr[0];
-
- s_bits = opc & 3;
-
- /* resolve label address */
- *(uint32_t *)label_ptr[0] = (uint32_t)(s->code_ptr - label_ptr[0] - 4);
- if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
- *(uint32_t *)label_ptr[1] = (uint32_t)(s->code_ptr - label_ptr[1] - 4);
- }
-
-#if TCG_TARGET_REG_BITS == 32
- tcg_out_pushi(s, mem_index);
- stack_adjust = 4;
- if (TARGET_LONG_BITS == 64) {
- tcg_out_push(s, addrhi_reg);
- stack_adjust += 4;
- }
- tcg_out_push(s, addrlo_reg);
- stack_adjust += 4;
- tcg_out_push(s, TCG_AREG0);
- stack_adjust += 4;
-#else
- tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[0], TCG_AREG0);
- /* The second argument is already loaded with addrlo. */
- tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2], mem_index);
-#endif
-
- /* Code generation of qemu_ld/st's slow path calling MMU helper
-
- PRE_PROC ...
- call MMU helper
- jmp POST_PROC (2b) : short forward jump <- GETRA()
- jmp next_code (5b) : dummy long backward jump which is never executed
- POST_PROC ... : do post-processing <- GETRA() + 7
- jmp next_code : jump to the code corresponding to next IR of qemu_ld/st
- */
-
- tcg_out_calli(s, (tcg_target_long)qemu_ld_helpers[s_bits]);
-
- /* Jump to post-processing code */
- tcg_out8(s, OPC_JMP_short);
- tcg_out8(s, 5);
- /* Dummy backward jump having information of fast path'pc for MMU helpers */
- tcg_out8(s, OPC_JMP_long);
- *(int32_t *)s->code_ptr = (int32_t)(raddr - s->code_ptr - 4);
- s->code_ptr += 4;
-
-#if TCG_TARGET_REG_BITS == 32
- if (stack_adjust == (TCG_TARGET_REG_BITS / 8)) {
- /* Pop and discard. This is 2 bytes smaller than the add. */
- tcg_out_pop(s, TCG_REG_ECX);
- } else if (stack_adjust != 0) {
- tcg_out_addi(s, TCG_REG_CALL_STACK, stack_adjust);
- }
-#endif
-
- switch(opc) {
- case 0 | 4:
- tcg_out_ext8s(s, data_reg, TCG_REG_EAX, P_REXW);
- break;
- case 1 | 4:
- tcg_out_ext16s(s, data_reg, TCG_REG_EAX, P_REXW);
- break;
- case 0:
- tcg_out_ext8u(s, data_reg, TCG_REG_EAX);
- break;
- case 1:
- tcg_out_ext16u(s, data_reg, TCG_REG_EAX);
- break;
- case 2:
- tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
- break;
-#if TCG_TARGET_REG_BITS == 64
- case 2 | 4:
- tcg_out_ext32s(s, data_reg, TCG_REG_EAX);
- break;
-#endif
- case 3:
- if (TCG_TARGET_REG_BITS == 64) {
- tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_RAX);
- } else if (data_reg == TCG_REG_EDX) {
- /* xchg %edx, %eax */
- tcg_out_opc(s, OPC_XCHG_ax_r32 + TCG_REG_EDX, 0, 0, 0);
- tcg_out_mov(s, TCG_TYPE_I32, data_reg2, TCG_REG_EAX);
- } else {
- tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
- tcg_out_mov(s, TCG_TYPE_I32, data_reg2, TCG_REG_EDX);
- }
- break;
- default:
- tcg_abort();
- }
-
- /* Jump to the code corresponding to next IR of qemu_st */
- tcg_out_jmp(s, (tcg_target_long)raddr);
-}
-
-/*
- * Generate code for the slow path for a store at the end of block
- */
-static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *label)
-{
- int s_bits;
- int stack_adjust;
- int opc = label->opc;
- int mem_index = label->mem_index;
- int data_reg = label->datalo_reg;
-#if TCG_TARGET_REG_BITS == 32
- int data_reg2 = label->datahi_reg;
- int addrlo_reg = label->addrlo_reg;
- int addrhi_reg = label->addrhi_reg;
-#endif
- uint8_t *raddr = label->raddr;
- uint8_t **label_ptr = &label->label_ptr[0];
-
- s_bits = opc & 3;
-
- /* resolve label address */
- *(uint32_t *)label_ptr[0] = (uint32_t)(s->code_ptr - label_ptr[0] - 4);
- if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
- *(uint32_t *)label_ptr[1] = (uint32_t)(s->code_ptr - label_ptr[1] - 4);
- }
-
-#if TCG_TARGET_REG_BITS == 32
- tcg_out_pushi(s, mem_index);
- stack_adjust = 4;
- if (opc == 3) {
- tcg_out_push(s, data_reg2);
- stack_adjust += 4;
- }
- tcg_out_push(s, data_reg);
- stack_adjust += 4;
- if (TARGET_LONG_BITS == 64) {
- tcg_out_push(s, addrhi_reg);
- stack_adjust += 4;
+ tcg_out_qemu_st_direct(s, datalo, datahi, base, offset, seg, opc);
}
- tcg_out_push(s, addrlo_reg);
- stack_adjust += 4;
- tcg_out_push(s, TCG_AREG0);
- stack_adjust += 4;
-#else
- tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[0], TCG_AREG0);
- /* The second argument is already loaded with addrlo. */
- tcg_out_mov(s, (opc == 3 ? TCG_TYPE_I64 : TCG_TYPE_I32),
- tcg_target_call_iarg_regs[2], data_reg);
- tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3], mem_index);
- stack_adjust = 0;
#endif
-
- /* Code generation of qemu_ld/st's slow path calling MMU helper
-
- PRE_PROC ...
- call MMU helper
- jmp POST_PROC (2b) : short forward jump <- GETRA()
- jmp next_code (5b) : dummy long backward jump which is never executed
- POST_PROC ... : do post-processing <- GETRA() + 7
- jmp next_code : jump to the code corresponding to next IR of qemu_ld/st
- */
-
- tcg_out_calli(s, (tcg_target_long)qemu_st_helpers[s_bits]);
-
- /* Jump to post-processing code */
- tcg_out8(s, OPC_JMP_short);
- tcg_out8(s, 5);
- /* Dummy backward jump having information of fast path'pc for MMU helpers */
- tcg_out8(s, OPC_JMP_long);
- *(int32_t *)s->code_ptr = (int32_t)(raddr - s->code_ptr - 4);
- s->code_ptr += 4;
-
- if (stack_adjust == (TCG_TARGET_REG_BITS / 8)) {
- /* Pop and discard. This is 2 bytes smaller than the add. */
- tcg_out_pop(s, TCG_REG_ECX);
- } else if (stack_adjust != 0) {
- tcg_out_addi(s, TCG_REG_CALL_STACK, stack_adjust);
- }
-
- /* Jump to the code corresponding to next IR of qemu_st */
- tcg_out_jmp(s, (tcg_target_long)raddr);
}
-/*
- * Generate TB finalization at the end of block
- */
-void tcg_out_tb_finalize(TCGContext *s)
-{
- int i;
- TCGLabelQemuLdst *label;
-
- /* qemu_ld/st slow paths */
- for (i = 0; i < s->nb_qemu_ldst_labels; i++) {
- label = (TCGLabelQemuLdst *)&s->qemu_ldst_labels[i];
- if (label->is_ld) {
- tcg_out_qemu_ld_slow_path(s, label);
- } else {
- tcg_out_qemu_st_slow_path(s, label);
- }
- }
-}
-#endif /* CONFIG_SOFTMMU */
-
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg *args, const int *const_args)
{
switch(opc) {
case INDEX_op_exit_tb:
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_EAX, args[0]);
- tcg_out_jmp(s, (tcg_target_long) tb_ret_addr);
+ tcg_out_jmp(s, (uintptr_t)tb_ret_addr);
break;
case INDEX_op_goto_tb:
if (s->tb_jmp_offset) {
} else {
/* indirect jump method */
tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, -1,
- (tcg_target_long)(s->tb_next + args[0]));
+ (intptr_t)(s->tb_next + args[0]));
}
s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
break;
break;
case INDEX_op_qemu_ld8u:
- tcg_out_qemu_ld(s, args, 0);
+ tcg_out_qemu_ld(s, args, MO_UB);
break;
case INDEX_op_qemu_ld8s:
- tcg_out_qemu_ld(s, args, 0 | 4);
+ tcg_out_qemu_ld(s, args, MO_SB);
break;
case INDEX_op_qemu_ld16u:
- tcg_out_qemu_ld(s, args, 1);
+ tcg_out_qemu_ld(s, args, MO_TEUW);
break;
case INDEX_op_qemu_ld16s:
- tcg_out_qemu_ld(s, args, 1 | 4);
+ tcg_out_qemu_ld(s, args, MO_TESW);
break;
#if TCG_TARGET_REG_BITS == 64
case INDEX_op_qemu_ld32u:
#endif
case INDEX_op_qemu_ld32:
- tcg_out_qemu_ld(s, args, 2);
+ tcg_out_qemu_ld(s, args, MO_TEUL);
break;
case INDEX_op_qemu_ld64:
- tcg_out_qemu_ld(s, args, 3);
+ tcg_out_qemu_ld(s, args, MO_TEQ);
break;
case INDEX_op_qemu_st8:
- tcg_out_qemu_st(s, args, 0);
+ tcg_out_qemu_st(s, args, MO_UB);
break;
case INDEX_op_qemu_st16:
- tcg_out_qemu_st(s, args, 1);
+ tcg_out_qemu_st(s, args, MO_TEUW);
break;
case INDEX_op_qemu_st32:
- tcg_out_qemu_st(s, args, 2);
+ tcg_out_qemu_st(s, args, MO_TEUL);
break;
case INDEX_op_qemu_st64:
- tcg_out_qemu_st(s, args, 3);
+ tcg_out_qemu_st(s, args, MO_TEQ);
break;
-#if TCG_TARGET_REG_BITS == 32
- case INDEX_op_brcond2_i32:
- tcg_out_brcond2(s, args, const_args, 0);
- break;
- case INDEX_op_setcond2_i32:
- tcg_out_setcond2(s, args, const_args);
+ OP_32_64(mulu2):
+ tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_MUL, args[3]);
break;
- case INDEX_op_mulu2_i32:
- tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_MUL, args[3]);
+ OP_32_64(muls2):
+ tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IMUL, args[3]);
break;
- case INDEX_op_add2_i32:
+ OP_32_64(add2):
if (const_args[4]) {
- tgen_arithi(s, ARITH_ADD, args[0], args[4], 1);
+ tgen_arithi(s, ARITH_ADD + rexw, args[0], args[4], 1);
} else {
- tgen_arithr(s, ARITH_ADD, args[0], args[4]);
+ tgen_arithr(s, ARITH_ADD + rexw, args[0], args[4]);
}
if (const_args[5]) {
- tgen_arithi(s, ARITH_ADC, args[1], args[5], 1);
+ tgen_arithi(s, ARITH_ADC + rexw, args[1], args[5], 1);
} else {
- tgen_arithr(s, ARITH_ADC, args[1], args[5]);
+ tgen_arithr(s, ARITH_ADC + rexw, args[1], args[5]);
}
break;
- case INDEX_op_sub2_i32:
+ OP_32_64(sub2):
if (const_args[4]) {
- tgen_arithi(s, ARITH_SUB, args[0], args[4], 1);
+ tgen_arithi(s, ARITH_SUB + rexw, args[0], args[4], 1);
} else {
- tgen_arithr(s, ARITH_SUB, args[0], args[4]);
+ tgen_arithr(s, ARITH_SUB + rexw, args[0], args[4]);
}
if (const_args[5]) {
- tgen_arithi(s, ARITH_SBB, args[1], args[5], 1);
+ tgen_arithi(s, ARITH_SBB + rexw, args[1], args[5], 1);
} else {
- tgen_arithr(s, ARITH_SBB, args[1], args[5]);
+ tgen_arithr(s, ARITH_SBB + rexw, args[1], args[5]);
}
break;
+
+#if TCG_TARGET_REG_BITS == 32
+ case INDEX_op_brcond2_i32:
+ tcg_out_brcond2(s, args, const_args, 0);
+ break;
+ case INDEX_op_setcond2_i32:
+ tcg_out_setcond2(s, args, const_args);
+ break;
#else /* TCG_TARGET_REG_BITS == 64 */
case INDEX_op_movi_i64:
tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
}
break;
case INDEX_op_qemu_ld32s:
- tcg_out_qemu_ld(s, args, 2 | 4);
+ tcg_out_qemu_ld(s, args, MO_TESL);
break;
case INDEX_op_brcond_i64:
{ INDEX_op_movcond_i32, { "r", "r", "ri", "r", "0" } },
#endif
-#if TCG_TARGET_REG_BITS == 32
{ INDEX_op_mulu2_i32, { "a", "d", "a", "r" } },
+ { INDEX_op_muls2_i32, { "a", "d", "a", "r" } },
{ INDEX_op_add2_i32, { "r", "r", "0", "1", "ri", "ri" } },
{ INDEX_op_sub2_i32, { "r", "r", "0", "1", "ri", "ri" } },
+
+#if TCG_TARGET_REG_BITS == 32
{ INDEX_op_brcond2_i32, { "r", "r", "ri", "ri" } },
{ INDEX_op_setcond2_i32, { "r", "r", "r", "ri", "ri" } },
#else
{ INDEX_op_st32_i64, { "ri", "r" } },
{ INDEX_op_st_i64, { "re", "r" } },
- { INDEX_op_add_i64, { "r", "0", "re" } },
+ { INDEX_op_add_i64, { "r", "r", "re" } },
{ INDEX_op_mul_i64, { "r", "0", "re" } },
{ INDEX_op_div2_i64, { "a", "d", "0", "1", "r" } },
{ INDEX_op_divu2_i64, { "a", "d", "0", "1", "r" } },
{ INDEX_op_deposit_i64, { "Q", "0", "Q" } },
{ INDEX_op_movcond_i64, { "r", "r", "re", "r", "0" } },
+
+ { INDEX_op_mulu2_i64, { "a", "d", "a", "r" } },
+ { INDEX_op_muls2_i64, { "a", "d", "a", "r" } },
+ { INDEX_op_add2_i64, { "r", "r", "0", "1", "re", "re" } },
+ { INDEX_op_sub2_i64, { "r", "r", "0", "1", "re", "re" } },
#endif
#if TCG_TARGET_REG_BITS == 64
{ INDEX_op_qemu_ld32, { "r", "L" } },
{ INDEX_op_qemu_ld64, { "r", "r", "L" } },
- { INDEX_op_qemu_st8, { "cb", "L" } },
+ { INDEX_op_qemu_st8, { "L", "L" } },
{ INDEX_op_qemu_st16, { "L", "L" } },
{ INDEX_op_qemu_st32, { "L", "L" } },
{ INDEX_op_qemu_st64, { "L", "L", "L" } },
{ INDEX_op_qemu_ld32, { "r", "L", "L" } },
{ INDEX_op_qemu_ld64, { "r", "r", "L", "L" } },
- { INDEX_op_qemu_st8, { "cb", "L", "L" } },
+ { INDEX_op_qemu_st8, { "L", "L", "L" } },
{ INDEX_op_qemu_st16, { "L", "L", "L" } },
{ INDEX_op_qemu_st32, { "L", "L", "L" } },
{ INDEX_op_qemu_st64, { "L", "L", "L", "L" } },
static void tcg_target_init(TCGContext *s)
{
-#if !defined(CONFIG_USER_ONLY)
- /* fail safe */
- if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry))
- tcg_abort();
+ /* For 32-bit, 99% certainty that we're running on hardware that supports
+ cmov, but we still need to check. In case cmov is not available, we'll
+ use a small forward branch. */
+#ifndef have_cmov
+ {
+ unsigned a, b, c, d;
+ have_cmov = (__get_cpuid(1, &a, &b, &c, &d) && (d & bit_CMOV));
+ }
#endif
if (TCG_TARGET_REG_BITS == 64) {
tcg_add_target_add_op_defs(x86_op_defs);
}
-typedef struct {
- uint32_t len __attribute__((aligned((sizeof(void *)))));
- uint32_t id;
- uint8_t version;
- char augmentation[1];
- uint8_t code_align;
- uint8_t data_align;
- uint8_t return_column;
-} DebugFrameCIE;
-
-typedef struct {
- uint32_t len __attribute__((aligned((sizeof(void *)))));
- uint32_t cie_offset;
- tcg_target_long func_start __attribute__((packed));
- tcg_target_long func_len __attribute__((packed));
- uint8_t def_cfa[4];
- uint8_t reg_ofs[14];
-} DebugFrameFDE;
-
typedef struct {
DebugFrameCIE cie;
- DebugFrameFDE fde;
+ DebugFrameFDEHeader fde;
+ uint8_t fde_def_cfa[4];
+ uint8_t fde_reg_ofs[14];
} DebugFrame;
+/* We're expecting a 2 byte uleb128 encoded value. */
+QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
+
#if !defined(__ELF__)
/* Host machine without ELF. */
#elif TCG_TARGET_REG_BITS == 64
.cie.data_align = 0x78, /* sleb128 -8 */
.cie.return_column = 16,
- .fde.len = sizeof(DebugFrameFDE)-4, /* length after .len member */
- .fde.def_cfa = {
+ /* Total FDE size does not include the "len" member. */
+ .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),
+
+ .fde_def_cfa = {
12, 7, /* DW_CFA_def_cfa %rsp, ... */
(FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
(FRAME_SIZE >> 7)
},
- .fde.reg_ofs = {
+ .fde_reg_ofs = {
0x90, 1, /* DW_CFA_offset, %rip, -8 */
/* The following ordering must match tcg_target_callee_save_regs. */
0x86, 2, /* DW_CFA_offset, %rbp, -16 */
.cie.data_align = 0x7c, /* sleb128 -4 */
.cie.return_column = 8,
- .fde.len = sizeof(DebugFrameFDE)-4, /* length after .len member */
- .fde.def_cfa = {
+ /* Total FDE size does not include the "len" member. */
+ .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),
+
+ .fde_def_cfa = {
12, 4, /* DW_CFA_def_cfa %esp, ... */
(FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
(FRAME_SIZE >> 7)
},
- .fde.reg_ofs = {
+ .fde_reg_ofs = {
0x88, 1, /* DW_CFA_offset, %eip, -4 */
/* The following ordering must match tcg_target_callee_save_regs. */
0x85, 2, /* DW_CFA_offset, %ebp, -8 */
#if defined(ELF_HOST_MACHINE)
void tcg_register_jit(void *buf, size_t buf_size)
{
- /* We're expecting a 2 byte uleb128 encoded value. */
- assert(FRAME_SIZE >> 14 == 0);
-
- debug_frame.fde.func_start = (tcg_target_long) buf;
+ debug_frame.fde.func_start = (uintptr_t)buf;
debug_frame.fde.func_len = buf_size;
tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));