diff --git a/tcg/i386/tcg-target.c b/tcg/i386/tcg-target.c
index 7aec3043e3..9a585ab5ec 100644
--- a/tcg/i386/tcg-target.c
+++ b/tcg/i386/tcg-target.c
@@ -22,6 +22,8 @@
  * THE SOFTWARE.
  */
 
+#include "tcg-be-ldst.h"
+
 #ifndef NDEBUG
 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
 #if TCG_TARGET_REG_BITS == 64
@@ -86,6 +88,11 @@ static const int tcg_target_call_oarg_regs[] = {
 #endif
 };
 
+/* Constants we accept.  */
+#define TCG_CT_CONST_S32 0x100
+#define TCG_CT_CONST_U32 0x200
+#define TCG_CT_CONST_I32 0x400
+
 /* Registers used with L constraint, which are the first argument
    registers on x86_64, and two random call clobbered registers on
    i386. */
@@ -97,22 +104,45 @@ static const int tcg_target_call_oarg_regs[] = {
 # define TCG_REG_L1 TCG_REG_EDX
 #endif
 
+/* The host compiler should supply <cpuid.h> to enable runtime features
+   detection, as we're not going to go so far as our own inline assembly.
+   If not available, default values will be assumed.  */
+#if defined(CONFIG_CPUID_H)
+#include <cpuid.h>
+#endif
+
 /* For 32-bit, we are going to attempt to determine at runtime whether cmov
-   is available.  However, the host compiler must supply <cpuid.h>, as we're
-   not going to go so far as our own inline assembly.  */
+   is available.  */
 #if TCG_TARGET_REG_BITS == 64
 # define have_cmov 1
-#elif defined(CONFIG_CPUID_H)
-#include <cpuid.h>
+#elif defined(CONFIG_CPUID_H) && defined(bit_CMOV)
 static bool have_cmov;
 #else
 # define have_cmov 0
 #endif
 
+/* If bit_MOVBE is defined in cpuid.h (added in GCC version 4.6), we are
+   going to attempt to determine at runtime whether movbe is available.  */
+#if defined(CONFIG_CPUID_H) && defined(bit_MOVBE)
+static bool have_movbe;
+#else
+# define have_movbe 0
+#endif
+
+/* We need this symbol in tcg-target.h, and we can't properly conditionalize
+   it there.  Therefore we always define the variable.  */
+bool have_bmi1;
+
+#if defined(CONFIG_CPUID_H) && defined(bit_BMI2)
+static bool have_bmi2;
+#else
+# define have_bmi2 0
+#endif
+
 static uint8_t *tb_ret_addr;
 
 static void patch_reloc(uint8_t *code_ptr, int type,
-                        tcg_target_long value, tcg_target_long addend)
+                        intptr_t value, intptr_t addend)
 {
     value += addend;
     switch(type) {
@@ -121,14 +151,14 @@ static void patch_reloc(uint8_t *code_ptr, int type,
         if (value != (int32_t)value) {
             tcg_abort();
         }
-        *(uint32_t *)code_ptr = value;
+        tcg_patch32(code_ptr, value);
         break;
     case R_386_PC8:
         value -= (uintptr_t)code_ptr;
         if (value != (int8_t)value) {
             tcg_abort();
         }
-        *(uint8_t *)code_ptr = value;
+        tcg_patch8(code_ptr, value);
         break;
     default:
         tcg_abort();
@@ -151,6 +181,7 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
         tcg_regset_set_reg(ct->u.regs, TCG_REG_EBX);
         break;
     case 'c':
+    case_c:
         ct->ct |= TCG_CT_REG;
         tcg_regset_set_reg(ct->u.regs, TCG_REG_ECX);
         break;
@@ -179,6 +210,7 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
         tcg_regset_set32(ct->u.regs, 0, 0xf);
         break;
     case 'r':
+    case_r:
         ct->ct |= TCG_CT_REG;
         if (TCG_TARGET_REG_BITS == 64) {
             tcg_regset_set32(ct->u.regs, 0, 0xffff);
@@ -186,15 +218,22 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
             tcg_regset_set32(ct->u.regs, 0, 0xff);
         }
         break;
+    case 'C':
+        /* With SHRX et al, we need not use ECX as shift count register.  */
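+        /* (Illustration, not part of the upstream patch: BMI2 supplies
+           three-operand shifts, e.g. in AT&T syntax
+               shrx %ebx, %esi, %edi      # %edi = %esi >> (%ebx & 31)
+           whose count may live in any register, so the constraint can
+           fall back to a plain "r" when BMI2 is present.)  */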
+        if (have_bmi2) {
+            goto case_r;
+        } else {
+            goto case_c;
+        }
 
         /* qemu_ld/st address constraint */
     case 'L':
         ct->ct |= TCG_CT_REG;
-#if TCG_TARGET_REG_BITS == 64
+        if (TCG_TARGET_REG_BITS == 64) {
             tcg_regset_set32(ct->u.regs, 0, 0xffff);
-#else
+        } else {
             tcg_regset_set32(ct->u.regs, 0, 0xff);
-#endif
+        }
         tcg_regset_reset_reg(ct->u.regs, TCG_REG_L0);
         tcg_regset_reset_reg(ct->u.regs, TCG_REG_L1);
         break;
@@ -205,6 +244,9 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
     case 'Z':
         ct->ct |= TCG_CT_CONST_U32;
         break;
+    case 'I':
+        ct->ct |= TCG_CT_CONST_I32;
+        break;
 
     default:
         return -1;
@@ -215,7 +257,7 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
 }
 
 /* test if a constant matches the constraint */
-static inline int tcg_target_const_match(tcg_target_long val,
+static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
                                          const TCGArgConstraint *arg_ct)
 {
     int ct = arg_ct->ct;
@@ -228,6 +270,9 @@ static inline int tcg_target_const_match(tcg_target_long val,
     if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
         return 1;
     }
+    if ((ct & TCG_CT_CONST_I32) && ~val == (int32_t)~val) {
+        return 1;
+    }
     return 0;
 }
 
@@ -238,13 +283,14 @@ static inline int tcg_target_const_match(tcg_target_long val,
 #endif
 
 #define P_EXT           0x100           /* 0x0f opcode prefix */
-#define P_DATA16        0x200           /* 0x66 opcode prefix */
+#define P_EXT38         0x200           /* 0x0f 0x38 opcode prefix */
+#define P_DATA16        0x400           /* 0x66 opcode prefix */
 #if TCG_TARGET_REG_BITS == 64
-# define P_ADDR32       0x400           /* 0x67 opcode prefix */
-# define P_REXW         0x800           /* Set REX.W = 1 */
-# define P_REXB_R       0x1000          /* REG field as byte register */
-# define P_REXB_RM      0x2000          /* R/M field as byte register */
-# define P_GS           0x4000          /* gs segment override */
+# define P_ADDR32       0x800           /* 0x67 opcode prefix */
+# define P_REXW         0x1000          /* Set REX.W = 1 */
+# define P_REXB_R       0x2000          /* REG field as byte register */
+# define P_REXB_RM      0x4000          /* R/M field as byte register */
+# define P_GS           0x8000          /* gs segment override */
 #else
 # define P_ADDR32       0
 # define P_REXW         0
@@ -252,10 +298,13 @@ static inline int tcg_target_const_match(tcg_target_long val,
 # define P_REXB_RM      0
 # define P_GS           0
 #endif
+#define P_SIMDF3        0x10000         /* 0xf3 opcode prefix */
+#define P_SIMDF2        0x20000         /* 0xf2 opcode prefix */
 
 #define OPC_ARITH_EvIz  (0x81)
 #define OPC_ARITH_EvIb  (0x83)
 #define OPC_ARITH_GvEv  (0x03)          /* ... plus (ARITH_FOO << 3) */
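 /* (For instance, OPC_ARITH_GvEv | (ARITH_AND << 3) gives 0x23, the
    "and Gv,Ev" opcode, so the one base define covers the whole ALU group;
    illustrative note, not part of the upstream patch.)  */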
+#define OPC_ANDN        (0xf2 | P_EXT38)
 #define OPC_ADD_GvEv    (OPC_ARITH_GvEv | (ARITH_ADD << 3))
 #define OPC_BSWAP       (0xc8 | P_EXT)
 #define OPC_CALL_Jz     (0xe8)
@@ -277,6 +326,8 @@ static inline int tcg_target_const_match(tcg_target_long val,
 #define OPC_MOVB_EvIz   (0xc6)
 #define OPC_MOVL_EvIz   (0xc7)
 #define OPC_MOVL_Iv     (0xb8)
+#define OPC_MOVBE_GyMy  (0xf0 | P_EXT38)
+#define OPC_MOVBE_MyGy  (0xf1 | P_EXT38)
 #define OPC_MOVSBL      (0xbe | P_EXT)
 #define OPC_MOVSWL      (0xbf | P_EXT)
 #define OPC_MOVSLQ      (0x63 | P_REXW)
@@ -291,6 +342,9 @@ static inline int tcg_target_const_match(tcg_target_long val,
 #define OPC_SHIFT_1     (0xd1)
 #define OPC_SHIFT_Ib    (0xc1)
 #define OPC_SHIFT_cl    (0xd3)
+#define OPC_SARX        (0xf7 | P_EXT38 | P_SIMDF3)
+#define OPC_SHLX        (0xf7 | P_EXT38 | P_DATA16)
+#define OPC_SHRX        (0xf7 | P_EXT38 | P_SIMDF2)
 #define OPC_TESTL       (0x85)
 #define OPC_XCHG_ax_r32 (0x90)
 
@@ -379,10 +433,10 @@ static void tcg_out_opc(TCGContext *s, int opc, int r, int rm, int x)
     }
 
     rex = 0;
-    rex |= (opc & P_REXW) >> 8;         /* REX.W */
-    rex |= (r & 8) >> 1;                /* REX.R */
-    rex |= (x & 8) >> 2;                /* REX.X */
-    rex |= (rm & 8) >> 3;               /* REX.B */
+    rex |= (opc & P_REXW) ? 0x8 : 0x0;  /* REX.W */
+    rex |= (r & 8) >> 1;                /* REX.R */
+    rex |= (x & 8) >> 2;                /* REX.X */
+    rex |= (rm & 8) >> 3;               /* REX.B */
 
     /* P_REXB_{R,RM} indicates that the given register is the low byte.
        For %[abcd]l we need no REX prefix, but for %{si,di,bp,sp}l we do,
@@ -396,9 +450,13 @@ static void tcg_out_opc(TCGContext *s, int opc, int r, int rm, int x)
         tcg_out8(s, (uint8_t)(rex | 0x40));
     }
 
-    if (opc & P_EXT) {
+    if (opc & (P_EXT | P_EXT38)) {
         tcg_out8(s, 0x0f);
+        if (opc & P_EXT38) {
+            tcg_out8(s, 0x38);
+        }
     }
+
     tcg_out8(s, opc);
 }
 #else
@@ -407,8 +465,11 @@ static void tcg_out_opc(TCGContext *s, int opc)
     if (opc & P_DATA16) {
         tcg_out8(s, 0x66);
     }
-    if (opc & P_EXT) {
+    if (opc & (P_EXT | P_EXT38)) {
         tcg_out8(s, 0x0f);
+        if (opc & P_EXT38) {
+            tcg_out8(s, 0x38);
+        }
     }
     tcg_out8(s, opc);
 }
@@ -424,14 +485,55 @@ static void tcg_out_modrm(TCGContext *s, int opc, int r, int rm)
     tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
 }
 
+static void tcg_out_vex_modrm(TCGContext *s, int opc, int r, int v, int rm)
+{
+    int tmp;
+
+    if ((opc & (P_REXW | P_EXT | P_EXT38)) || (rm & 8)) {
+        /* Three byte VEX prefix.  */
+        tcg_out8(s, 0xc4);
+
+        /* VEX.m-mmmm */
+        if (opc & P_EXT38) {
+            tmp = 2;
+        } else if (opc & P_EXT) {
+            tmp = 1;
+        } else {
+            tcg_abort();
+        }
+        tmp |= 0x40;                       /* VEX.X */
+        tmp |= (r & 8 ? 0 : 0x80);         /* VEX.R */
+        tmp |= (rm & 8 ? 0 : 0x20);        /* VEX.B */
+        tcg_out8(s, tmp);
+
+        tmp = (opc & P_REXW ? 0x80 : 0);   /* VEX.W */
+    } else {
+        /* Two byte VEX prefix.  */
+        tcg_out8(s, 0xc5);
+
+        tmp = (r & 8 ? 0 : 0x80);          /* VEX.R */
+    }
+    /* VEX.pp */
+    if (opc & P_DATA16) {
+        tmp |= 1;                          /* 0x66 */
+    } else if (opc & P_SIMDF3) {
+        tmp |= 2;                          /* 0xf3 */
+    } else if (opc & P_SIMDF2) {
+        tmp |= 3;                          /* 0xf2 */
+    }
+    tmp |= (~v & 15) << 3;                 /* VEX.vvvv */
+    tcg_out8(s, tmp);
+    tcg_out8(s, opc);
+    tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
+}
+
 /* Output an opcode with a full "rm + (index<<shift) + offset" address mode.
    We handle either RM and INDEX missing with a negative value.  In 64-bit
    mode for absolute addresses, ~RM is the size of the immediate operand
    that will follow the instruction.  */
 
 static void tcg_out_modrm_sib_offset(TCGContext *s, int opc, int r, int rm,
-                                     int index, int shift,
-                                     tcg_target_long offset)
+                                     int index, int shift, intptr_t offset)
 {
     int mod, len;
 
@@ -439,8 +541,8 @@ static void tcg_out_modrm_sib_offset(TCGContext *s, int opc, int r, int rm,
         if (TCG_TARGET_REG_BITS == 64) {
             /* Try for a rip-relative addressing mode.  This has replaced
                the 32-bit-mode absolute addressing encoding.  */
-            tcg_target_long pc = (tcg_target_long)s->code_ptr + 5 + ~rm;
-            tcg_target_long disp = offset - pc;
+            intptr_t pc = (intptr_t)s->code_ptr + 5 + ~rm;
+            intptr_t disp = offset - pc;
             if (disp == (int32_t)disp) {
                 tcg_out_opc(s, opc, r, 0, 0);
                 tcg_out8(s, (LOWREGMASK(r) << 3) | 5);
@@ -514,7 +616,7 @@ static void tcg_out_modrm_sib_offset(TCGContext *s, int opc, int r, int rm,
 
 /* A simplification of the above with no index or shift.  */
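 /* (Sketch: tcg_out_modrm_offset(s, OPC_MOVL_GvEv, r, base, ofs) comes out
    roughly as "movl ofs(%base), %r"; register names here are symbolic,
    illustrative note rather than upstream text.)  */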
 static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r,
-                                        int rm, tcg_target_long offset)
+                                        int rm, intptr_t offset)
 {
     tcg_out_modrm_sib_offset(s, opc, r, rm, -1, 0, offset);
 }
 
@@ -541,20 +643,34 @@ static inline void tcg_out_mov(TCGContext *s, TCGType type,
 static void tcg_out_movi(TCGContext *s, TCGType type,
                          TCGReg ret, tcg_target_long arg)
 {
+    tcg_target_long diff;
+
     if (arg == 0) {
         tgen_arithr(s, ARITH_XOR, ret, ret);
         return;
-    } else if (arg == (uint32_t)arg || type == TCG_TYPE_I32) {
+    }
+    if (arg == (uint32_t)arg || type == TCG_TYPE_I32) {
         tcg_out_opc(s, OPC_MOVL_Iv + LOWREGMASK(ret), 0, ret, 0);
         tcg_out32(s, arg);
-    } else if (arg == (int32_t)arg) {
+        return;
+    }
+    if (arg == (int32_t)arg) {
         tcg_out_modrm(s, OPC_MOVL_EvIz + P_REXW, 0, ret);
         tcg_out32(s, arg);
-    } else {
-        tcg_out_opc(s, OPC_MOVL_Iv + P_REXW + LOWREGMASK(ret), 0, ret, 0);
-        tcg_out32(s, arg);
-        tcg_out32(s, arg >> 31 >> 1);
+        return;
     }
+
+    /* Try a 7 byte pc-relative lea before the 10 byte movq.  */
+    diff = arg - ((uintptr_t)s->code_ptr + 7);
+    if (diff == (int32_t)diff) {
+        tcg_out_opc(s, OPC_LEA | P_REXW, ret, 0, 0);
+        tcg_out8(s, (LOWREGMASK(ret) << 3) | 5);
+        tcg_out32(s, diff);
+        return;
+    }
+
+    tcg_out_opc(s, OPC_MOVL_Iv + P_REXW + LOWREGMASK(ret), 0, ret, 0);
+    tcg_out64(s, arg);
 }
 
 static inline void tcg_out_pushi(TCGContext *s, tcg_target_long val)
@@ -581,19 +697,27 @@ static inline void tcg_out_pop(TCGContext *s, int reg)
 }
 
 static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
-                              TCGReg arg1, tcg_target_long arg2)
+                              TCGReg arg1, intptr_t arg2)
 {
     int opc = OPC_MOVL_GvEv + (type == TCG_TYPE_I64 ? P_REXW : 0);
     tcg_out_modrm_offset(s, opc, ret, arg1, arg2);
 }
 
 static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
-                              TCGReg arg1, tcg_target_long arg2)
+                              TCGReg arg1, intptr_t arg2)
 {
     int opc = OPC_MOVL_EvGv + (type == TCG_TYPE_I64 ? P_REXW : 0);
     tcg_out_modrm_offset(s, opc, arg, arg1, arg2);
 }
 
+static inline void tcg_out_sti(TCGContext *s, TCGType type, TCGReg base,
+                               tcg_target_long ofs, tcg_target_long val)
+{
+    int opc = OPC_MOVL_EvIz + (type == TCG_TYPE_I64 ? P_REXW : 0);
+    tcg_out_modrm_offset(s, opc, 0, base, ofs);
+    tcg_out32(s, val);
+}
+
 static void tcg_out_shifti(TCGContext *s, int subopc, int reg, int count)
 {
     /* Propagate an opcode prefix, such as P_DATA16.  */
@@ -735,7 +859,7 @@ static void tcg_out_jxx(TCGContext *s, int opc, int label_index, int small)
     TCGLabel *l = &s->labels[label_index];
 
     if (l->has_value) {
-        val = l->u.value - (tcg_target_long)s->code_ptr;
+        val = l->u.value - (intptr_t)s->code_ptr;
         val1 = val - 2;
         if ((int8_t)val1 == val1) {
             if (opc == -1) {
@@ -975,9 +1099,9 @@ static void tcg_out_movcond64(TCGContext *s, TCGCond cond, TCGArg dest,
 }
 #endif
 
-static void tcg_out_branch(TCGContext *s, int call, tcg_target_long dest)
+static void tcg_out_branch(TCGContext *s, int call, uintptr_t dest)
 {
-    tcg_target_long disp = dest - (tcg_target_long)s->code_ptr - 5;
+    intptr_t disp = dest - (intptr_t)s->code_ptr - 5;
 
     if (disp == (int32_t)disp) {
         tcg_out_opc(s, call ? OPC_CALL_Jz : OPC_JMP_long, 0, 0, 0);
@@ -989,54 +1113,47 @@ static void tcg_out_branch(TCGContext *s, int call, tcg_target_long dest)
     }
 }
 
-static inline void tcg_out_calli(TCGContext *s, tcg_target_long dest)
+static inline void tcg_out_calli(TCGContext *s, uintptr_t dest)
 {
     tcg_out_branch(s, 1, dest);
 }
 
-static void tcg_out_jmp(TCGContext *s, tcg_target_long dest)
+static void tcg_out_jmp(TCGContext *s, uintptr_t dest)
 {
     tcg_out_branch(s, 0, dest);
 }
 
 #if defined(CONFIG_SOFTMMU)
-
-#include "exec/softmmu_defs.h"
-
-/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
-   int mmu_idx) */
-static const void *qemu_ld_helpers[4] = {
-    helper_ldb_mmu,
-    helper_ldw_mmu,
-    helper_ldl_mmu,
-    helper_ldq_mmu,
+/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
+ *                                     int mmu_idx, uintptr_t ra)
+ */
+static const void * const qemu_ld_helpers[16] = {
+    [MO_UB]   = helper_ret_ldub_mmu,
+    [MO_LEUW] = helper_le_lduw_mmu,
+    [MO_LEUL] = helper_le_ldul_mmu,
+    [MO_LEQ]  = helper_le_ldq_mmu,
+    [MO_BEUW] = helper_be_lduw_mmu,
+    [MO_BEUL] = helper_be_ldul_mmu,
+    [MO_BEQ]  = helper_be_ldq_mmu,
 };
 
-/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
-   uintxx_t val, int mmu_idx) */
-static const void *qemu_st_helpers[4] = {
-    helper_stb_mmu,
-    helper_stw_mmu,
-    helper_stl_mmu,
-    helper_stq_mmu,
+/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
+ *                                     uintxx_t val, int mmu_idx, uintptr_t ra)
+ */
+static const void * const qemu_st_helpers[16] = {
+    [MO_UB]   = helper_ret_stb_mmu,
+    [MO_LEUW] = helper_le_stw_mmu,
+    [MO_LEUL] = helper_le_stl_mmu,
+    [MO_LEQ]  = helper_le_stq_mmu,
+    [MO_BEUW] = helper_be_stw_mmu,
+    [MO_BEUL] = helper_be_stl_mmu,
+    [MO_BEQ]  = helper_be_stq_mmu,
 };
 
-static void add_qemu_ldst_label(TCGContext *s,
-                                int is_ld,
-                                int opc,
-                                int data_reg,
-                                int data_reg2,
-                                int addrlo_reg,
-                                int addrhi_reg,
-                                int mem_index,
-                                uint8_t *raddr,
-                                uint8_t **label_ptr);
-
 /* Perform the TLB load and compare.
 
    Inputs:
-   ADDRLO_IDX contains the index into ARGS of the low part of the
-   address; the high part of the address is at ADDR_LOW_IDX+1.
+   ADDRLO and ADDRHI contain the low and high part of the address.
 
    MEM_INDEX and S_BITS are the memory context and log2 size of the load.
 
@@ -1054,41 +1171,52 @@ static void add_qemu_ldst_label(TCGContext *s,
   First argument register is clobbered.  */
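 
 /* (As a rough sketch, added for illustration: for a 32-bit guest on a
    64-bit host the fast path emitted below comes out approximately as
 
        movq  %addr, %r0
        movl  %addr, %r1d
        shrq  $(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS), %r0
        andl  $(TARGET_PAGE_MASK | ((1 << s_bits) - 1)), %r1d
        andq  $((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS), %r0
        leaq  tlb_table[mem_index](%env, %r0), %r0
        cmpl  (%r0), %r1d
        movl  %addr, %r1d
        jne   slow_path
        addq  addend(%r0), %r1
 
    with r0/r1 standing in for TCG_REG_L0/L1; register names are symbolic,
    not the exact encoding.)  */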
 
-static inline void tcg_out_tlb_load(TCGContext *s, int addrlo_idx,
-                                    int mem_index, int s_bits,
-                                    const TCGArg *args,
+static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
+                                    int mem_index, TCGMemOp s_bits,
                                     uint8_t **label_ptr, int which)
 {
-    const int addrlo = args[addrlo_idx];
-    const int r0 = TCG_REG_L0;
-    const int r1 = TCG_REG_L1;
-    TCGType type = TCG_TYPE_I32;
-    int rexw = 0;
+    const TCGReg r0 = TCG_REG_L0;
+    const TCGReg r1 = TCG_REG_L1;
+    TCGType ttype = TCG_TYPE_I32;
+    TCGType htype = TCG_TYPE_I32;
+    int trexw = 0, hrexw = 0;
 
-    if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 64) {
-        type = TCG_TYPE_I64;
-        rexw = P_REXW;
+    if (TCG_TARGET_REG_BITS == 64) {
+        if (TARGET_LONG_BITS == 64) {
+            ttype = TCG_TYPE_I64;
+            trexw = P_REXW;
+        }
+        if (TCG_TYPE_PTR == TCG_TYPE_I64) {
+            htype = TCG_TYPE_I64;
+            hrexw = P_REXW;
+        }
     }
 
-    tcg_out_mov(s, type, r0, addrlo);
-    tcg_out_mov(s, type, r1, addrlo);
+    tcg_out_mov(s, htype, r0, addrlo);
+    tcg_out_mov(s, ttype, r1, addrlo);
 
-    tcg_out_shifti(s, SHIFT_SHR + rexw, r0,
+    tcg_out_shifti(s, SHIFT_SHR + hrexw, r0,
                    TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
 
-    tgen_arithi(s, ARITH_AND + rexw, r1,
+    tgen_arithi(s, ARITH_AND + trexw, r1,
                 TARGET_PAGE_MASK | ((1 << s_bits) - 1), 0);
-    tgen_arithi(s, ARITH_AND + rexw, r0,
+    tgen_arithi(s, ARITH_AND + hrexw, r0,
                 (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0);
 
-    tcg_out_modrm_sib_offset(s, OPC_LEA + P_REXW, r0, TCG_AREG0, r0, 0,
+    tcg_out_modrm_sib_offset(s, OPC_LEA + hrexw, r0, TCG_AREG0, r0, 0,
                              offsetof(CPUArchState, tlb_table[mem_index][0])
                              + which);
 
     /* cmp 0(r0), r1 */
-    tcg_out_modrm_offset(s, OPC_CMP_GvEv + rexw, r1, r0, 0);
+    tcg_out_modrm_offset(s, OPC_CMP_GvEv + trexw, r1, r0, 0);
 
-    tcg_out_mov(s, type, r1, addrlo);
+    /* Prepare for both the fast path add of the tlb addend, and the slow
+       path function argument setup.  There are two cases worth note:
+       For 32-bit guest and x86_64 host, MOVL zero-extends the guest address
+       before the fastpath ADDQ below.  For 64-bit guest and x32 host, MOVQ
+       copies the entire guest address for the slow path, while truncation
+       for the 32-bit host happens with the fastpath ADDL below.  */
+    tcg_out_mov(s, ttype, r1, addrlo);
 
     /* jne slow_path */
     tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
@@ -1097,7 +1225,7 @@ static inline void tcg_out_tlb_load(TCGContext *s, int addrlo_idx,
 
     if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
         /* cmp 4(r0), addrhi */
-        tcg_out_modrm_offset(s, OPC_CMP_GvEv, args[addrlo_idx+1], r0, 4);
+        tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi, r0, 4);
 
         /* jne slow_path */
         tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
@@ -1108,9 +1236,185 @@ static inline void tcg_out_tlb_load(TCGContext *s, int addrlo_idx,
 
     /* TLB Hit.  */
 
     /* add addend(r0), r1 */
-    tcg_out_modrm_offset(s, OPC_ADD_GvEv + P_REXW, r1, r0,
+    tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, r1, r0,
                          offsetof(CPUTLBEntry, addend) - which);
 }
+
+/*
+ * Record the context of a call to the out of line helper code for the slow path
+ * for a load or store, so that we can later generate the correct helper code
+ */
+static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
+                                TCGReg datalo, TCGReg datahi,
+                                TCGReg addrlo, TCGReg addrhi,
+                                int mem_index, uint8_t *raddr,
+                                uint8_t **label_ptr)
+{
+    TCGLabelQemuLdst *label = new_ldst_label(s);
+
+    label->is_ld = is_ld;
+    label->opc = opc;
+    label->datalo_reg = datalo;
+    label->datahi_reg = datahi;
+    label->addrlo_reg = addrlo;
+    label->addrhi_reg = addrhi;
+    label->mem_index = mem_index;
+    label->raddr = raddr;
+    label->label_ptr[0] = label_ptr[0];
+    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
+        label->label_ptr[1] = label_ptr[1];
+    }
+}
+
+/*
+ * Generate code for the slow path for a load at the end of block
+ */
+static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+{
+    TCGMemOp opc = l->opc;
+    TCGReg data_reg;
+    uint8_t **label_ptr = &l->label_ptr[0];
+
+    /* resolve label address */
+    tcg_patch32(label_ptr[0], s->code_ptr - label_ptr[0] - 4);
+    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
+        tcg_patch32(label_ptr[1], s->code_ptr - label_ptr[1] - 4);
+    }
+
+    if (TCG_TARGET_REG_BITS == 32) {
+        int ofs = 0;
+
+        tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
+        ofs += 4;
+
+        tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
+        ofs += 4;
+
+        if (TARGET_LONG_BITS == 64) {
+            tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
+            ofs += 4;
+        }
+
+        tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, l->mem_index);
+        ofs += 4;
+
+        tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, (uintptr_t)l->raddr);
+    } else {
+        tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
+        /* The second argument is already loaded with addrlo.  */
+        tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2],
+                     l->mem_index);
+        tcg_out_movi(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[3],
+                     (uintptr_t)l->raddr);
+    }
+
+    tcg_out_calli(s, (uintptr_t)qemu_ld_helpers[opc & ~MO_SIGN]);
+
+    data_reg = l->datalo_reg;
+    switch (opc & MO_SSIZE) {
+    case MO_SB:
+        tcg_out_ext8s(s, data_reg, TCG_REG_EAX, P_REXW);
+        break;
+    case MO_SW:
+        tcg_out_ext16s(s, data_reg, TCG_REG_EAX, P_REXW);
+        break;
+#if TCG_TARGET_REG_BITS == 64
+    case MO_SL:
+        tcg_out_ext32s(s, data_reg, TCG_REG_EAX);
+        break;
+#endif
+    case MO_UB:
+    case MO_UW:
+        /* Note that the helpers have zero-extended to tcg_target_long.  */
+    case MO_UL:
+        tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
+        break;
+    case MO_Q:
+        if (TCG_TARGET_REG_BITS == 64) {
+            tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_RAX);
+        } else if (data_reg == TCG_REG_EDX) {
+            /* xchg %edx, %eax */
+            tcg_out_opc(s, OPC_XCHG_ax_r32 + TCG_REG_EDX, 0, 0, 0);
+            tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EAX);
+        } else {
+            tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
+            tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EDX);
+        }
+        break;
+    default:
+        tcg_abort();
+    }
+
+    /* Jump to the code corresponding to next IR of qemu_st */
+    tcg_out_jmp(s, (uintptr_t)l->raddr);
+}
+
+/*
+ * Generate code for the slow path for a store at the end of block
+ */
+static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+{
+    TCGMemOp opc = l->opc;
+    TCGMemOp s_bits = opc & MO_SIZE;
+    uint8_t **label_ptr = &l->label_ptr[0];
+    TCGReg retaddr;
+
+    /* resolve label address */
+    tcg_patch32(label_ptr[0], s->code_ptr - label_ptr[0] - 4);
+    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
+        tcg_patch32(label_ptr[1], s->code_ptr - label_ptr[1] - 4);
+    }
+
+    if (TCG_TARGET_REG_BITS == 32) {
+        int ofs = 0;
+
+        tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
+        ofs += 4;
+
+        tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
+        ofs += 4;
+
+        if (TARGET_LONG_BITS == 64) {
+            tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
+            ofs += 4;
+        }
+
+        tcg_out_st(s, TCG_TYPE_I32, l->datalo_reg, TCG_REG_ESP, ofs);
+        ofs += 4;
+
+        if (s_bits == MO_64) {
+            tcg_out_st(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_ESP, ofs);
+            ofs += 4;
+        }
+
+        tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, l->mem_index);
+        ofs += 4;
+
+        retaddr = TCG_REG_EAX;
+        tcg_out_movi(s, TCG_TYPE_I32, retaddr, (uintptr_t)l->raddr);
+        tcg_out_st(s, TCG_TYPE_I32, retaddr, TCG_REG_ESP, ofs);
+    } else {
+        tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
+        /* The second argument is already loaded with addrlo.  */
+        tcg_out_mov(s, (s_bits == MO_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
+                    tcg_target_call_iarg_regs[2], l->datalo_reg);
+        tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3],
+                     l->mem_index);
+
+        if (ARRAY_SIZE(tcg_target_call_iarg_regs) > 4) {
+            retaddr = tcg_target_call_iarg_regs[4];
+            tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
+        } else {
+            retaddr = TCG_REG_RAX;
+            tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
+            tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP, 0);
+        }
+    }
+
+    /* "Tail call" to the helper, with the return address back inline.  */
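+    /* (That is, roughly:
+           push raddr
+           jmp  helper
+       so the helper's own RET transfers control straight back to the
+       fast-path code, as if the helper had been CALLed from there.)  */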
+    tcg_out_push(s, retaddr);
+    tcg_out_jmp(s, (uintptr_t)qemu_st_helpers[opc]);
+}
 #elif defined(__x86_64__) && defined(__linux__)
 # include <asm/prctl.h>
 # include <sys/prctl.h>
@@ -1129,78 +1433,84 @@ static inline void setup_guest_base_seg(void)
 static inline void setup_guest_base_seg(void) { }
 #endif /* SOFTMMU */
 
-static void tcg_out_qemu_ld_direct(TCGContext *s, int datalo, int datahi,
-                                   int base, tcg_target_long ofs, int seg,
-                                   int sizeop)
+static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
+                                   TCGReg base, intptr_t ofs, int seg,
+                                   TCGMemOp memop)
 {
-#ifdef TARGET_WORDS_BIGENDIAN
-    const int bswap = 1;
-#else
-    const int bswap = 0;
-#endif
-    switch (sizeop) {
-    case 0:
+    const TCGMemOp real_bswap = memop & MO_BSWAP;
+    TCGMemOp bswap = real_bswap;
+    int movop = OPC_MOVL_GvEv;
+
+    if (have_movbe && real_bswap) {
+        bswap = 0;
+        movop = OPC_MOVBE_GyMy;
+    }
+
+    switch (memop & MO_SSIZE) {
+    case MO_UB:
         tcg_out_modrm_offset(s, OPC_MOVZBL + seg, datalo, base, ofs);
         break;
-    case 0 | 4:
+    case MO_SB:
         tcg_out_modrm_offset(s, OPC_MOVSBL + P_REXW + seg, datalo, base, ofs);
         break;
-    case 1:
+    case MO_UW:
         tcg_out_modrm_offset(s, OPC_MOVZWL + seg, datalo, base, ofs);
-        if (bswap) {
+        if (real_bswap) {
             tcg_out_rolw_8(s, datalo);
         }
         break;
-    case 1 | 4:
-        if (bswap) {
-            tcg_out_modrm_offset(s, OPC_MOVZWL + seg, datalo, base, ofs);
-            tcg_out_rolw_8(s, datalo);
+    case MO_SW:
+        if (real_bswap) {
+            if (have_movbe) {
+                tcg_out_modrm_offset(s, OPC_MOVBE_GyMy + P_DATA16 + seg,
+                                     datalo, base, ofs);
+            } else {
+                tcg_out_modrm_offset(s, OPC_MOVZWL + seg, datalo, base, ofs);
+                tcg_out_rolw_8(s, datalo);
+            }
             tcg_out_modrm(s, OPC_MOVSWL + P_REXW, datalo, datalo);
         } else {
             tcg_out_modrm_offset(s, OPC_MOVSWL + P_REXW + seg,
                                  datalo, base, ofs);
         }
         break;
-    case 2:
-        tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg, datalo, base, ofs);
+    case MO_UL:
+        tcg_out_modrm_offset(s, movop + seg, datalo, base, ofs);
         if (bswap) {
             tcg_out_bswap32(s, datalo);
         }
         break;
 #if TCG_TARGET_REG_BITS == 64
-    case 2 | 4:
-        if (bswap) {
-            tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg, datalo, base, ofs);
-            tcg_out_bswap32(s, datalo);
+    case MO_SL:
+        if (real_bswap) {
+            tcg_out_modrm_offset(s, movop + seg, datalo, base, ofs);
+            if (bswap) {
+                tcg_out_bswap32(s, datalo);
+            }
             tcg_out_ext32s(s, datalo, datalo);
         } else {
             tcg_out_modrm_offset(s, OPC_MOVSLQ + seg, datalo, base, ofs);
         }
         break;
 #endif
-    case 3:
+    case MO_Q:
         if (TCG_TARGET_REG_BITS == 64) {
-            tcg_out_modrm_offset(s, OPC_MOVL_GvEv + P_REXW + seg,
-                                 datalo, base, ofs);
+            tcg_out_modrm_offset(s, movop + P_REXW + seg, datalo, base, ofs);
             if (bswap) {
                 tcg_out_bswap64(s, datalo);
             }
         } else {
-            if (bswap) {
+            if (real_bswap) {
                 int t = datalo;
                 datalo = datahi;
                 datahi = t;
             }
             if (base != datalo) {
-                tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg,
-                                     datalo, base, ofs);
-                tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg,
-                                     datahi, base, ofs + 4);
+                tcg_out_modrm_offset(s, movop + seg, datalo, base, ofs);
+                tcg_out_modrm_offset(s, movop + seg, datahi, base, ofs + 4);
             } else {
-                tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg,
-                                     datahi, base, ofs + 4);
-                tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg,
-                                     datalo, base, ofs);
+                tcg_out_modrm_offset(s, movop + seg, datahi, base, ofs + 4);
+                tcg_out_modrm_offset(s, movop + seg, datalo, base, ofs);
             }
             if (bswap) {
                 tcg_out_bswap32(s, datalo);
@@ -1216,48 +1526,40 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, int datalo, int datahi,
 
 /* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and
    EAX. It will be useful once fixed registers globals are less
   common.  */
 
-static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
-                            int opc)
+static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
 {
-    int data_reg, data_reg2 = 0;
-    int addrlo_idx;
+    TCGReg datalo, datahi, addrlo;
+    TCGReg addrhi __attribute__((unused));
+    TCGMemOp opc;
 #if defined(CONFIG_SOFTMMU)
-    int mem_index, s_bits;
+    int mem_index;
+    TCGMemOp s_bits;
     uint8_t *label_ptr[2];
 #endif
 
-    data_reg = args[0];
-    addrlo_idx = 1;
-    if (TCG_TARGET_REG_BITS == 32 && opc == 3) {
-        data_reg2 = args[1];
-        addrlo_idx = 2;
-    }
+    datalo = *args++;
+    datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
+    addrlo = *args++;
+    addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
+    opc = *args++;
 
 #if defined(CONFIG_SOFTMMU)
-    mem_index = args[addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS)];
-    s_bits = opc & 3;
+    mem_index = *args++;
+    s_bits = opc & MO_SIZE;
 
-    tcg_out_tlb_load(s, addrlo_idx, mem_index, s_bits, args,
+    tcg_out_tlb_load(s, addrlo, addrhi, mem_index, s_bits,
                      label_ptr, offsetof(CPUTLBEntry, addr_read));
 
     /* TLB Hit.  */
-    tcg_out_qemu_ld_direct(s, data_reg, data_reg2, TCG_REG_L1, 0, 0, opc);
+    tcg_out_qemu_ld_direct(s, datalo, datahi, TCG_REG_L1, 0, 0, opc);
 
     /* Record the current context of a load into ldst label */
-    add_qemu_ldst_label(s,
-                        1,
-                        opc,
-                        data_reg,
-                        data_reg2,
-                        args[addrlo_idx],
-                        args[addrlo_idx + 1],
-                        mem_index,
-                        s->code_ptr,
-                        label_ptr);
+    add_qemu_ldst_label(s, true, opc, datalo, datahi, addrlo, addrhi,
+                        mem_index, s->code_ptr, label_ptr);
 #else
     {
         int32_t offset = GUEST_BASE;
-        int base = args[addrlo_idx];
+        TCGReg base = addrlo;
         int seg = 0;
 
         /* ??? We assume all operations have left us with register contents
@@ -1275,57 +1577,64 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
             offset = 0;
         }
 
-        tcg_out_qemu_ld_direct(s, data_reg, data_reg2, base, offset, seg, opc);
+        tcg_out_qemu_ld_direct(s, datalo, datahi, base, offset, seg, opc);
     }
 #endif
 }
 
-static void tcg_out_qemu_st_direct(TCGContext *s, int datalo, int datahi,
-                                   int base, tcg_target_long ofs, int seg,
-                                   int sizeop)
+static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
+                                   TCGReg base, intptr_t ofs, int seg,
+                                   TCGMemOp memop)
 {
-#ifdef TARGET_WORDS_BIGENDIAN
-    const int bswap = 1;
-#else
-    const int bswap = 0;
-#endif
     /* ??? Ideally we wouldn't need a scratch register.  For user-only,
        we could perform the bswap twice to restore the original value
       instead of moving to the scratch.  But as it is, the L constraint
       means that TCG_REG_L0 is definitely free here.  */
-    const int scratch = TCG_REG_L0;
+    const TCGReg scratch = TCG_REG_L0;
+    const TCGMemOp real_bswap = memop & MO_BSWAP;
+    TCGMemOp bswap = real_bswap;
+    int movop = OPC_MOVL_EvGv;
+
+    if (have_movbe && real_bswap) {
+        bswap = 0;
+        movop = OPC_MOVBE_MyGy;
+    }
 
-    switch (sizeop) {
-    case 0:
+    switch (memop & MO_SIZE) {
+    case MO_8:
+        /* In 32-bit mode, 8-bit stores can only happen from [abcd]x.
+           Use the scratch register if necessary.  */
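+        /* (Illustrative note: a byte MOV from register number 6 encodes
+           %dh in 32-bit mode rather than %sil, so a value living in %esi
+           must first be copied into the scratch, EAX here, before
+           storing.)  */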
+        if (TCG_TARGET_REG_BITS == 32 && datalo >= 4) {
+            tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
+            datalo = scratch;
+        }
         tcg_out_modrm_offset(s, OPC_MOVB_EvGv + P_REXB_R + seg,
                              datalo, base, ofs);
         break;
-    case 1:
+    case MO_16:
         if (bswap) {
             tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
             tcg_out_rolw_8(s, scratch);
             datalo = scratch;
         }
-        tcg_out_modrm_offset(s, OPC_MOVL_EvGv + P_DATA16 + seg,
-                             datalo, base, ofs);
+        tcg_out_modrm_offset(s, movop + P_DATA16 + seg, datalo, base, ofs);
        break;
-    case 2:
+    case MO_32:
         if (bswap) {
             tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
             tcg_out_bswap32(s, scratch);
             datalo = scratch;
         }
-        tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, datalo, base, ofs);
+        tcg_out_modrm_offset(s, movop + seg, datalo, base, ofs);
         break;
-    case 3:
+    case MO_64:
         if (TCG_TARGET_REG_BITS == 64) {
             if (bswap) {
                 tcg_out_mov(s, TCG_TYPE_I64, scratch, datalo);
                 tcg_out_bswap64(s, scratch);
                 datalo = scratch;
             }
-            tcg_out_modrm_offset(s, OPC_MOVL_EvGv + P_REXW + seg,
-                                 datalo, base, ofs);
+            tcg_out_modrm_offset(s, movop + P_REXW + seg, datalo, base, ofs);
         } else if (bswap) {
             tcg_out_mov(s, TCG_TYPE_I32, scratch, datahi);
             tcg_out_bswap32(s, scratch);
@@ -1334,8 +1643,13 @@ static void tcg_out_qemu_st_direct(TCGContext *s, int datalo, int datahi,
             tcg_out_bswap32(s, scratch);
             tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, scratch, base, ofs+4);
         } else {
-            tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, datalo, base, ofs);
-            tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, datahi, base, ofs+4);
+            if (real_bswap) {
+                int t = datalo;
+                datalo = datahi;
+                datahi = t;
+            }
+            tcg_out_modrm_offset(s, movop + seg, datalo, base, ofs);
+            tcg_out_modrm_offset(s, movop + seg, datahi, base, ofs+4);
         }
         break;
     default:
@@ -1343,48 +1657,40 @@ static void tcg_out_qemu_st_direct(TCGContext *s, int datalo, int datahi,
     }
 }
 
-static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
-                            int opc)
+static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
 {
-    int data_reg, data_reg2 = 0;
-    int addrlo_idx;
+    TCGReg datalo, datahi, addrlo;
+    TCGReg addrhi __attribute__((unused));
+    TCGMemOp opc;
 #if defined(CONFIG_SOFTMMU)
-    int mem_index, s_bits;
+    int mem_index;
+    TCGMemOp s_bits;
     uint8_t *label_ptr[2];
 #endif
 
-    data_reg = args[0];
-    addrlo_idx = 1;
-    if (TCG_TARGET_REG_BITS == 32 && opc == 3) {
-        data_reg2 = args[1];
-        addrlo_idx = 2;
-    }
+    datalo = *args++;
+    datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
+    addrlo = *args++;
+    addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
+    opc = *args++;
 
 #if defined(CONFIG_SOFTMMU)
-    mem_index = args[addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS)];
-    s_bits = opc;
+    mem_index = *args++;
+    s_bits = opc & MO_SIZE;
 
-    tcg_out_tlb_load(s, addrlo_idx, mem_index, s_bits, args,
+    tcg_out_tlb_load(s, addrlo, addrhi, mem_index, s_bits,
                      label_ptr, offsetof(CPUTLBEntry, addr_write));
 
     /* TLB Hit.  */
-    tcg_out_qemu_st_direct(s, data_reg, data_reg2, TCG_REG_L1, 0, 0, opc);
+    tcg_out_qemu_st_direct(s, datalo, datahi, TCG_REG_L1, 0, 0, opc);
 
     /* Record the current context of a store into ldst label */
-    add_qemu_ldst_label(s,
-                        0,
-                        opc,
-                        data_reg,
-                        data_reg2,
-                        args[addrlo_idx],
-                        args[addrlo_idx + 1],
-                        mem_index,
-                        s->code_ptr,
-                        label_ptr);
+    add_qemu_ldst_label(s, false, opc, datalo, datahi, addrlo, addrhi,
+                        mem_index, s->code_ptr, label_ptr);
 #else
     {
         int32_t offset = GUEST_BASE;
-        int base = args[addrlo_idx];
+        TCGReg base = addrlo;
         int seg = 0;
 
        /* ??? We assume all operations have left us with register contents
@@ -1402,270 +1708,15 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
             offset = 0;
         }
 
-        tcg_out_qemu_st_direct(s, data_reg, data_reg2, base, offset, seg, opc);
+        tcg_out_qemu_st_direct(s, datalo, datahi, base, offset, seg, opc);
     }
 #endif
 }
 
-#if defined(CONFIG_SOFTMMU)
-/*
- * Record the context of a call to the out of line helper code for the slow path
- * for a load or store, so that we can later generate the correct helper code
- */
-static void add_qemu_ldst_label(TCGContext *s,
-                                int is_ld,
-                                int opc,
-                                int data_reg,
-                                int data_reg2,
-                                int addrlo_reg,
-                                int addrhi_reg,
-                                int mem_index,
-                                uint8_t *raddr,
-                                uint8_t **label_ptr)
-{
-    int idx;
-    TCGLabelQemuLdst *label;
-
-    if (s->nb_qemu_ldst_labels >= TCG_MAX_QEMU_LDST) {
-        tcg_abort();
-    }
-
-    idx = s->nb_qemu_ldst_labels++;
-    label = (TCGLabelQemuLdst *)&s->qemu_ldst_labels[idx];
-    label->is_ld = is_ld;
-    label->opc = opc;
-    label->datalo_reg = data_reg;
-    label->datahi_reg = data_reg2;
-    label->addrlo_reg = addrlo_reg;
-    label->addrhi_reg = addrhi_reg;
-    label->mem_index = mem_index;
-    label->raddr = raddr;
-    label->label_ptr[0] = label_ptr[0];
-    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
-        label->label_ptr[1] = label_ptr[1];
-    }
-}
-
-/*
- * Generate code for the slow path for a load at the end of block
- */
-static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *label)
-{
-    int s_bits;
-    int opc = label->opc;
-    int mem_index = label->mem_index;
-#if TCG_TARGET_REG_BITS == 32
-    int stack_adjust;
-    int addrlo_reg = label->addrlo_reg;
-    int addrhi_reg = label->addrhi_reg;
-#endif
-    int data_reg = label->datalo_reg;
-    int data_reg2 = label->datahi_reg;
-    uint8_t *raddr = label->raddr;
-    uint8_t **label_ptr = &label->label_ptr[0];
-
-    s_bits = opc & 3;
-
-    /* resolve label address */
-    *(uint32_t *)label_ptr[0] = (uint32_t)(s->code_ptr - label_ptr[0] - 4);
-    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
-        *(uint32_t *)label_ptr[1] = (uint32_t)(s->code_ptr - label_ptr[1] - 4);
-    }
-
-#if TCG_TARGET_REG_BITS == 32
-    tcg_out_pushi(s, mem_index);
-    stack_adjust = 4;
-    if (TARGET_LONG_BITS == 64) {
-        tcg_out_push(s, addrhi_reg);
-        stack_adjust += 4;
-    }
-    tcg_out_push(s, addrlo_reg);
-    stack_adjust += 4;
-    tcg_out_push(s, TCG_AREG0);
-    stack_adjust += 4;
-#else
-    tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[0], TCG_AREG0);
-    /* The second argument is already loaded with addrlo.  */
-    tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2], mem_index);
-#endif
-
-    /* Code generation of qemu_ld/st's slow path calling MMU helper
-
-       PRE_PROC ...
-       call MMU helper
-       jmp POST_PROC (2b) : short forward jump <- GETRA()
-       jmp next_code (5b) : dummy long backward jump which is never executed
-       POST_PROC ... : do post-processing <- GETRA() + 7
-       jmp next_code : jump to the code corresponding to next IR of qemu_ld/st
-    */
-
-    tcg_out_calli(s, (tcg_target_long)qemu_ld_helpers[s_bits]);
-
-    /* Jump to post-processing code */
-    tcg_out8(s, OPC_JMP_short);
-    tcg_out8(s, 5);
-    /* Dummy backward jump having information of fast path'pc for MMU helpers */
-    tcg_out8(s, OPC_JMP_long);
-    *(int32_t *)s->code_ptr = (int32_t)(raddr - s->code_ptr - 4);
-    s->code_ptr += 4;
-
-#if TCG_TARGET_REG_BITS == 32
-    if (stack_adjust == (TCG_TARGET_REG_BITS / 8)) {
-        /* Pop and discard.  This is 2 bytes smaller than the add.  */
-        tcg_out_pop(s, TCG_REG_ECX);
-    } else if (stack_adjust != 0) {
-        tcg_out_addi(s, TCG_REG_CALL_STACK, stack_adjust);
-    }
-#endif
-
-    switch(opc) {
-    case 0 | 4:
-        tcg_out_ext8s(s, data_reg, TCG_REG_EAX, P_REXW);
-        break;
-    case 1 | 4:
-        tcg_out_ext16s(s, data_reg, TCG_REG_EAX, P_REXW);
-        break;
-    case 0:
-        tcg_out_ext8u(s, data_reg, TCG_REG_EAX);
-        break;
-    case 1:
-        tcg_out_ext16u(s, data_reg, TCG_REG_EAX);
-        break;
-    case 2:
-        tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
-        break;
-#if TCG_TARGET_REG_BITS == 64
-    case 2 | 4:
-        tcg_out_ext32s(s, data_reg, TCG_REG_EAX);
-        break;
-#endif
-    case 3:
-        if (TCG_TARGET_REG_BITS == 64) {
-            tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_RAX);
-        } else if (data_reg == TCG_REG_EDX) {
-            /* xchg %edx, %eax */
-            tcg_out_opc(s, OPC_XCHG_ax_r32 + TCG_REG_EDX, 0, 0, 0);
-            tcg_out_mov(s, TCG_TYPE_I32, data_reg2, TCG_REG_EAX);
-        } else {
-            tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
-            tcg_out_mov(s, TCG_TYPE_I32, data_reg2, TCG_REG_EDX);
-        }
-        break;
-    default:
-        tcg_abort();
-    }
-
-    /* Jump to the code corresponding to next IR of qemu_st */
-    tcg_out_jmp(s, (tcg_target_long)raddr);
-}
-
-/*
- * Generate code for the slow path for a store at the end of block
- */
-static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *label)
-{
-    int s_bits;
-    int stack_adjust;
-    int opc = label->opc;
-    int mem_index = label->mem_index;
-    int data_reg = label->datalo_reg;
-#if TCG_TARGET_REG_BITS == 32
-    int data_reg2 = label->datahi_reg;
-    int addrlo_reg = label->addrlo_reg;
-    int addrhi_reg = label->addrhi_reg;
-#endif
-    uint8_t *raddr = label->raddr;
-    uint8_t **label_ptr = &label->label_ptr[0];
-
-    s_bits = opc & 3;
-
-    /* resolve label address */
-    *(uint32_t *)label_ptr[0] = (uint32_t)(s->code_ptr - label_ptr[0] - 4);
-    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
-        *(uint32_t *)label_ptr[1] = (uint32_t)(s->code_ptr - label_ptr[1] - 4);
-    }
-
-#if TCG_TARGET_REG_BITS == 32
-    tcg_out_pushi(s, mem_index);
-    stack_adjust = 4;
-    if (opc == 3) {
-        tcg_out_push(s, data_reg2);
-        stack_adjust += 4;
-    }
-    tcg_out_push(s, data_reg);
-    stack_adjust += 4;
-    if (TARGET_LONG_BITS == 64) {
-        tcg_out_push(s, addrhi_reg);
-        stack_adjust += 4;
-    }
-    tcg_out_push(s, addrlo_reg);
-    stack_adjust += 4;
-    tcg_out_push(s, TCG_AREG0);
-    stack_adjust += 4;
-#else
-    tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[0], TCG_AREG0);
-    /* The second argument is already loaded with addrlo.  */
-    tcg_out_mov(s, (opc == 3 ? TCG_TYPE_I64 : TCG_TYPE_I32),
-                tcg_target_call_iarg_regs[2], data_reg);
-    tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3], mem_index);
-    stack_adjust = 0;
-#endif
-
-    /* Code generation of qemu_ld/st's slow path calling MMU helper
-
-       PRE_PROC ...
-       call MMU helper
-       jmp POST_PROC (2b) : short forward jump <- GETRA()
-       jmp next_code (5b) : dummy long backward jump which is never executed
-       POST_PROC ... : do post-processing <- GETRA() + 7
-       jmp next_code : jump to the code corresponding to next IR of qemu_ld/st
-    */
-
-    tcg_out_calli(s, (tcg_target_long)qemu_st_helpers[s_bits]);
-
-    /* Jump to post-processing code */
-    tcg_out8(s, OPC_JMP_short);
-    tcg_out8(s, 5);
-    /* Dummy backward jump having information of fast path'pc for MMU helpers */
-    tcg_out8(s, OPC_JMP_long);
-    *(int32_t *)s->code_ptr = (int32_t)(raddr - s->code_ptr - 4);
-    s->code_ptr += 4;
-
-    if (stack_adjust == (TCG_TARGET_REG_BITS / 8)) {
-        /* Pop and discard.  This is 2 bytes smaller than the add.  */
-        tcg_out_pop(s, TCG_REG_ECX);
-    } else if (stack_adjust != 0) {
-        tcg_out_addi(s, TCG_REG_CALL_STACK, stack_adjust);
-    }
-
-    /* Jump to the code corresponding to next IR of qemu_st */
-    tcg_out_jmp(s, (tcg_target_long)raddr);
-}
-
-/*
- * Generate TB finalization at the end of block
- */
-void tcg_out_tb_finalize(TCGContext *s)
-{
-    int i;
-    TCGLabelQemuLdst *label;
-
-    /* qemu_ld/st slow paths */
-    for (i = 0; i < s->nb_qemu_ldst_labels; i++) {
-        label = (TCGLabelQemuLdst *)&s->qemu_ldst_labels[i];
-        if (label->is_ld) {
-            tcg_out_qemu_ld_slow_path(s, label);
-        } else {
-            tcg_out_qemu_st_slow_path(s, label);
-        }
-    }
-}
-#endif /* CONFIG_SOFTMMU */
-
 static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                               const TCGArg *args, const int *const_args)
 {
-    int c, rexw = 0;
+    int c, vexop, rexw = 0;
 
 #if TCG_TARGET_REG_BITS == 64
 # define OP_32_64(x) \
@@ -1680,7 +1731,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     switch(opc) {
     case INDEX_op_exit_tb:
         tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_EAX, args[0]);
-        tcg_out_jmp(s, (tcg_target_long) tb_ret_addr);
+        tcg_out_jmp(s, (uintptr_t)tb_ret_addr);
         break;
     case INDEX_op_goto_tb:
         if (s->tb_jmp_offset) {
@@ -1691,7 +1742,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         } else {
             /* indirect jump method */
             tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, -1,
-                                 (tcg_target_long)(s->tb_next + args[0]));
+                                 (intptr_t)(s->tb_next + args[0]));
         }
         s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
         break;
@@ -1801,6 +1852,16 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         }
         break;
 
+    OP_32_64(andc):
+        if (const_args[2]) {
+            tcg_out_mov(s, rexw ? TCG_TYPE_I64 : TCG_TYPE_I32,
+                        args[0], args[1]);
+            tgen_arithi(s, ARITH_AND + rexw, args[0], ~args[2], 0);
+        } else {
+            tcg_out_vex_modrm(s, OPC_ANDN + rexw, args[0], args[2], args[1]);
+        }
+        break;
+
     OP_32_64(mul):
         if (const_args[2]) {
             int32_t val;
@@ -1826,19 +1887,28 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     OP_32_64(shl):
         c = SHIFT_SHL;
-        goto gen_shift;
+        vexop = OPC_SHLX;
+        goto gen_shift_maybe_vex;
     OP_32_64(shr):
         c = SHIFT_SHR;
-        goto gen_shift;
+        vexop = OPC_SHRX;
+        goto gen_shift_maybe_vex;
     OP_32_64(sar):
         c = SHIFT_SAR;
-        goto gen_shift;
+        vexop = OPC_SARX;
+        goto gen_shift_maybe_vex;
     OP_32_64(rotl):
         c = SHIFT_ROL;
         goto gen_shift;
     OP_32_64(rotr):
         c = SHIFT_ROR;
         goto gen_shift;
+    gen_shift_maybe_vex:
+        if (have_bmi2 && !const_args[2]) {
+            tcg_out_vex_modrm(s, vexop + rexw, args[0], args[2], args[1]);
+            break;
+        }
+        /* FALLTHRU */
     gen_shift:
         if (const_args[2]) {
             tcg_out_shifti(s, c + rexw, args[0], args[2]);
@@ -1887,75 +1957,57 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_ext16u(s, args[0], args[1]);
         break;
 
-    case INDEX_op_qemu_ld8u:
+    case INDEX_op_qemu_ld_i32:
         tcg_out_qemu_ld(s, args, 0);
         break;
-    case INDEX_op_qemu_ld8s:
-        tcg_out_qemu_ld(s, args, 0 | 4);
-        break;
-    case INDEX_op_qemu_ld16u:
+    case INDEX_op_qemu_ld_i64:
         tcg_out_qemu_ld(s, args, 1);
         break;
-    case INDEX_op_qemu_ld16s:
-        tcg_out_qemu_ld(s, args, 1 | 4);
-        break;
-#if TCG_TARGET_REG_BITS == 64
-    case INDEX_op_qemu_ld32u:
-#endif
-    case INDEX_op_qemu_ld32:
-        tcg_out_qemu_ld(s, args, 2);
-        break;
-    case INDEX_op_qemu_ld64:
-        tcg_out_qemu_ld(s, args, 3);
-        break;
-
-    case INDEX_op_qemu_st8:
+    case INDEX_op_qemu_st_i32:
         tcg_out_qemu_st(s, args, 0);
         break;
-    case INDEX_op_qemu_st16:
+    case INDEX_op_qemu_st_i64:
         tcg_out_qemu_st(s, args, 1);
         break;
-    case INDEX_op_qemu_st32:
-        tcg_out_qemu_st(s, args, 2);
-        break;
-    case INDEX_op_qemu_st64:
-        tcg_out_qemu_st(s, args, 3);
-        break;
 
-#if TCG_TARGET_REG_BITS == 32
-    case INDEX_op_brcond2_i32:
-        tcg_out_brcond2(s, args, const_args, 0);
-        break;
-    case INDEX_op_setcond2_i32:
-        tcg_out_setcond2(s, args, const_args);
+    OP_32_64(mulu2):
+        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_MUL, args[3]);
         break;
-    case INDEX_op_mulu2_i32:
-        tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_MUL, args[3]);
+    OP_32_64(muls2):
+        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IMUL, args[3]);
         break;
-    case INDEX_op_add2_i32:
+    OP_32_64(add2):
         if (const_args[4]) {
-            tgen_arithi(s, ARITH_ADD, args[0], args[4], 1);
+            tgen_arithi(s, ARITH_ADD + rexw, args[0], args[4], 1);
         } else {
-            tgen_arithr(s, ARITH_ADD, args[0], args[4]);
+            tgen_arithr(s, ARITH_ADD + rexw, args[0], args[4]);
         }
         if (const_args[5]) {
-            tgen_arithi(s, ARITH_ADC, args[1], args[5], 1);
+            tgen_arithi(s, ARITH_ADC + rexw, args[1], args[5], 1);
         } else {
-            tgen_arithr(s, ARITH_ADC, args[1], args[5]);
+            tgen_arithr(s, ARITH_ADC + rexw, args[1], args[5]);
         }
         break;
-    case INDEX_op_sub2_i32:
+    OP_32_64(sub2):
         if (const_args[4]) {
-            tgen_arithi(s, ARITH_SUB, args[0], args[4], 1);
+            tgen_arithi(s, ARITH_SUB + rexw, args[0], args[4], 1);
         } else {
-            tgen_arithr(s, ARITH_SUB, args[0], args[4]);
+            tgen_arithr(s, ARITH_SUB + rexw, args[0], args[4]);
         }
         if (const_args[5]) {
-            tgen_arithi(s, ARITH_SBB, args[1], args[5], 1);
+            tgen_arithi(s, ARITH_SBB + rexw, args[1], args[5], 1);
         } else {
-            tgen_arithr(s, ARITH_SBB, args[1], args[5]);
+            tgen_arithr(s, ARITH_SBB + rexw, args[1], args[5]);
         }
         break;
+
+#if TCG_TARGET_REG_BITS == 32
+    case INDEX_op_brcond2_i32:
+        tcg_out_brcond2(s, args, const_args, 0);
+        break;
+    case INDEX_op_setcond2_i32:
+        tcg_out_setcond2(s, args, const_args);
+        break;
 #else /* TCG_TARGET_REG_BITS == 64 */
     case INDEX_op_movi_i64:
         tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
@@ -1975,9 +2027,6 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
             tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
         }
         break;
-    case INDEX_op_qemu_ld32s:
-        tcg_out_qemu_ld(s, args, 2 | 4);
-        break;
 
     case INDEX_op_brcond_i64:
         tcg_out_brcond64(s, args[2], args[0], args[1], const_args[1],
@@ -2050,10 +2099,11 @@ static const TCGTargetOpDef x86_op_defs[] = {
     { INDEX_op_and_i32, { "r", "0", "ri" } },
     { INDEX_op_or_i32, { "r", "0", "ri" } },
     { INDEX_op_xor_i32, { "r", "0", "ri" } },
+    { INDEX_op_andc_i32, { "r", "r", "ri" } },
 
-    { INDEX_op_shl_i32, { "r", "0", "ci" } },
-    { INDEX_op_shr_i32, { "r", "0", "ci" } },
-    { INDEX_op_sar_i32, { "r", "0", "ci" } },
+    { INDEX_op_shl_i32, { "r", "0", "Ci" } },
+    { INDEX_op_shr_i32, { "r", "0", "Ci" } },
+    { INDEX_op_sar_i32, { "r", "0", "Ci" } },
     { INDEX_op_rotl_i32, { "r", "0", "ci" } },
     { INDEX_op_rotr_i32, { "r", "0", "ci" } },
 
@@ -2074,14 +2124,14 @@ static const TCGTargetOpDef x86_op_defs[] = {
     { INDEX_op_setcond_i32, { "q", "r", "ri" } },
 
     { INDEX_op_deposit_i32, { "Q", "0", "Q" } },
-#if TCG_TARGET_HAS_movcond_i32
     { INDEX_op_movcond_i32, { "r", "r", "ri", "r", "0" } },
-#endif
 
-#if TCG_TARGET_REG_BITS == 32
     { INDEX_op_mulu2_i32, { "a", "d", "a", "r" } },
+    { INDEX_op_muls2_i32, { "a", "d", "a", "r" } },
     { INDEX_op_add2_i32, { "r", "r", "0", "1", "ri", "ri" } },
     { INDEX_op_sub2_i32, { "r", "r", "0", "1", "ri", "ri" } },
+
+#if TCG_TARGET_REG_BITS == 32
     { INDEX_op_brcond2_i32, { "r", "r", "ri", "ri" } },
     { INDEX_op_setcond2_i32, { "r", "r", "r", "ri", "ri" } },
 #else
@@ -2107,10 +2157,11 @@ static const TCGTargetOpDef x86_op_defs[] = {
     { INDEX_op_and_i64, { "r", "0", "reZ" } },
     { INDEX_op_or_i64, { "r", "0", "re" } },
     { INDEX_op_xor_i64, { "r", "0", "re" } },
+    { INDEX_op_andc_i64, { "r", "r", "rI" } },
 
-    { INDEX_op_shl_i64, { "r", "0", "ci" } },
-    { INDEX_op_shr_i64, { "r", "0", "ci" } },
-    { INDEX_op_sar_i64, { "r", "0", "ci" } },
+    { INDEX_op_shl_i64, { "r", "0", "Ci" } },
+    { INDEX_op_shr_i64, { "r", "0", "Ci" } },
+    { INDEX_op_sar_i64, { "r", "0", "Ci" } },
     { INDEX_op_rotl_i64, { "r", "0", "ci" } },
     { INDEX_op_rotr_i64, { "r", "0", "ci" } },
 
@@ -2132,46 +2183,28 @@ static const TCGTargetOpDef x86_op_defs[] = {
 
     { INDEX_op_deposit_i64, { "Q", "0", "Q" } },
     { INDEX_op_movcond_i64, { "r", "r", "re", "r", "0" } },
+
+    { INDEX_op_mulu2_i64, { "a", "d", "a", "r" } },
+    { INDEX_op_muls2_i64, { "a", "d", "a", "r" } },
+    { INDEX_op_add2_i64, { "r", "r", "0", "1", "re", "re" } },
+    { INDEX_op_sub2_i64, { "r", "r", "0", "1", "re", "re" } },
 #endif
 
 #if TCG_TARGET_REG_BITS == 64
-    { INDEX_op_qemu_ld8u, { "r", "L" } },
-    { INDEX_op_qemu_ld8s, { "r", "L" } },
-    { INDEX_op_qemu_ld16u, { "r", "L" } },
-    { INDEX_op_qemu_ld16s, { "r", "L" } },
-    { INDEX_op_qemu_ld32, { "r", "L" } },
-    { INDEX_op_qemu_ld32u, { "r", "L" } },
-    { INDEX_op_qemu_ld32s, { "r", "L" } },
-    { INDEX_op_qemu_ld64, { "r", "L" } },
-
-    { INDEX_op_qemu_st8, { "L", "L" } },
-    { INDEX_op_qemu_st16, { "L", "L" } },
-    { INDEX_op_qemu_st32, { "L", "L" } },
-    { INDEX_op_qemu_st64, { "L", "L" } },
+    { INDEX_op_qemu_ld_i32, { "r", "L" } },
+    { INDEX_op_qemu_st_i32, { "L", "L" } },
+    { INDEX_op_qemu_ld_i64, { "r", "L" } },
+    { INDEX_op_qemu_st_i64, { "L", "L" } },
 #elif TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
-    { INDEX_op_qemu_ld8u, { "r", "L" } },
-    { INDEX_op_qemu_ld8s, { "r", "L" } },
-    { INDEX_op_qemu_ld16u, { "r", "L" } },
-    { INDEX_op_qemu_ld16s, { "r", "L" } },
-    { INDEX_op_qemu_ld32, { "r", "L" } },
-    { INDEX_op_qemu_ld64, { "r", "r", "L" } },
-
-    { INDEX_op_qemu_st8, { "cb", "L" } },
-    { INDEX_op_qemu_st16, { "L", "L" } },
-    { INDEX_op_qemu_st32, { "L", "L" } },
-    { INDEX_op_qemu_st64, { "L", "L", "L" } },
+    { INDEX_op_qemu_ld_i32, { "r", "L" } },
+    { INDEX_op_qemu_st_i32, { "L", "L" } },
+    { INDEX_op_qemu_ld_i64, { "r", "r", "L" } },
+    { INDEX_op_qemu_st_i64, { "L", "L", "L" } },
 #else
-    { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
-    { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
-    { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
-    { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
-    { INDEX_op_qemu_ld32, { "r", "L", "L" } },
-    { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } },
-
-    { INDEX_op_qemu_st8, { "cb", "L", "L" } },
-    { INDEX_op_qemu_st16, { "L", "L", "L" } },
-    { INDEX_op_qemu_st32, { "L", "L", "L" } },
-    { INDEX_op_qemu_st64, { "L", "L", "L", "L" } },
+    { INDEX_op_qemu_ld_i32, { "r", "L", "L" } },
+    { INDEX_op_qemu_st_i32, { "L", "L", "L" } },
+    { INDEX_op_qemu_ld_i64, { "r", "r", "L", "L" } },
+    { INDEX_op_qemu_st_i64, { "L", "L", "L", "L" } },
 #endif
     { -1 },
 };
@@ -2262,20 +2295,35 @@ static void tcg_target_qemu_prologue(TCGContext *s)
 
 static void tcg_target_init(TCGContext *s)
 {
-    /* For 32-bit, 99% certainty that we're running on hardware that supports
-       cmov, but we still need to check.  In case cmov is not available, we'll
-       use a small forward branch.  */
+#ifdef CONFIG_CPUID_H
+    unsigned a, b, c, d;
+    int max = __get_cpuid_max(0, 0);
+
+    if (max >= 1) {
+        __cpuid(1, a, b, c, d);
 #ifndef have_cmov
-    {
-        unsigned a, b, c, d;
-        have_cmov = (__get_cpuid(1, &a, &b, &c, &d) && (d & bit_CMOV));
-    }
+        /* For 32-bit, 99% certainty that we're running on hardware that
+           supports cmov, but we still need to check.  In case cmov is not
+           available, we'll use a small forward branch.  */
+        have_cmov = (d & bit_CMOV) != 0;
+#endif
+#ifndef have_movbe
+        /* MOVBE is only available on Intel Atom and Haswell CPUs, so we
+           need to probe for it.  */
+        have_movbe = (c & bit_MOVBE) != 0;
 #endif
+    }
 
-#if !defined(CONFIG_USER_ONLY)
-    /* fail safe */
-    if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry))
-        tcg_abort();
+    if (max >= 7) {
+        /* BMI1 is available on AMD Piledriver and Intel Haswell CPUs.  */
+        __cpuid_count(7, 0, a, b, c, d);
+#ifdef bit_BMI
+        have_bmi1 = (b & bit_BMI) != 0;
+#endif
+#ifndef have_bmi2
+        have_bmi2 = (b & bit_BMI2) != 0;
+#endif
+    }
 #endif
 
     if (TCG_TARGET_REG_BITS == 64) {
@@ -2306,30 +2354,16 @@ static void tcg_target_init(TCGContext *s)
     tcg_add_target_add_op_defs(x86_op_defs);
 }
 
-typedef struct {
-    uint32_t len __attribute__((aligned((sizeof(void *)))));
-    uint32_t id;
-    uint8_t version;
-    char augmentation[1];
-    uint8_t code_align;
-    uint8_t data_align;
-    uint8_t return_column;
-} DebugFrameCIE;
-
-typedef struct {
-    uint32_t len __attribute__((aligned((sizeof(void *)))));
-    uint32_t cie_offset;
-    tcg_target_long func_start __attribute__((packed));
-    tcg_target_long func_len __attribute__((packed));
-    uint8_t def_cfa[4];
-    uint8_t reg_ofs[14];
-} DebugFrameFDE;
-
 typedef struct {
     DebugFrameCIE cie;
-    DebugFrameFDE fde;
+    DebugFrameFDEHeader fde;
+    uint8_t fde_def_cfa[4];
+    uint8_t fde_reg_ofs[14];
 } DebugFrame;
 
+/* We're expecting a 2 byte uleb128 encoded value.  */
+QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
+
 #if !defined(__ELF__)
     /* Host machine without ELF. */
 #elif TCG_TARGET_REG_BITS == 64
@@ -2342,13 +2376,15 @@ static DebugFrame debug_frame = {
     .cie.data_align = 0x78,             /* sleb128 -8 */
     .cie.return_column = 16,
 
-    .fde.len = sizeof(DebugFrameFDE)-4, /* length after .len member */
-    .fde.def_cfa = {
+    /* Total FDE size does not include the "len" member.  */
+    .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),
+
+    .fde_def_cfa = {
         12, 7,                          /* DW_CFA_def_cfa %rsp, ... */
         (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
         (FRAME_SIZE >> 7)
     },
-    .fde.reg_ofs = {
+    .fde_reg_ofs = {
         0x90, 1,                        /* DW_CFA_offset, %rip, -8 */
         /* The following ordering must match tcg_target_callee_save_regs.  */
         0x86, 2,                        /* DW_CFA_offset, %rbp, -16 */
@@ -2369,13 +2405,15 @@ static DebugFrame debug_frame = {
     .cie.data_align = 0x7c,             /* sleb128 -4 */
     .cie.return_column = 8,
 
-    .fde.len = sizeof(DebugFrameFDE)-4, /* length after .len member */
-    .fde.def_cfa = {
+    /* Total FDE size does not include the "len" member.  */
+    .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),
+
+    .fde_def_cfa = {
         12, 4,                          /* DW_CFA_def_cfa %esp, ... */
         (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
         (FRAME_SIZE >> 7)
     },
-    .fde.reg_ofs = {
+    .fde_reg_ofs = {
         0x88, 1,                        /* DW_CFA_offset, %eip, -4 */
         /* The following ordering must match tcg_target_callee_save_regs.  */
         0x85, 2,                        /* DW_CFA_offset, %ebp, -8 */
@@ -2389,10 +2427,7 @@ static DebugFrame debug_frame = {
 #if defined(ELF_HOST_MACHINE)
 void tcg_register_jit(void *buf, size_t buf_size)
 {
-    /* We're expecting a 2 byte uleb128 encoded value.  */
-    assert(FRAME_SIZE >> 14 == 0);
-
-    debug_frame.fde.func_start = (tcg_target_long) buf;
+    debug_frame.fde.func_start = (uintptr_t)buf;
     debug_frame.fde.func_len = buf_size;
 
     tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));