# define have_bmi2 0
#endif
-static uint8_t *tb_ret_addr;
+static tcg_insn_unit *tb_ret_addr;
-static void patch_reloc(uint8_t *code_ptr, int type,
+static void patch_reloc(tcg_insn_unit *code_ptr, int type,
intptr_t value, intptr_t addend)
{
value += addend;
if (value != (int32_t)value) {
tcg_abort();
}
- *(uint32_t *)code_ptr = value;
+ tcg_patch32(code_ptr, value);
break;
case R_386_PC8:
value -= (uintptr_t)code_ptr;
if (value != (int8_t)value) {
tcg_abort();
}
- *(uint8_t *)code_ptr = value;
+ tcg_patch8(code_ptr, value);
break;
default:
tcg_abort();
}
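/* For reference, a minimal sketch of the tcg_patch* helpers assumed above
 * (they live in the generic tcg code, not in this file):
 *
 *   static void tcg_patch8(tcg_insn_unit *p, uint8_t i)  { *(uint8_t *)p = i; }
 *   static void tcg_patch32(tcg_insn_unit *p, uint32_t i) { *(uint32_t *)p = i; }
 *
 * i.e. the same raw stores as before, routed through the new tcg_insn_unit
 * code-pointer type.
 */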
/* Use SMALL != 0 to force a short forward branch. */
-static void tcg_out_jxx(TCGContext *s, int opc, int label_index, int small)
+static void tcg_out_jxx(TCGContext *s, int opc, TCGLabel *l, int small)
{
int32_t val, val1;
- TCGLabel *l = &s->labels[label_index];
if (l->has_value) {
- val = l->u.value - (intptr_t)s->code_ptr;
+ val = tcg_pcrel_diff(s, l->u.value_ptr);
val1 = val - 2;
if ((int8_t)val1 == val1) {
if (opc == -1) {
} else {
tcg_out8(s, OPC_JCC_short + opc);
}
- tcg_out_reloc(s, s->code_ptr, R_386_PC8, label_index, -1);
+ tcg_out_reloc(s, s->code_ptr, R_386_PC8, l, -1);
s->code_ptr += 1;
} else {
if (opc == -1) {
} else {
tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0);
}
- tcg_out_reloc(s, s->code_ptr, R_386_PC32, label_index, -4);
+ tcg_out_reloc(s, s->code_ptr, R_386_PC32, l, -4);
s->code_ptr += 4;
}
}
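/* tcg_pcrel_diff() is assumed to be the generic helper from tcg/tcg.h,
 * roughly:
 *
 *   static inline ptrdiff_t tcg_pcrel_diff(TCGContext *s, void *target)
 *   {
 *       return tcg_ptr_byte_diff(target, s->code_ptr);
 *   }
 *
 * so "val" above is still the byte displacement from the current output
 * pointer, as with the old (intptr_t) subtraction.
 */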
static void tcg_out_brcond32(TCGContext *s, TCGCond cond,
TCGArg arg1, TCGArg arg2, int const_arg2,
- int label_index, int small)
+ TCGLabel *label, int small)
{
tcg_out_cmp(s, arg1, arg2, const_arg2, 0);
- tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index, small);
+ tcg_out_jxx(s, tcg_cond_to_jcc[cond], label, small);
}
#if TCG_TARGET_REG_BITS == 64
static void tcg_out_brcond64(TCGContext *s, TCGCond cond,
TCGArg arg1, TCGArg arg2, int const_arg2,
- int label_index, int small)
+ TCGLabel *label, int small)
{
tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW);
- tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index, small);
+ tcg_out_jxx(s, tcg_cond_to_jcc[cond], label, small);
}
#else
/* XXX: we implement it at the target level to avoid having to
static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
const int *const_args, int small)
{
- int label_next;
- label_next = gen_new_label();
+ TCGLabel *label_next = gen_new_label();
+ TCGLabel *label_this = arg_label(args[5]);
+
switch(args[4]) {
case TCG_COND_EQ:
tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2],
label_next, 1);
tcg_out_brcond32(s, TCG_COND_EQ, args[1], args[3], const_args[3],
- args[5], small);
+ label_this, small);
break;
case TCG_COND_NE:
tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2],
- args[5], small);
+ label_this, small);
tcg_out_brcond32(s, TCG_COND_NE, args[1], args[3], const_args[3],
- args[5], small);
+ label_this, small);
break;
case TCG_COND_LT:
tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3],
- args[5], small);
+ label_this, small);
tcg_out_jxx(s, JCC_JNE, label_next, 1);
tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2],
- args[5], small);
+ label_this, small);
break;
case TCG_COND_LE:
tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3],
- args[5], small);
+ label_this, small);
tcg_out_jxx(s, JCC_JNE, label_next, 1);
tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2],
- args[5], small);
+ label_this, small);
break;
case TCG_COND_GT:
tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3],
- args[5], small);
+ label_this, small);
tcg_out_jxx(s, JCC_JNE, label_next, 1);
tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2],
- args[5], small);
+ label_this, small);
break;
case TCG_COND_GE:
tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3],
- args[5], small);
+ label_this, small);
tcg_out_jxx(s, JCC_JNE, label_next, 1);
tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2],
- args[5], small);
+ label_this, small);
break;
case TCG_COND_LTU:
tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3],
- args[5], small);
+ label_this, small);
tcg_out_jxx(s, JCC_JNE, label_next, 1);
tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2],
- args[5], small);
+ label_this, small);
break;
case TCG_COND_LEU:
tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3],
- args[5], small);
+ label_this, small);
tcg_out_jxx(s, JCC_JNE, label_next, 1);
tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2],
- args[5], small);
+ label_this, small);
break;
case TCG_COND_GTU:
tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3],
- args[5], small);
+ label_this, small);
tcg_out_jxx(s, JCC_JNE, label_next, 1);
tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2],
- args[5], small);
+ label_this, small);
break;
case TCG_COND_GEU:
tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3],
- args[5], small);
+ label_this, small);
tcg_out_jxx(s, JCC_JNE, label_next, 1);
tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2],
- args[5], small);
+ label_this, small);
break;
default:
tcg_abort();
const int *const_args)
{
TCGArg new_args[6];
- int label_true, label_over;
+ TCGLabel *label_true, *label_over;
memcpy(new_args, args+1, 5*sizeof(TCGArg));
label_true = gen_new_label();
label_over = gen_new_label();
- new_args[5] = label_true;
+ new_args[5] = label_arg(label_true);
tcg_out_brcond2(s, new_args, const_args+1, 1);
tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);
label_over = gen_new_label();
new_args[4] = tcg_invert_cond(new_args[4]);
- new_args[5] = label_over;
+ new_args[5] = label_arg(label_over);
tcg_out_brcond2(s, new_args, const_args+1, 1);
tgen_arithi(s, ARITH_ADD, args[0], 1, 0);
if (have_cmov) {
tcg_out_modrm(s, OPC_CMOVCC | tcg_cond_to_jcc[cond], dest, v1);
} else {
- int over = gen_new_label();
+ TCGLabel *over = gen_new_label();
tcg_out_jxx(s, tcg_cond_to_jcc[tcg_invert_cond(cond)], over, 1);
tcg_out_mov(s, TCG_TYPE_I32, dest, v1);
tcg_out_label(s, over, s->code_ptr);
}
#endif
-static void tcg_out_branch(TCGContext *s, int call, uintptr_t dest)
+static void tcg_out_branch(TCGContext *s, int call, tcg_insn_unit *dest)
{
- intptr_t disp = dest - (intptr_t)s->code_ptr - 5;
+ intptr_t disp = tcg_pcrel_diff(s, dest) - 5;
if (disp == (int32_t)disp) {
tcg_out_opc(s, call ? OPC_CALL_Jz : OPC_JMP_long, 0, 0, 0);
tcg_out32(s, disp);
} else {
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R10, dest);
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R10, (uintptr_t)dest);
tcg_out_modrm(s, OPC_GRP5,
call ? EXT5_CALLN_Ev : EXT5_JMPN_Ev, TCG_REG_R10);
}
}
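/* In tcg_out_branch() above, the "- 5" accounts for the 5-byte call/jmp
 * rel32 encoding (1 opcode byte + 4 displacement bytes), since the CPU takes
 * the displacement relative to the end of the instruction.  If the
 * displacement does not fit in 32 bits (possible only on x86-64), the
 * destination is materialized in R10 and an indirect call/jmp is used.
 */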
-static inline void tcg_out_calli(TCGContext *s, uintptr_t dest)
+static inline void tcg_out_call(TCGContext *s, tcg_insn_unit *dest)
{
tcg_out_branch(s, 1, dest);
}
-static void tcg_out_jmp(TCGContext *s, uintptr_t dest)
+static void tcg_out_jmp(TCGContext *s, tcg_insn_unit *dest)
{
tcg_out_branch(s, 0, dest);
}
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
* TCGMemOpIdx oi, uintptr_t ra)
*/
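/* The helper tables below drop the "const" on the pointed-to type,
 * presumably because tcg_out_call()/tcg_out_jmp() now take a plain
 * "tcg_insn_unit *"; the entries themselves remain const pointers.
 */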
-static const void * const qemu_ld_helpers[16] = {
+static void * const qemu_ld_helpers[16] = {
[MO_UB] = helper_ret_ldub_mmu,
[MO_LEUW] = helper_le_lduw_mmu,
[MO_LEUL] = helper_le_ldul_mmu,
/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
* uintxx_t val, TCGMemOpIdx oi, uintptr_t ra)
*/
-static const void * const qemu_st_helpers[16] = {
+static void * const qemu_st_helpers[16] = {
[MO_UB] = helper_ret_stb_mmu,
[MO_LEUW] = helper_le_stw_mmu,
[MO_LEUL] = helper_le_stl_mmu,
static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
int mem_index, TCGMemOp s_bits,
- uint8_t **label_ptr, int which)
+ tcg_insn_unit **label_ptr, int which)
{
const TCGReg r0 = TCG_REG_L0;
const TCGReg r1 = TCG_REG_L1;
* Record the context of a call to the out-of-line helper code for the slow
* path of a load or store, so that we can later generate the correct helper code
*/
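/* TCGMemOpIdx packs the TCGMemOp and the MMU index into a single operand.
 * A minimal sketch of the generic accessors assumed here (see tcg/tcg.h):
 *
 *   static inline TCGMemOpIdx make_memop_idx(TCGMemOp op, unsigned idx)
 *   {
 *       return (op << 4) | idx;
 *   }
 *   static inline TCGMemOp get_memop(TCGMemOpIdx oi) { return oi >> 4; }
 *   static inline unsigned get_mmuidx(TCGMemOpIdx oi) { return oi & 15; }
 */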
-static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
+static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
TCGReg datalo, TCGReg datahi,
TCGReg addrlo, TCGReg addrhi,
- int mem_index, uint8_t *raddr,
- uint8_t **label_ptr)
+ tcg_insn_unit *raddr,
+ tcg_insn_unit **label_ptr)
{
TCGLabelQemuLdst *label = new_ldst_label(s);
label->is_ld = is_ld;
- label->opc = opc;
+ label->oi = oi;
label->datalo_reg = datalo;
label->datahi_reg = datahi;
label->addrlo_reg = addrlo;
label->addrhi_reg = addrhi;
- label->mem_index = mem_index;
label->raddr = raddr;
label->label_ptr[0] = label_ptr[0];
if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
*/
static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
- TCGMemOp opc = l->opc;
+ TCGMemOpIdx oi = l->oi;
+ TCGMemOp opc = get_memop(oi);
TCGReg data_reg;
- uint8_t **label_ptr = &l->label_ptr[0];
+ tcg_insn_unit **label_ptr = &l->label_ptr[0];
/* resolve label address */
- *(uint32_t *)label_ptr[0] = (uint32_t)(s->code_ptr - label_ptr[0] - 4);
+ tcg_patch32(label_ptr[0], s->code_ptr - label_ptr[0] - 4);
if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
- *(uint32_t *)label_ptr[1] = (uint32_t)(s->code_ptr - label_ptr[1] - 4);
+ tcg_patch32(label_ptr[1], s->code_ptr - label_ptr[1] - 4);
}
if (TCG_TARGET_REG_BITS == 32) {
ofs += 4;
}
- tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, l->mem_index);
+ tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, oi);
ofs += 4;
- tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, (uintptr_t)l->raddr);
+ tcg_out_sti(s, TCG_TYPE_PTR, TCG_REG_ESP, ofs, (uintptr_t)l->raddr);
} else {
tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
/* The second argument is already loaded with addrlo. */
- tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2],
- l->mem_index);
+ tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2], oi);
tcg_out_movi(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[3],
(uintptr_t)l->raddr);
}
- tcg_out_calli(s, (uintptr_t)qemu_ld_helpers[opc & ~MO_SIGN]);
+ tcg_out_call(s, qemu_ld_helpers[opc & ~MO_SIGN]);
data_reg = l->datalo_reg;
switch (opc & MO_SSIZE) {
}
/* Jump to the code corresponding to the next IR of qemu_ld */
- tcg_out_jmp(s, (uintptr_t)l->raddr);
+ tcg_out_jmp(s, l->raddr);
}
/*
*/
static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
- TCGMemOp opc = l->opc;
+ TCGMemOpIdx oi = l->oi;
+ TCGMemOp opc = get_memop(oi);
TCGMemOp s_bits = opc & MO_SIZE;
- uint8_t **label_ptr = &l->label_ptr[0];
+ tcg_insn_unit **label_ptr = &l->label_ptr[0];
TCGReg retaddr;
/* resolve label address */
- *(uint32_t *)label_ptr[0] = (uint32_t)(s->code_ptr - label_ptr[0] - 4);
+ tcg_patch32(label_ptr[0], s->code_ptr - label_ptr[0] - 4);
if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
- *(uint32_t *)label_ptr[1] = (uint32_t)(s->code_ptr - label_ptr[1] - 4);
+ tcg_patch32(label_ptr[1], s->code_ptr - label_ptr[1] - 4);
}
if (TCG_TARGET_REG_BITS == 32) {
ofs += 4;
}
- tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, l->mem_index);
+ tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, oi);
ofs += 4;
retaddr = TCG_REG_EAX;
- tcg_out_movi(s, TCG_TYPE_I32, retaddr, (uintptr_t)l->raddr);
- tcg_out_st(s, TCG_TYPE_I32, retaddr, TCG_REG_ESP, ofs);
+ tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
+ tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP, ofs);
} else {
tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
/* The second argument is already loaded with addrlo. */
tcg_out_mov(s, (s_bits == MO_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
tcg_target_call_iarg_regs[2], l->datalo_reg);
- tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3],
- l->mem_index);
+ tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3], oi);
if (ARRAY_SIZE(tcg_target_call_iarg_regs) > 4) {
retaddr = tcg_target_call_iarg_regs[4];
} else {
retaddr = TCG_REG_RAX;
tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
- tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP, 0);
+ tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP,
+ TCG_TARGET_CALL_STACK_OFFSET);
}
}
/* "Tail call" to the helper, with the return address back inline. */
tcg_out_push(s, retaddr);
- tcg_out_jmp(s, (uintptr_t)qemu_st_helpers[opc]);
+ tcg_out_jmp(s, qemu_st_helpers[opc]);
}
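/* The push/jmp pair above makes l->raddr the helper's return address, so the
 * store helper's own "ret" resumes execution in the translated code right
 * after the original access, without coming back through this slow path.
 */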
#elif defined(__x86_64__) && defined(__linux__)
# include <asm/prctl.h>
{
TCGReg datalo, datahi, addrlo;
TCGReg addrhi __attribute__((unused));
+ TCGMemOpIdx oi;
TCGMemOp opc;
#if defined(CONFIG_SOFTMMU)
int mem_index;
TCGMemOp s_bits;
- uint8_t *label_ptr[2];
+ tcg_insn_unit *label_ptr[2];
#endif
datalo = *args++;
datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
addrlo = *args++;
addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
- opc = *args++;
+ oi = *args++;
+ opc = get_memop(oi);
#if defined(CONFIG_SOFTMMU)
- mem_index = *args++;
+ mem_index = get_mmuidx(oi);
s_bits = opc & MO_SIZE;
tcg_out_tlb_load(s, addrlo, addrhi, mem_index, s_bits,
tcg_out_qemu_ld_direct(s, datalo, datahi, TCG_REG_L1, 0, 0, opc);
/* Record the current context of a load into ldst label */
- add_qemu_ldst_label(s, true, opc, datalo, datahi, addrlo, addrhi,
- mem_index, s->code_ptr, label_ptr);
+ add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
+ s->code_ptr, label_ptr);
#else
{
int32_t offset = GUEST_BASE;
{
TCGReg datalo, datahi, addrlo;
TCGReg addrhi __attribute__((unused));
+ TCGMemOpIdx oi;
TCGMemOp opc;
#if defined(CONFIG_SOFTMMU)
int mem_index;
TCGMemOp s_bits;
- uint8_t *label_ptr[2];
+ tcg_insn_unit *label_ptr[2];
#endif
datalo = *args++;
datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
addrlo = *args++;
addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
- opc = *args++;
+ oi = *args++;
+ opc = get_memop(oi);
#if defined(CONFIG_SOFTMMU)
- mem_index = *args++;
+ mem_index = get_mmuidx(oi);
s_bits = opc & MO_SIZE;
tcg_out_tlb_load(s, addrlo, addrhi, mem_index, s_bits,
tcg_out_qemu_st_direct(s, datalo, datahi, TCG_REG_L1, 0, 0, opc);
/* Record the current context of a store into ldst label */
- add_qemu_ldst_label(s, false, opc, datalo, datahi, addrlo, addrhi,
- mem_index, s->code_ptr, label_ptr);
+ add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
+ s->code_ptr, label_ptr);
#else
{
int32_t offset = GUEST_BASE;
switch(opc) {
case INDEX_op_exit_tb:
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_EAX, args[0]);
- tcg_out_jmp(s, (uintptr_t)tb_ret_addr);
+ tcg_out_jmp(s, tb_ret_addr);
break;
case INDEX_op_goto_tb:
if (s->tb_jmp_offset) {
/* direct jump method */
tcg_out8(s, OPC_JMP_long); /* jmp im */
- s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
+ s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
tcg_out32(s, 0);
} else {
/* indirect jump method */
tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, -1,
(intptr_t)(s->tb_next + args[0]));
}
- s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
- break;
- case INDEX_op_call:
- if (const_args[0]) {
- tcg_out_calli(s, args[0]);
- } else {
- /* call *reg */
- tcg_out_modrm(s, OPC_GRP5, EXT5_CALLN_Ev, args[0]);
- }
+ s->tb_next_offset[args[0]] = tcg_current_code_size(s);
break;
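/* tb_jmp_offset[]/tb_next_offset[] record byte offsets from the start of the
 * generated code so the jump operand can be patched when TBs are chained;
 * tcg_current_code_size() is assumed to be the generic helper returning
 * s->code_ptr - s->code_buf in bytes.
 */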
case INDEX_op_br:
- tcg_out_jxx(s, JCC_JMP, args[0], 0);
- break;
- case INDEX_op_movi_i32:
- tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1]);
+ tcg_out_jxx(s, JCC_JMP, arg_label(args[0]), 0);
break;
OP_32_64(ld8u):
/* Note that we can ignore REXW for the zero-extend to 64-bit. */
case INDEX_op_brcond_i32:
tcg_out_brcond32(s, args[2], args[0], args[1], const_args[1],
- args[3], 0);
+ arg_label(args[3]), 0);
break;
case INDEX_op_setcond_i32:
tcg_out_setcond32(s, args[3], args[0], args[1],
tcg_out_setcond2(s, args, const_args);
break;
#else /* TCG_TARGET_REG_BITS == 64 */
- case INDEX_op_movi_i64:
- tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
- break;
case INDEX_op_ld32s_i64:
tcg_out_modrm_offset(s, OPC_MOVSLQ, args[0], args[1], args[2]);
break;
case INDEX_op_brcond_i64:
tcg_out_brcond64(s, args[2], args[0], args[1], const_args[1],
- args[3], 0);
+ arg_label(args[3]), 0);
break;
case INDEX_op_setcond_i64:
tcg_out_setcond64(s, args[3], args[0], args[1],
}
break;
+ case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
+ case INDEX_op_mov_i64:
+ case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */
+ case INDEX_op_movi_i64:
+ case INDEX_op_call: /* Always emitted via tcg_out_call. */
default:
tcg_abort();
}
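/* Note: mov_i32/i64, movi_i32/i64 and call no longer appear in the constraint
 * table below; the common tcg code emits them directly via tcg_out_mov,
 * tcg_out_movi and tcg_out_call, so they never reach tcg_out_op.
 */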
static const TCGTargetOpDef x86_op_defs[] = {
{ INDEX_op_exit_tb, { } },
{ INDEX_op_goto_tb, { } },
- { INDEX_op_call, { "ri" } },
{ INDEX_op_br, { } },
- { INDEX_op_mov_i32, { "r", "r" } },
- { INDEX_op_movi_i32, { "r" } },
{ INDEX_op_ld8u_i32, { "r", "r" } },
{ INDEX_op_ld8s_i32, { "r", "r" } },
{ INDEX_op_ld16u_i32, { "r", "r" } },
{ INDEX_op_brcond2_i32, { "r", "r", "ri", "ri" } },
{ INDEX_op_setcond2_i32, { "r", "r", "r", "ri", "ri" } },
#else
- { INDEX_op_mov_i64, { "r", "r" } },
- { INDEX_op_movi_i64, { "r" } },
{ INDEX_op_ld8u_i64, { "r", "r" } },
{ INDEX_op_ld8s_i64, { "r", "r" } },
{ INDEX_op_ld16u_i64, { "r", "r" } },
}
typedef struct {
- DebugFrameCIE cie;
- DebugFrameFDEHeader fde;
+ DebugFrameHeader h;
uint8_t fde_def_cfa[4];
uint8_t fde_reg_ofs[14];
} DebugFrame;
/* Host machine without ELF. */
#elif TCG_TARGET_REG_BITS == 64
#define ELF_HOST_MACHINE EM_X86_64
-static DebugFrame debug_frame = {
- .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
- .cie.id = -1,
- .cie.version = 1,
- .cie.code_align = 1,
- .cie.data_align = 0x78, /* sleb128 -8 */
- .cie.return_column = 16,
+static const DebugFrame debug_frame = {
+ .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
+ .h.cie.id = -1,
+ .h.cie.version = 1,
+ .h.cie.code_align = 1,
+ .h.cie.data_align = 0x78, /* sleb128 -8 */
+ .h.cie.return_column = 16,
/* Total FDE size does not include the "len" member. */
- .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),
+ .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
.fde_def_cfa = {
12, 7, /* DW_CFA_def_cfa %rsp, ... */
};
#else
#define ELF_HOST_MACHINE EM_386
-static DebugFrame debug_frame = {
- .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
- .cie.id = -1,
- .cie.version = 1,
- .cie.code_align = 1,
- .cie.data_align = 0x7c, /* sleb128 -4 */
- .cie.return_column = 8,
+static const DebugFrame debug_frame = {
+ .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
+ .h.cie.id = -1,
+ .h.cie.version = 1,
+ .h.cie.code_align = 1,
+ .h.cie.data_align = 0x7c, /* sleb128 -4 */
+ .h.cie.return_column = 8,
/* Total FDE size does not include the "len" member. */
- .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),
+ .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
.fde_def_cfa = {
12, 4, /* DW_CFA_def_cfa %esp, ... */
#if defined(ELF_HOST_MACHINE)
void tcg_register_jit(void *buf, size_t buf_size)
{
- debug_frame.fde.func_start = (uintptr_t)buf;
- debug_frame.fde.func_len = buf_size;
-
tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}
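/* func_start/func_len are no longer patched here; tcg_register_jit_int() is
 * assumed to copy the (now const) debug_frame and fill them in on the copy.
 */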
#endif