#include <stdio.h>
#include <string.h>
#include <inttypes.h>
+#ifdef _WIN32
+#include <malloc.h>
+#endif
#include "config.h"
-#include "osdep.h"
+#include "qemu-common.h"
/* Note: the long term plan is to reduce the dependancies on the QEMU
CPU definitions. Currently they are used for qemu_ld/st
static void patch_reloc(uint8_t *code_ptr, int type,
- tcg_target_long value);
+ tcg_target_long value, tcg_target_long addend);
TCGOpDef tcg_op_defs[] = {
#define DEF(s, n, copy_size) { #s, 0, 0, n, n, 0, copy_size },
l = &s->labels[label_index];
if (l->has_value) {
- patch_reloc(code_ptr, type, l->u.value + addend);
+ /* FIXME: This may break relocations on RISC targets that
+ modify instruction fields in place. The caller may not have
+ written the initial value. */
+ patch_reloc(code_ptr, type, l->u.value, addend);
} else {
/* add a new relocation entry */
r = tcg_malloc(sizeof(TCGRelocation));
tcg_abort();
r = l->u.first_reloc;
while (r != NULL) {
- patch_reloc(r->ptr, r->type, value + r->addend);
+ patch_reloc(r->ptr, r->type, value, r->addend);
r = r->next;
}
l->has_value = 1;
#include "tcg-target.c"
-/* XXX: factorize */
-static void pstrcpy(char *buf, int buf_size, const char *str)
-{
- int c;
- char *q = buf;
-
- if (buf_size <= 0)
- return;
-
- for(;;) {
- c = *str++;
- if (c == 0 || q >= buf + buf_size - 1)
- break;
- *q++ = c;
- }
- *q = '\0';
-}
-
-#if TCG_TARGET_REG_BITS == 32
-/* strcat and truncate. */
-static char *pstrcat(char *buf, int buf_size, const char *s)
-{
- int len;
- len = strlen(buf);
- if (len < buf_size)
- pstrcpy(buf + len, buf_size - len, s);
- return buf;
-}
-#endif
-
/* pool based memory allocation */
void *tcg_malloc_internal(TCGContext *s, int size)
{
}
tcg_target_init(s);
+
+ /* init global prologue and epilogue */
+ s->code_buf = code_gen_prologue;
+ s->code_ptr = s->code_buf;
+ tcg_target_qemu_prologue(s);
+ flush_icache_range((unsigned long)s->code_buf,
+ (unsigned long)s->code_ptr);
}
void tcg_set_frame(TCGContext *s, int reg,
return MAKE_TCGV(idx);
}
+#if TCG_TARGET_REG_BITS == 32
+/* temporary hack to avoid register shortage for tcg_qemu_st64() */
+TCGv tcg_global_reg2_new_hack(TCGType type, int reg1, int reg2,
+ const char *name)
+{
+ TCGContext *s = &tcg_ctx;
+ TCGTemp *ts;
+ int idx;
+ char buf[64];
+
+ if (type != TCG_TYPE_I64)
+ tcg_abort();
+ idx = s->nb_globals;
+ tcg_temp_alloc(s, s->nb_globals + 2);
+ ts = &s->temps[s->nb_globals];
+ ts->base_type = type;
+ ts->type = TCG_TYPE_I32;
+ ts->fixed_reg = 1;
+ ts->reg = reg1;
+ ts->val_type = TEMP_VAL_REG;
+ pstrcpy(buf, sizeof(buf), name);
+ pstrcat(buf, sizeof(buf), "_0");
+ ts->name = strdup(buf);
+
+ ts++;
+ ts->base_type = type;
+ ts->type = TCG_TYPE_I32;
+ ts->fixed_reg = 1;
+ ts->reg = reg2;
+ ts->val_type = TEMP_VAL_REG;
+ pstrcpy(buf, sizeof(buf), name);
+ pstrcat(buf, sizeof(buf), "_1");
+ ts->name = strdup(buf);
+
+ s->nb_globals += 2;
+ return MAKE_TCGV(idx);
+}
+#endif
+
TCGv tcg_global_mem_new(TCGType type, int reg, tcg_target_long offset,
const char *name)
{
args2[j++] = TCGV_HIGH(arg);
#else
#ifdef TCG_TARGET_WORDS_BIGENDIAN
- args2[j++] = TCGV_HOGH(arg);
+ args2[j++] = TCGV_HIGH(arg);
args2[j++] = arg;
#else
args2[j++] = arg;
nb_oargs = arg >> 16;
nb_iargs = arg & 0xffff;
nb_cargs = def->nb_cargs;
- } else if (c == INDEX_op_nopn) {
- /* variable number of arguments */
- nb_cargs = *args;
- nb_oargs = 0;
- nb_iargs = 0;
- } else {
- nb_oargs = def->nb_oargs;
- nb_iargs = def->nb_iargs;
- nb_cargs = def->nb_cargs;
- }
- k = 0;
- for(i = 0; i < nb_oargs; i++) {
- if (k != 0)
- fprintf(outfile, ",");
- fprintf(outfile, "%s",
- tcg_get_arg_str_idx(s, buf, sizeof(buf), args[k++]));
- }
- for(i = 0; i < nb_iargs; i++) {
- if (k != 0)
- fprintf(outfile, ",");
+ /* function name */
/* XXX: dump helper name for call */
fprintf(outfile, "%s",
- tcg_get_arg_str_idx(s, buf, sizeof(buf), args[k++]));
- }
- for(i = 0; i < nb_cargs; i++) {
- if (k != 0)
+ tcg_get_arg_str_idx(s, buf, sizeof(buf), args[nb_oargs + nb_iargs - 1]));
+ /* flags */
+ fprintf(outfile, ",$0x%" TCG_PRIlx,
+ args[nb_oargs + nb_iargs]);
+ /* nb out args */
+ fprintf(outfile, ",$%d", nb_oargs);
+ for(i = 0; i < nb_oargs; i++) {
fprintf(outfile, ",");
- arg = args[k++];
- fprintf(outfile, "$0x%" TCG_PRIlx, arg);
+ fprintf(outfile, "%s",
+ tcg_get_arg_str_idx(s, buf, sizeof(buf), args[i]));
+ }
+ for(i = 0; i < (nb_iargs - 1); i++) {
+ fprintf(outfile, ",");
+ fprintf(outfile, "%s",
+ tcg_get_arg_str_idx(s, buf, sizeof(buf), args[nb_oargs + i]));
+ }
+ } else {
+ if (c == INDEX_op_nopn) {
+ /* variable number of arguments */
+ nb_cargs = *args;
+ nb_oargs = 0;
+ nb_iargs = 0;
+ } else {
+ nb_oargs = def->nb_oargs;
+ nb_iargs = def->nb_iargs;
+ nb_cargs = def->nb_cargs;
+ }
+
+ k = 0;
+ for(i = 0; i < nb_oargs; i++) {
+ if (k != 0)
+ fprintf(outfile, ",");
+ fprintf(outfile, "%s",
+ tcg_get_arg_str_idx(s, buf, sizeof(buf), args[k++]));
+ }
+ for(i = 0; i < nb_iargs; i++) {
+ if (k != 0)
+ fprintf(outfile, ",");
+ fprintf(outfile, "%s",
+ tcg_get_arg_str_idx(s, buf, sizeof(buf), args[k++]));
+ }
+ for(i = 0; i < nb_cargs; i++) {
+ if (k != 0)
+ fprintf(outfile, ",");
+ arg = args[k++];
+ fprintf(outfile, "$0x%" TCG_PRIlx, arg);
+ }
}
fprintf(outfile, "\n");
args += nb_iargs + nb_oargs + nb_cargs;
def = &tcg_op_defs[op];
switch(op) {
case INDEX_op_call:
- nb_args = args[-1];
- args -= nb_args;
- nb_iargs = args[0] & 0xffff;
- nb_oargs = args[0] >> 16;
- args++;
+ {
+ int call_flags;
- /* output args are dead */
- for(i = 0; i < nb_oargs; i++) {
- arg = args[i];
- dead_temps[arg] = 1;
- }
-
- /* globals are live (they may be used by the call) */
- memset(dead_temps, 0, s->nb_globals);
+ nb_args = args[-1];
+ args -= nb_args;
+ nb_iargs = args[0] & 0xffff;
+ nb_oargs = args[0] >> 16;
+ args++;
+ call_flags = args[nb_oargs + nb_iargs];
+
+ /* pure functions can be removed if their result is not
+ used */
+ if (call_flags & TCG_CALL_PURE) {
+ for(i = 0; i < nb_oargs; i++) {
+ arg = args[i];
+ if (!dead_temps[arg])
+ goto do_not_remove_call;
+ }
+ tcg_set_nop(s, gen_opc_buf + op_index,
+ args - 1, nb_args);
+ } else {
+ do_not_remove_call:
- /* input args are live */
- dead_iargs = 0;
- for(i = 0; i < nb_iargs; i++) {
- arg = args[i + nb_oargs];
- if (dead_temps[arg]) {
- dead_iargs |= (1 << i);
+ /* output args are dead */
+ for(i = 0; i < nb_oargs; i++) {
+ arg = args[i];
+ dead_temps[arg] = 1;
+ }
+
+ /* globals are live (they may be used by the call) */
+ memset(dead_temps, 0, s->nb_globals);
+
+ /* input args are live */
+ dead_iargs = 0;
+ for(i = 0; i < nb_iargs; i++) {
+ arg = args[i + nb_oargs];
+ if (dead_temps[arg]) {
+ dead_iargs |= (1 << i);
+ }
+ dead_temps[arg] = 0;
+ }
+ s->op_dead_iargs[op_index] = dead_iargs;
}
- dead_temps[arg] = 0;
+ args--;
}
- s->op_dead_iargs[op_index] = dead_iargs;
- args--;
break;
case INDEX_op_set_label:
args--;
/* if end of basic block, update */
if (def->flags & TCG_OPF_BB_END) {
tcg_la_bb_end(s, dead_temps);
+ } else if (def->flags & TCG_OPF_CALL_CLOBBER) {
+ /* globals are live */
+ memset(dead_temps, 0, s->nb_globals);
}
/* input args are live */
ts->reg != reg) {
printf("Inconsistency for register %s:\n",
tcg_target_reg_names[reg]);
- printf("reg state:\n");
- dump_regs(s);
- tcg_abort();
+ goto fail;
}
}
}
s->reg_to_temp[ts->reg] != k) {
printf("Inconsistency for temp %s:\n",
tcg_get_arg_str_idx(s, buf, sizeof(buf), k));
+ fail:
printf("reg state:\n");
dump_regs(s);
tcg_abort();
}
+ if (ts->val_type == TEMP_VAL_CONST && k < s->nb_globals) {
+ printf("constant forbidden in global %s\n",
+ tcg_get_arg_str_idx(s, buf, sizeof(buf), k));
+ goto fail;
+ }
}
}
#endif
if (!ts->mem_coherent) {
if (!ts->mem_allocated)
temp_allocate_frame(s, temp);
- tcg_out_st(s, reg, ts->mem_reg, ts->mem_offset);
+ tcg_out_st(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
}
ts->val_type = TEMP_VAL_MEM;
s->reg_to_temp[reg] = -1;
tcg_regset_andnot(reg_ct, reg1, reg2);
/* first try free registers */
- for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
+ for(i = 0; i < ARRAY_SIZE(tcg_target_reg_alloc_order); i++) {
reg = tcg_target_reg_alloc_order[i];
if (tcg_regset_test_reg(reg_ct, reg) && s->reg_to_temp[reg] == -1)
return reg;
}
/* XXX: do better spill choice */
- for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
+ for(i = 0; i < ARRAY_SIZE(tcg_target_reg_alloc_order); i++) {
reg = tcg_target_reg_alloc_order[i];
if (tcg_regset_test_reg(reg_ct, reg)) {
tcg_reg_free(s, reg);
} else {
reg = tcg_reg_alloc(s, arg_ct->u.regs, s->reserved_regs);
}
- tcg_out_ld(s, reg, ts->mem_reg, ts->mem_offset);
+ tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
} else if (ts->val_type == TEMP_VAL_CONST) {
if (ots->val_type == TEMP_VAL_REG) {
reg = ots->reg;
ts = &s->temps[arg];
if (ts->val_type == TEMP_VAL_MEM) {
reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
- tcg_out_ld(s, reg, ts->mem_reg, ts->mem_offset);
+ tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
ts->val_type = TEMP_VAL_REG;
ts->reg = reg;
ts->mem_coherent = 1;
}
}
- /* XXX: permit generic clobber register list ? */
if (def->flags & TCG_OPF_CALL_CLOBBER) {
+ /* XXX: permit generic clobber register list ? */
for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
if (tcg_regset_test_reg(tcg_target_call_clobber_regs, reg)) {
tcg_reg_free(s, reg);
}
}
+ /* XXX: for load/store we could do that only for the slow path
+ (i.e. when a memory callback is called) */
+
+    /* store globals and free associated registers (we assume the insn
+       can modify any global). */
+ for(i = 0; i < s->nb_globals; i++) {
+ ts = &s->temps[i];
+ if (!ts->fixed_reg) {
+ if (ts->val_type == TEMP_VAL_REG) {
+ tcg_reg_free(s, ts->reg);
+ }
+ }
+ }
}
/* satisfy the output constraints */
}
}
+#ifdef TCG_TARGET_STACK_GROWSUP
+#define STACK_DIR(x) (-(x))
+#else
+#define STACK_DIR(x) (x)
+#endif
+
static int tcg_reg_alloc_call(TCGContext *s, const TCGOpDef *def,
int opc, const TCGArg *args,
unsigned int dead_iargs)
int nb_iargs, nb_oargs, flags, nb_regs, i, reg, nb_params;
TCGArg arg, func_arg;
TCGTemp *ts;
- tcg_target_long stack_offset, call_stack_size;
- int const_func_arg;
+ tcg_target_long stack_offset, call_stack_size, func_addr;
+ int const_func_arg, allocate_args;
TCGRegSet allocated_regs;
const TCGArgConstraint *arg_ct;
call_stack_size = (nb_params - nb_regs) * sizeof(tcg_target_long);
call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) &
~(TCG_TARGET_STACK_ALIGN - 1);
- tcg_out_addi(s, TCG_REG_CALL_STACK, -call_stack_size);
-
+ allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE);
+ if (allocate_args) {
+ tcg_out_addi(s, TCG_REG_CALL_STACK, -STACK_DIR(call_stack_size));
+ }
+ /* XXX: on some architectures it does not start at zero */
stack_offset = 0;
for(i = nb_regs; i < nb_params; i++) {
arg = args[nb_oargs + i];
ts = &s->temps[arg];
if (ts->val_type == TEMP_VAL_REG) {
- tcg_out_st(s, ts->reg, TCG_REG_CALL_STACK, stack_offset);
+ tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
} else if (ts->val_type == TEMP_VAL_MEM) {
reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
s->reserved_regs);
/* XXX: not correct if reading values from the stack */
- tcg_out_ld(s, reg, ts->mem_reg, ts->mem_offset);
- tcg_out_st(s, reg, TCG_REG_CALL_STACK, stack_offset);
+ tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
+ tcg_out_st(s, ts->type, reg, TCG_REG_CALL_STACK, stack_offset);
} else if (ts->val_type == TEMP_VAL_CONST) {
reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
s->reserved_regs);
/* XXX: sign extend may be needed on some targets */
tcg_out_movi(s, ts->type, reg, ts->val);
- tcg_out_st(s, reg, TCG_REG_CALL_STACK, stack_offset);
+ tcg_out_st(s, ts->type, reg, TCG_REG_CALL_STACK, stack_offset);
} else {
tcg_abort();
}
- stack_offset += sizeof(tcg_target_long);
+ /* XXX: not necessarily in the same order */
+ stack_offset += STACK_DIR(sizeof(tcg_target_long));
}
/* assign input registers */
tcg_out_mov(s, reg, ts->reg);
}
} else if (ts->val_type == TEMP_VAL_MEM) {
- tcg_out_ld(s, reg, ts->mem_reg, ts->mem_offset);
+ tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
} else if (ts->val_type == TEMP_VAL_CONST) {
/* XXX: sign extend ? */
tcg_out_movi(s, ts->type, reg, ts->val);
func_arg = args[nb_oargs + nb_iargs - 1];
arg_ct = &def->args_ct[0];
ts = &s->temps[func_arg];
+ func_addr = ts->val;
const_func_arg = 0;
if (ts->val_type == TEMP_VAL_MEM) {
reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
- tcg_out_ld(s, reg, ts->mem_reg, ts->mem_offset);
+ tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
func_arg = reg;
} else if (ts->val_type == TEMP_VAL_REG) {
reg = ts->reg;
}
func_arg = reg;
} else if (ts->val_type == TEMP_VAL_CONST) {
- if (tcg_target_const_match(ts->val, arg_ct)) {
+ if (tcg_target_const_match(func_addr, arg_ct)) {
const_func_arg = 1;
- func_arg = ts->val;
+ func_arg = func_addr;
} else {
reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
- tcg_out_movi(s, ts->type, reg, ts->val);
+ tcg_out_movi(s, ts->type, reg, func_addr);
func_arg = reg;
}
} else {
}
/* mark dead temporaries and free the associated registers */
- for(i = 0; i < nb_params; i++) {
+ for(i = 0; i < nb_iargs; i++) {
arg = args[nb_oargs + i];
if (IS_DEAD_IARG(i)) {
ts = &s->temps[arg];
tcg_out_op(s, opc, &func_arg, &const_func_arg);
- tcg_out_addi(s, TCG_REG_CALL_STACK, call_stack_size);
+ if (allocate_args) {
+ tcg_out_addi(s, TCG_REG_CALL_STACK, STACK_DIR(call_stack_size));
+ }
/* assign output registers and emit moves if needed */
for(i = 0; i < nb_oargs; i++) {
static inline int tcg_gen_code_common(TCGContext *s, uint8_t *gen_code_buf,
- int do_search_pc,
- const uint8_t *searched_pc)
+ long search_pc)
{
int opc, op_index, macro_op_index;
const TCGOpDef *def;
macro_op_index = -1;
args = gen_opparam_buf;
op_index = 0;
+
for(;;) {
opc = gen_opc_buf[op_index];
#ifdef CONFIG_PROFILER
goto next;
case INDEX_op_end:
goto the_end;
+
+#ifdef CONFIG_DYNGEN_OP
case 0 ... INDEX_op_end - 1:
/* legacy dyngen ops */
#ifdef CONFIG_PROFILER
}
#endif
tcg_reg_alloc_bb_end(s);
- if (do_search_pc) {
+ if (search_pc >= 0) {
s->code_ptr += def->copy_size;
args += def->nb_args;
} else {
args = dyngen_op(s, opc, args);
}
goto next;
+#endif
default:
/* Note: in order to speed up the code, it would be much
faster to have specialized register allocator functions for
}
args += def->nb_args;
next: ;
- if (do_search_pc) {
- if (searched_pc < s->code_ptr) {
- if (macro_op_index >= 0)
- return macro_op_index;
- else
- return op_index;
- }
+ if (search_pc >= 0 && search_pc < s->code_ptr - gen_code_buf) {
+ if (macro_op_index >= 0)
+ return macro_op_index;
+ else
+ return op_index;
}
op_index++;
#ifndef NDEBUG
}
#endif
- tcg_gen_code_common(s, gen_code_buf, 0, NULL);
+ tcg_gen_code_common(s, gen_code_buf, -1);
/* flush instruction cache */
flush_icache_range((unsigned long)gen_code_buf,
return s->code_ptr - gen_code_buf;
}
-/* return the index of the micro operation such as the pc after is <
- search_pc. Note: gen_code_buf is accessed during the operation, but
- its content should not be modified. Return -1 if not found. */
-int dyngen_code_search_pc(TCGContext *s, uint8_t *gen_code_buf,
- const uint8_t *searched_pc)
+/* Return the index of the micro operation such that the pc after it is <
+   offset bytes from the start of the TB. The contents of gen_code_buf must
+ not be changed, though writing the same values is ok.
+ Return -1 if not found. */
+int dyngen_code_search_pc(TCGContext *s, uint8_t *gen_code_buf, long offset)
{
- return tcg_gen_code_common(s, gen_code_buf, 1, searched_pc);
+ return tcg_gen_code_common(s, gen_code_buf, offset);
}