* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
+
+#ifndef TCG_H
+#define TCG_H
+
#include "qemu-common.h"
+#include "qemu/bitops.h"
+#include "tcg-target.h"
-/* Target word size (must be identical to pointer size). */
-#if UINTPTR_MAX == UINT32_MAX
-# define TCG_TARGET_REG_BITS 32
-#elif UINTPTR_MAX == UINT64_MAX
-# define TCG_TARGET_REG_BITS 64
-#else
-# error Unknown pointer size for tcg target
+#define CPU_TEMP_BUF_NLONGS 128
+
+/* Default target word size to pointer size. */
+#ifndef TCG_TARGET_REG_BITS
+# if UINTPTR_MAX == UINT32_MAX
+# define TCG_TARGET_REG_BITS 32
+# elif UINTPTR_MAX == UINT64_MAX
+# define TCG_TARGET_REG_BITS 64
+# else
+# error Unknown pointer size for tcg target
+# endif
#endif
#if TCG_TARGET_REG_BITS == 32
#error unsupported
#endif
-#include "tcg-target.h"
-#include "tcg-runtime.h"
-
#if TCG_TARGET_NB_REGS <= 32
typedef uint32_t TCGRegSet;
#elif TCG_TARGET_NB_REGS <= 64
#if TCG_TARGET_REG_BITS == 32
/* Turn some undef macros into false macros. */
+#define TCG_TARGET_HAS_extrl_i64_i32 0
+#define TCG_TARGET_HAS_extrh_i64_i32 0
#define TCG_TARGET_HAS_div_i64 0
#define TCG_TARGET_HAS_rem_i64 0
#define TCG_TARGET_HAS_div2_i64 0
#define TCG_TARGET_HAS_sub2_i64 0
#define TCG_TARGET_HAS_mulu2_i64 0
#define TCG_TARGET_HAS_muls2_i64 0
+#define TCG_TARGET_HAS_muluh_i64 0
+#define TCG_TARGET_HAS_mulsh_i64 0
/* Turn some undef macros into true macros. */
#define TCG_TARGET_HAS_add2_i32 1
#define TCG_TARGET_HAS_sub2_i32 1
-#define TCG_TARGET_HAS_mulu2_i32 1
#endif
#ifndef TCG_TARGET_deposit_i32_valid
#define TCG_TARGET_deposit_i32_valid(ofs, len) 1
#endif
+/* For 32-bit targets, some sort of unsigned widening multiply is required. */
+#if TCG_TARGET_REG_BITS == 32 \
+ && !(defined(TCG_TARGET_HAS_mulu2_i32) \
+ || defined(TCG_TARGET_HAS_muluh_i32))
+# error "Missing unsigned widening multiply"
+#endif
+
+#ifndef TARGET_INSN_START_EXTRA_WORDS
+# define TARGET_INSN_START_WORDS 1
+#else
+# define TARGET_INSN_START_WORDS (1 + TARGET_INSN_START_EXTRA_WORDS)
+#endif
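+
+/* Illustrative sketch (not part of this patch): a target defining
+ * TARGET_INSN_START_EXTRA_WORDS as 2 records three words per guest insn
+ * in gen_insn_data below, conventionally the pc plus two extra values:
+ *
+ *     target_ulong data[TARGET_INSN_START_WORDS];   // pc, extra0, extra1
+ */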
+
typedef enum TCGOpcode {
#define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name,
#include "tcg-opc.h"
#define tcg_regset_andnot(d, a, b) (d) = (a) & ~(b)
#define tcg_regset_not(d, a) (d) = ~(a)
+#ifndef TCG_TARGET_INSN_UNIT_SIZE
+# error "Missing TCG_TARGET_INSN_UNIT_SIZE"
+#elif TCG_TARGET_INSN_UNIT_SIZE == 1
+typedef uint8_t tcg_insn_unit;
+#elif TCG_TARGET_INSN_UNIT_SIZE == 2
+typedef uint16_t tcg_insn_unit;
+#elif TCG_TARGET_INSN_UNIT_SIZE == 4
+typedef uint32_t tcg_insn_unit;
+#elif TCG_TARGET_INSN_UNIT_SIZE == 8
+typedef uint64_t tcg_insn_unit;
+#else
+/* Otherwise the port is expected to have provided its own typedef. */
+#endif
+
typedef struct TCGRelocation {
struct TCGRelocation *next;
int type;
- uint8_t *ptr;
- tcg_target_long addend;
+ tcg_insn_unit *ptr;
+ intptr_t addend;
} TCGRelocation;
typedef struct TCGLabel {
- int has_value;
+ unsigned has_value : 1;
+ unsigned id : 31;
union {
- tcg_target_ulong value;
+ uintptr_t value;
+ tcg_insn_unit *value_ptr;
TCGRelocation *first_reloc;
} u;
} TCGLabel;
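+
+/* Note: while has_value is 0, u.first_reloc chains the relocations still
+ * waiting on this label; once the label is emitted, has_value becomes 1
+ * and u.value_ptr holds the label's code address.  */
+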
#define TCG_POOL_CHUNK_SIZE 32768
-#define TCG_MAX_LABELS 512
-
#define TCG_MAX_TEMPS 512
+#define TCG_MAX_INSNS 512
/* when the size of the arguments of a called function is smaller than
this value, they are statically allocated in the TB stack frame */
TCG_TYPE_REG = TCG_TYPE_I64,
#endif
- /* An alias for the size of the native pointer. We don't currently
- support any hosts with 64-bit registers and 32-bit pointers. */
- TCG_TYPE_PTR = TCG_TYPE_REG,
+ /* An alias for the size of the native pointer. */
+#if UINTPTR_MAX == UINT32_MAX
+ TCG_TYPE_PTR = TCG_TYPE_I32,
+#else
+ TCG_TYPE_PTR = TCG_TYPE_I64,
+#endif
/* An alias for the size of the target "long", aka register. */
#if TARGET_LONG_BITS == 64
#endif
} TCGType;
+/* Constants for qemu_ld and qemu_st for the Memory Operation field. */
+typedef enum TCGMemOp {
+ MO_8 = 0,
+ MO_16 = 1,
+ MO_32 = 2,
+ MO_64 = 3,
+ MO_SIZE = 3, /* Mask for the above. */
+
+ MO_SIGN = 4, /* Sign-extended, otherwise zero-extended. */
+
+ MO_BSWAP = 8, /* Host reverse endian. */
+#ifdef HOST_WORDS_BIGENDIAN
+ MO_LE = MO_BSWAP,
+ MO_BE = 0,
+#else
+ MO_LE = 0,
+ MO_BE = MO_BSWAP,
+#endif
+#ifdef TARGET_WORDS_BIGENDIAN
+ MO_TE = MO_BE,
+#else
+ MO_TE = MO_LE,
+#endif
+
+ /* MO_UNALN accesses are never checked for alignment.
+ MO_ALIGN accesses will result in a call to the CPU's
+ do_unaligned_access hook if the guest address is not aligned.
+ The default depends on whether the target CPU defines ALIGNED_ONLY. */
+ MO_AMASK = 16,
+#ifdef ALIGNED_ONLY
+ MO_ALIGN = 0,
+ MO_UNALN = MO_AMASK,
+#else
+ MO_ALIGN = MO_AMASK,
+ MO_UNALN = 0,
+#endif
+
+ /* Combinations of the above, for ease of use. */
+ MO_UB = MO_8,
+ MO_UW = MO_16,
+ MO_UL = MO_32,
+ MO_SB = MO_SIGN | MO_8,
+ MO_SW = MO_SIGN | MO_16,
+ MO_SL = MO_SIGN | MO_32,
+ MO_Q = MO_64,
+
+ MO_LEUW = MO_LE | MO_UW,
+ MO_LEUL = MO_LE | MO_UL,
+ MO_LESW = MO_LE | MO_SW,
+ MO_LESL = MO_LE | MO_SL,
+ MO_LEQ = MO_LE | MO_Q,
+
+ MO_BEUW = MO_BE | MO_UW,
+ MO_BEUL = MO_BE | MO_UL,
+ MO_BESW = MO_BE | MO_SW,
+ MO_BESL = MO_BE | MO_SL,
+ MO_BEQ = MO_BE | MO_Q,
+
+ MO_TEUW = MO_TE | MO_UW,
+ MO_TEUL = MO_TE | MO_UL,
+ MO_TESW = MO_TE | MO_SW,
+ MO_TESL = MO_TE | MO_SL,
+ MO_TEQ = MO_TE | MO_Q,
+
+ MO_SSIZE = MO_SIZE | MO_SIGN,
+} TCGMemOp;
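+
+/* Illustrative example (comment only): the convenience combinations above
+ * are plain ORs of the primitive flags, e.g. a target-endian signed
+ * 16-bit load is
+ *
+ *     TCGMemOp op = MO_TE | MO_SIGN | MO_16;   // == MO_TESW
+ */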
+
typedef tcg_target_ulong TCGArg;
-/* Define a type and accessor macros for variables. Using a struct is
- nice because it gives some level of type safely. Ideally the compiler
- be able to see through all this. However in practice this is not true,
- especially on targets with braindamaged ABIs (e.g. i386).
- We use plain int by default to avoid this runtime overhead.
- Users of tcg_gen_* don't need to know about any of this, and should
- treat TCGv as an opaque type.
+/* Define a type and accessor macros for variables. Using pointer types
+ is nice because it gives some level of type safety. Converting to and
+ from intptr_t rather than int reduces the number of sign-extension
+ instructions that get implied on 64-bit hosts. Users of tcg_gen_* don't
+ need to know about any of this, and should treat TCGv as an opaque type.
In addition we do typechecking for different types of variables. TCGv_i32
and TCGv_i64 are 32/64-bit variables respectively. TCGv and TCGv_ptr
- are aliases for target_ulong and host pointer sized values respectively.
- */
+ are aliases for target_ulong and host pointer sized values respectively. */
-#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
-/* Macros/structures for qemu_ld/st IR code optimization:
- TCG_MAX_HELPER_LABELS is defined as same as OPC_BUF_SIZE in exec-all.h. */
-#define TCG_MAX_QEMU_LDST 640
-
-typedef struct TCGLabelQemuLdst {
- int is_ld:1; /* qemu_ld: 1, qemu_st: 0 */
- int opc:4;
- int addrlo_reg; /* reg index for low word of guest virtual addr */
- int addrhi_reg; /* reg index for high word of guest virtual addr */
- int datalo_reg; /* reg index for low word to be loaded or stored */
- int datahi_reg; /* reg index for high word to be loaded or stored */
- int mem_index; /* soft MMU memory index */
- uint8_t *raddr; /* gen code addr of the next IR of qemu_ld/st IR */
- uint8_t *label_ptr[2]; /* label pointers to be updated */
-} TCGLabelQemuLdst;
-#endif
+typedef struct TCGv_i32_d *TCGv_i32;
+typedef struct TCGv_i64_d *TCGv_i64;
+typedef struct TCGv_ptr_d *TCGv_ptr;
-#ifdef CONFIG_DEBUG_TCG
-#define DEBUG_TCGV 1
-#endif
+static inline TCGv_i32 QEMU_ARTIFICIAL MAKE_TCGV_I32(intptr_t i)
+{
+ return (TCGv_i32)i;
+}
-#ifdef DEBUG_TCGV
+static inline TCGv_i64 QEMU_ARTIFICIAL MAKE_TCGV_I64(intptr_t i)
+{
+ return (TCGv_i64)i;
+}
-typedef struct
+static inline TCGv_ptr QEMU_ARTIFICIAL MAKE_TCGV_PTR(intptr_t i)
{
- int i32;
-} TCGv_i32;
+ return (TCGv_ptr)i;
+}
-typedef struct
+static inline intptr_t QEMU_ARTIFICIAL GET_TCGV_I32(TCGv_i32 t)
{
- int i64;
-} TCGv_i64;
-
-typedef struct {
- int iptr;
-} TCGv_ptr;
-
-#define MAKE_TCGV_I32(i) __extension__ \
- ({ TCGv_i32 make_tcgv_tmp = {i}; make_tcgv_tmp;})
-#define MAKE_TCGV_I64(i) __extension__ \
- ({ TCGv_i64 make_tcgv_tmp = {i}; make_tcgv_tmp;})
-#define MAKE_TCGV_PTR(i) __extension__ \
- ({ TCGv_ptr make_tcgv_tmp = {i}; make_tcgv_tmp; })
-#define GET_TCGV_I32(t) ((t).i32)
-#define GET_TCGV_I64(t) ((t).i64)
-#define GET_TCGV_PTR(t) ((t).iptr)
-#if TCG_TARGET_REG_BITS == 32
-#define TCGV_LOW(t) MAKE_TCGV_I32(GET_TCGV_I64(t))
-#define TCGV_HIGH(t) MAKE_TCGV_I32(GET_TCGV_I64(t) + 1)
-#endif
+ return (intptr_t)t;
+}
-#else /* !DEBUG_TCGV */
+static inline intptr_t QEMU_ARTIFICIAL GET_TCGV_I64(TCGv_i64 t)
+{
+ return (intptr_t)t;
+}
-typedef int TCGv_i32;
-typedef int TCGv_i64;
-#if TCG_TARGET_REG_BITS == 32
-#define TCGv_ptr TCGv_i32
-#else
-#define TCGv_ptr TCGv_i64
-#endif
-#define MAKE_TCGV_I32(x) (x)
-#define MAKE_TCGV_I64(x) (x)
-#define MAKE_TCGV_PTR(x) (x)
-#define GET_TCGV_I32(t) (t)
-#define GET_TCGV_I64(t) (t)
-#define GET_TCGV_PTR(t) (t)
+static inline intptr_t QEMU_ARTIFICIAL GET_TCGV_PTR(TCGv_ptr t)
+{
+ return (intptr_t)t;
+}
#if TCG_TARGET_REG_BITS == 32
-#define TCGV_LOW(t) (t)
-#define TCGV_HIGH(t) ((t) + 1)
+#define TCGV_LOW(t) MAKE_TCGV_I32(GET_TCGV_I64(t))
+#define TCGV_HIGH(t) MAKE_TCGV_I32(GET_TCGV_I64(t) + 1)
#endif
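+
+/* Note: TCGV_LOW/TCGV_HIGH assume that a 64-bit value occupies two
+ * consecutive 32-bit temp indices on 32-bit hosts, low half first.  */
+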
-#endif /* DEBUG_TCGV */
-
#define TCGV_EQUAL_I32(a, b) (GET_TCGV_I32(a) == GET_TCGV_I32(b))
#define TCGV_EQUAL_I64(a, b) (GET_TCGV_I64(a) == GET_TCGV_I64(b))
+#define TCGV_EQUAL_PTR(a, b) (GET_TCGV_PTR(a) == GET_TCGV_PTR(b))
/* Dummy definition to avoid compiler warnings. */
#define TCGV_UNUSED_I32(x) x = MAKE_TCGV_I32(-1)
#define TCGV_UNUSED_I64(x) x = MAKE_TCGV_I64(-1)
+#define TCGV_UNUSED_PTR(x) x = MAKE_TCGV_PTR(-1)
#define TCGV_IS_UNUSED_I32(x) (GET_TCGV_I32(x) == -1)
#define TCGV_IS_UNUSED_I64(x) (GET_TCGV_I64(x) == -1)
+#define TCGV_IS_UNUSED_PTR(x) (GET_TCGV_PTR(x) == -1)
/* call flags */
/* Helper does not read globals (either directly or through an exception). It
}
}
-#define TEMP_VAL_DEAD 0
-#define TEMP_VAL_REG 1
-#define TEMP_VAL_MEM 2
-#define TEMP_VAL_CONST 3
+typedef enum TCGTempVal {
+ TEMP_VAL_DEAD,
+ TEMP_VAL_REG,
+ TEMP_VAL_MEM,
+ TEMP_VAL_CONST,
+} TCGTempVal;
-/* XXX: optimize memory layout */
typedef struct TCGTemp {
- TCGType base_type;
- TCGType type;
- int val_type;
- int reg;
- tcg_target_long val;
- int mem_reg;
- tcg_target_long mem_offset;
+ TCGReg reg:8;
+ TCGTempVal val_type:8;
+ TCGType base_type:8;
+ TCGType type:8;
unsigned int fixed_reg:1;
+ unsigned int indirect_reg:1;
+ unsigned int indirect_base:1;
unsigned int mem_coherent:1;
unsigned int mem_allocated:1;
unsigned int temp_local:1; /* If true, the temp is saved across
basic blocks. Otherwise, it is not
preserved across basic blocks. */
unsigned int temp_allocated:1; /* never used for code gen */
- /* index of next free temp of same base type, -1 if end */
- int next_free_temp;
- const char *name;
-} TCGTemp;
-typedef struct TCGHelperInfo {
- tcg_target_ulong func;
+ tcg_target_long val;
+ struct TCGTemp *mem_base;
+ intptr_t mem_offset;
const char *name;
-} TCGHelperInfo;
+} TCGTemp;
typedef struct TCGContext TCGContext;
+typedef struct TCGTempSet {
+ unsigned long l[BITS_TO_LONGS(TCG_MAX_TEMPS)];
+} TCGTempSet;
+
+typedef struct TCGOp {
+ TCGOpcode opc : 8;
+
+ /* The number of out and in parameters for a call. */
+ unsigned callo : 2;
+ unsigned calli : 6;
+
+ /* Index of the arguments for this op, or -1 for zero-operand ops. */
+ signed args : 16;
+
+ /* Index of the prev/next op, or -1 for the end of the list. */
+ signed prev : 16;
+ signed next : 16;
+} TCGOp;
+
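+/* Make sure operands fit in the bitfields above. */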
+QEMU_BUILD_BUG_ON(NB_OPS > 0xff);
+QEMU_BUILD_BUG_ON(OPC_BUF_SIZE >= 0x7fff);
+QEMU_BUILD_BUG_ON(OPPARAM_BUF_SIZE >= 0x7fff);
+
struct TCGContext {
uint8_t *pool_cur, *pool_end;
TCGPool *pool_first, *pool_current, *pool_first_large;
- TCGLabel *labels;
int nb_labels;
int nb_globals;
int nb_temps;
- /* index of free temps, -1 if none */
- int first_free_temp[TCG_TYPE_COUNT * 2];
/* goto_tb support */
- uint8_t *code_buf;
+ tcg_insn_unit *code_buf;
uintptr_t *tb_next;
uint16_t *tb_next_offset;
uint16_t *tb_jmp_offset; /* != NULL if USE_DIRECT_JUMP */
corresponding output argument needs to be
sync to memory. */
- /* tells in which temporary a given register is. It does not take
- into account fixed registers */
- int reg_to_temp[TCG_TARGET_NB_REGS];
TCGRegSet reserved_regs;
- tcg_target_long current_frame_offset;
- tcg_target_long frame_start;
- tcg_target_long frame_end;
- int frame_reg;
+ intptr_t current_frame_offset;
+ intptr_t frame_start;
+ intptr_t frame_end;
+ TCGTemp *frame_temp;
- uint8_t *code_ptr;
- TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */
+ tcg_insn_unit *code_ptr;
- TCGHelperInfo *helpers;
- int nb_helpers;
- int allocated_helpers;
- int helpers_sorted;
+ GHashTable *helpers;
#ifdef CONFIG_PROFILER
/* profiling info */
int64_t del_op_count;
int64_t code_in_len;
int64_t code_out_len;
+ int64_t search_out_len;
int64_t interm_time;
int64_t code_time;
int64_t la_time;
int goto_tb_issue_mask;
#endif
- uint16_t gen_opc_buf[OPC_BUF_SIZE];
- TCGArg gen_opparam_buf[OPPARAM_BUF_SIZE];
-
- uint16_t *gen_opc_ptr;
- TCGArg *gen_opparam_ptr;
- target_ulong gen_opc_pc[OPC_BUF_SIZE];
- uint16_t gen_opc_icount[OPC_BUF_SIZE];
- uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
+ int gen_first_op_idx;
+ int gen_last_op_idx;
+ int gen_next_op_idx;
+ int gen_next_parm_idx;
- /* Code generation */
+ /* Code generation. Note that we specifically do not use tcg_insn_unit
+ here, because there's too much arithmetic throughout that relies
+ on addition and subtraction working on bytes. Rely on the GCC
+ extension that allows arithmetic on void*. */
int code_gen_max_blocks;
- uint8_t *code_gen_prologue;
- uint8_t *code_gen_buffer;
+ void *code_gen_prologue;
+ void *code_gen_buffer;
size_t code_gen_buffer_size;
- /* threshold to flush the translated code buffer */
- size_t code_gen_buffer_max_size;
- uint8_t *code_gen_ptr;
+ void *code_gen_ptr;
+
+ /* Threshold to flush the translated code buffer. */
+ void *code_gen_highwater;
TBContext tb_ctx;
-#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
- /* labels info for qemu_ld/st IRs
- The labels help to generate TLB miss case codes at the end of TB */
- TCGLabelQemuLdst *qemu_ldst_labels;
- int nb_qemu_ldst_labels;
-#endif
+ /* The TCGBackendData structure is private to tcg-target.inc.c. */
+ struct TCGBackendData *be;
+
+ TCGTempSet free_temps[TCG_TYPE_COUNT * 2];
+ TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */
+
+ /* Tells which temporary holds a given register.
+ It does not take into account fixed registers */
+ TCGTemp *reg_to_temp[TCG_TARGET_NB_REGS];
+
+ TCGOp gen_op_buf[OPC_BUF_SIZE];
+ TCGArg gen_opparam_buf[OPPARAM_BUF_SIZE];
+
+ uint16_t gen_insn_end_off[TCG_MAX_INSNS];
+ target_ulong gen_insn_data[TCG_MAX_INSNS][TARGET_INSN_START_WORDS];
};
extern TCGContext tcg_ctx;
+/* The number of opcodes emitted so far. */
+static inline int tcg_op_buf_count(void)
+{
+ return tcg_ctx.gen_next_op_idx;
+}
+
+/* Test for whether to terminate the TB for using too many opcodes. */
+static inline bool tcg_op_buf_full(void)
+{
+ return tcg_op_buf_count() >= OPC_MAX_SIZE;
+}
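+
+/* Minimal usage sketch (illustrative): a target's translation loop would
+ * typically stop filling the current TB with
+ *
+ *     if (tcg_op_buf_full()) {
+ *         break;   // end this TB; remaining insns go into the next one
+ *     }
+ */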
+
/* pool based memory allocation */
void *tcg_malloc_internal(TCGContext *s, int size);
void tcg_pool_reset(TCGContext *s);
void tcg_pool_delete(TCGContext *s);
+void tb_lock(void);
+void tb_unlock(void);
+void tb_lock_reset(void);
+
static inline void *tcg_malloc(int size)
{
TCGContext *s = &tcg_ctx;
void tcg_prologue_init(TCGContext *s);
void tcg_func_start(TCGContext *s);
-int tcg_gen_code(TCGContext *s, uint8_t *gen_code_buf);
-int tcg_gen_code_search_pc(TCGContext *s, uint8_t *gen_code_buf, long offset);
+int tcg_gen_code(TCGContext *s, tcg_insn_unit *gen_code_buf);
-void tcg_set_frame(TCGContext *s, int reg,
- tcg_target_long start, tcg_target_long size);
+void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size);
+
+int tcg_global_mem_new_internal(TCGType, TCGv_ptr, intptr_t, const char *);
+
+TCGv_i32 tcg_global_reg_new_i32(TCGReg reg, const char *name);
+TCGv_i64 tcg_global_reg_new_i64(TCGReg reg, const char *name);
-TCGv_i32 tcg_global_reg_new_i32(int reg, const char *name);
-TCGv_i32 tcg_global_mem_new_i32(int reg, tcg_target_long offset,
- const char *name);
TCGv_i32 tcg_temp_new_internal_i32(int temp_local);
+TCGv_i64 tcg_temp_new_internal_i64(int temp_local);
+
+void tcg_temp_free_i32(TCGv_i32 arg);
+void tcg_temp_free_i64(TCGv_i64 arg);
+
+static inline TCGv_i32 tcg_global_mem_new_i32(TCGv_ptr reg, intptr_t offset,
+ const char *name)
+{
+ int idx = tcg_global_mem_new_internal(TCG_TYPE_I32, reg, offset, name);
+ return MAKE_TCGV_I32(idx);
+}
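+
+/* Usage sketch ("foo" is a hypothetical CPUArchState field): expose an
+ * env field as a 32-bit global, given a TCGv_ptr base such as cpu_env:
+ *
+ *     TCGv_i32 cpu_foo = tcg_global_mem_new_i32(cpu_env,
+ *                            offsetof(CPUArchState, foo), "foo");
+ */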
+
static inline TCGv_i32 tcg_temp_new_i32(void)
{
return tcg_temp_new_internal_i32(0);
}
+
static inline TCGv_i32 tcg_temp_local_new_i32(void)
{
return tcg_temp_new_internal_i32(1);
}
-void tcg_temp_free_i32(TCGv_i32 arg);
-char *tcg_get_arg_str_i32(TCGContext *s, char *buf, int buf_size, TCGv_i32 arg);
-TCGv_i64 tcg_global_reg_new_i64(int reg, const char *name);
-TCGv_i64 tcg_global_mem_new_i64(int reg, tcg_target_long offset,
- const char *name);
-TCGv_i64 tcg_temp_new_internal_i64(int temp_local);
+static inline TCGv_i64 tcg_global_mem_new_i64(TCGv_ptr reg, intptr_t offset,
+ const char *name)
+{
+ int idx = tcg_global_mem_new_internal(TCG_TYPE_I64, reg, offset, name);
+ return MAKE_TCGV_I64(idx);
+}
+
static inline TCGv_i64 tcg_temp_new_i64(void)
{
return tcg_temp_new_internal_i64(0);
}
+
static inline TCGv_i64 tcg_temp_local_new_i64(void)
{
return tcg_temp_new_internal_i64(1);
}
-void tcg_temp_free_i64(TCGv_i64 arg);
-char *tcg_get_arg_str_i64(TCGContext *s, char *buf, int buf_size, TCGv_i64 arg);
#if defined(CONFIG_DEBUG_TCG)
/* If you call tcg_clear_temp_count() at the start of a section of
#endif
void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf);
+void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf);
#define TCG_CT_ALIAS 0x80
#define TCG_CT_IALIAS 0x40
void tcg_add_target_add_op_defs(const TCGTargetOpDef *tdefs);
-#if TCG_TARGET_REG_BITS == 32
+#if UINTPTR_MAX == UINT32_MAX
#define TCGV_NAT_TO_PTR(n) MAKE_TCGV_PTR(GET_TCGV_I32(n))
#define TCGV_PTR_TO_NAT(n) MAKE_TCGV_I32(GET_TCGV_PTR(n))
-#define tcg_const_ptr(V) TCGV_NAT_TO_PTR(tcg_const_i32((tcg_target_long)(V)))
+#define tcg_const_ptr(V) TCGV_NAT_TO_PTR(tcg_const_i32((intptr_t)(V)))
#define tcg_global_reg_new_ptr(R, N) \
TCGV_NAT_TO_PTR(tcg_global_reg_new_i32((R), (N)))
#define tcg_global_mem_new_ptr(R, O, N) \
#define TCGV_NAT_TO_PTR(n) MAKE_TCGV_PTR(GET_TCGV_I64(n))
#define TCGV_PTR_TO_NAT(n) MAKE_TCGV_I64(GET_TCGV_PTR(n))
-#define tcg_const_ptr(V) TCGV_NAT_TO_PTR(tcg_const_i64((tcg_target_long)(V)))
+#define tcg_const_ptr(V) TCGV_NAT_TO_PTR(tcg_const_i64((intptr_t)(V)))
#define tcg_global_reg_new_ptr(R, N) \
TCGV_NAT_TO_PTR(tcg_global_reg_new_i64((R), (N)))
#define tcg_global_mem_new_ptr(R, O, N) \
#define tcg_temp_free_ptr(T) tcg_temp_free_i64(TCGV_PTR_TO_NAT(T))
#endif
-void tcg_gen_callN(TCGContext *s, TCGv_ptr func, unsigned int flags,
- int sizemask, TCGArg ret, int nargs, TCGArg *args);
-
-void tcg_gen_shifti_i64(TCGv_i64 ret, TCGv_i64 arg1,
- int c, int right, int arith);
+void tcg_gen_callN(TCGContext *s, void *func,
+ TCGArg ret, int nargs, TCGArg *args);
-TCGArg *tcg_optimize(TCGContext *s, uint16_t *tcg_opc_ptr, TCGArg *args,
- TCGOpDef *tcg_op_def);
+void tcg_op_remove(TCGContext *s, TCGOp *op);
+void tcg_optimize(TCGContext *s);
/* only used for debugging purposes */
-void tcg_register_helper(void *func, const char *name);
-const char *tcg_helper_get_name(TCGContext *s, void *func);
void tcg_dump_ops(TCGContext *s);
void dump_ops(const uint16_t *opc_buf, const TCGArg *opparam_buf);
TCGv_i32 tcg_const_local_i32(int32_t val);
TCGv_i64 tcg_const_local_i64(int64_t val);
+TCGLabel *gen_new_label(void);
+
+/**
+ * label_arg
+ * @l: label
+ *
+ * Encode a label for storage in the TCG opcode stream.
+ */
+
+static inline TCGArg label_arg(TCGLabel *l)
+{
+ return (uintptr_t)l;
+}
+
+/**
+ * arg_label
+ * @i: value
+ *
+ * The opposite of label_arg. Retrieve a label from the
+ * encoding of the TCG opcode stream.
+ */
+
+static inline TCGLabel *arg_label(TCGArg i)
+{
+ return (TCGLabel *)(uintptr_t)i;
+}
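+
+/* The two helpers above form an identity pair:
+ * arg_label(label_arg(l)) == l for any TCGLabel *l.  */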
+
+/**
+ * tcg_ptr_byte_diff
+ * @a, @b: addresses to be differenced
+ *
+ * There are many places within the TCG backends where we need a byte
+ * difference between two pointers. While this can be accomplished
+ * with local casting, it's easy to get wrong -- especially if one is
+ * concerned with the signedness of the result.
+ *
+ * This version relies on GCC's void pointer arithmetic to get the
+ * correct result.
+ */
+
+static inline ptrdiff_t tcg_ptr_byte_diff(void *a, void *b)
+{
+ return a - b;
+}
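+
+/* Example (comment only): with a == b + 2, tcg_ptr_byte_diff(a, b) is 2
+ * and tcg_ptr_byte_diff(b, a) is -2; the signed ptrdiff_t result keeps
+ * the direction of the difference.  */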
+
+/**
+ * tcg_pcrel_diff
+ * @s: the tcg context
+ * @target: address of the target
+ *
+ * Produce a pc-relative difference, from the current code_ptr
+ * to the destination address.
+ */
+
+static inline ptrdiff_t tcg_pcrel_diff(TCGContext *s, void *target)
+{
+ return tcg_ptr_byte_diff(target, s->code_ptr);
+}
+
+/**
+ * tcg_current_code_size
+ * @s: the tcg context
+ *
+ * Compute the current code size within the translation block.
+ * This is used to fill in qemu's data structures for goto_tb.
+ */
+
+static inline size_t tcg_current_code_size(TCGContext *s)
+{
+ return tcg_ptr_byte_diff(s->code_ptr, s->code_buf);
+}
+
+/* Combine the TCGMemOp and mmu_idx parameters into a single value. */
+typedef uint32_t TCGMemOpIdx;
+
+/**
+ * make_memop_idx
+ * @op: memory operation
+ * @idx: mmu index
+ *
+ * Encode these values into a single parameter.
+ */
+static inline TCGMemOpIdx make_memop_idx(TCGMemOp op, unsigned idx)
+{
+ tcg_debug_assert(idx <= 15);
+ return (op << 4) | idx;
+}
+
+/**
+ * get_memop
+ * @oi: combined op/idx parameter
+ *
+ * Extract the memory operation from the combined value.
+ */
+static inline TCGMemOp get_memop(TCGMemOpIdx oi)
+{
+ return oi >> 4;
+}
+
+/**
+ * get_mmuidx
+ * @oi: combined op/idx parameter
+ *
+ * Extract the mmu index from the combined value.
+ */
+static inline unsigned get_mmuidx(TCGMemOpIdx oi)
+{
+ return oi & 15;
+}
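+
+/* Round-trip sketch for the three helpers above (illustrative):
+ *
+ *     TCGMemOpIdx oi = make_memop_idx(MO_TEUL, mmu_idx);
+ *     assert(get_memop(oi) == MO_TEUL);
+ *     assert(get_mmuidx(oi) == mmu_idx);
+ */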
+
/**
* tcg_qemu_tb_exec:
* @env: CPUArchState * for the CPU
* state is correctly synchronised and ready for execution of the next
* TB (and in particular the guest PC is the address to execute next).
* Otherwise, we gave up on execution of this TB before it started, and
- * the caller must fix up the CPU state by calling cpu_pc_from_tb()
- * with the next-TB pointer we return.
+ * the caller must fix up the CPU state by calling the CPU's
+ * synchronize_from_tb() method with the next-TB pointer we return (falling
+ * back to calling the CPU's set_pc method with tb->pc if no
+ * synchronize_from_tb() method exists).
*
* Note that TCG targets may use a different definition of tcg_qemu_tb_exec
* to this default (which just calls the prologue.code emitted by
#define TB_EXIT_ICOUNT_EXPIRED 2
#define TB_EXIT_REQUESTED 3
-#if !defined(tcg_qemu_tb_exec)
+#ifdef HAVE_TCG_QEMU_TB_EXEC
+uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr);
+#else
# define tcg_qemu_tb_exec(env, tb_ptr) \
- ((tcg_target_ulong (*)(void *, void *))tcg_ctx.code_gen_prologue)(env, \
- tb_ptr)
+ ((uintptr_t (*)(void *, void *))tcg_ctx.code_gen_prologue)(env, tb_ptr)
#endif
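+
+/* Illustrative decode of the returned value (assumes TB_EXIT_MASK is
+ * defined alongside the exit codes above):
+ *
+ *     uintptr_t ret = tcg_qemu_tb_exec(env, tb_ptr);
+ *     TranslationBlock *last_tb = (void *)(ret & ~TB_EXIT_MASK);
+ *     int exit_reason = ret & TB_EXIT_MASK;
+ */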
void tcg_register_jit(void *buf, size_t buf_size);
-#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
-/* Generate TB finalization at the end of block */
-void tcg_out_tb_finalize(TCGContext *s);
+/*
+ * Memory helpers that will be used by TCG generated code.
+ */
+#ifdef CONFIG_SOFTMMU
+/* Value zero-extended to tcg register size. */
+tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+
+/* Value sign-extended to tcg register size. */
+tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+
+void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+
+uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+
+/* Temporary aliases until backends are converted. */
+#ifdef TARGET_WORDS_BIGENDIAN
+# define helper_ret_ldsw_mmu helper_be_ldsw_mmu
+# define helper_ret_lduw_mmu helper_be_lduw_mmu
+# define helper_ret_ldsl_mmu helper_be_ldsl_mmu
+# define helper_ret_ldul_mmu helper_be_ldul_mmu
+# define helper_ret_ldl_mmu helper_be_ldul_mmu
+# define helper_ret_ldq_mmu helper_be_ldq_mmu
+# define helper_ret_stw_mmu helper_be_stw_mmu
+# define helper_ret_stl_mmu helper_be_stl_mmu
+# define helper_ret_stq_mmu helper_be_stq_mmu
+# define helper_ret_ldw_cmmu helper_be_ldw_cmmu
+# define helper_ret_ldl_cmmu helper_be_ldl_cmmu
+# define helper_ret_ldq_cmmu helper_be_ldq_cmmu
+#else
+# define helper_ret_ldsw_mmu helper_le_ldsw_mmu
+# define helper_ret_lduw_mmu helper_le_lduw_mmu
+# define helper_ret_ldsl_mmu helper_le_ldsl_mmu
+# define helper_ret_ldul_mmu helper_le_ldul_mmu
+# define helper_ret_ldl_mmu helper_le_ldul_mmu
+# define helper_ret_ldq_mmu helper_le_ldq_mmu
+# define helper_ret_stw_mmu helper_le_stw_mmu
+# define helper_ret_stl_mmu helper_le_stl_mmu
+# define helper_ret_stq_mmu helper_le_stq_mmu
+# define helper_ret_ldw_cmmu helper_le_ldw_cmmu
+# define helper_ret_ldl_cmmu helper_le_ldl_cmmu
+# define helper_ret_ldq_cmmu helper_le_ldq_cmmu
#endif
+
+#endif /* CONFIG_SOFTMMU */
+
+#endif /* TCG_H */