int nb_globals;
int nb_temps;
int nb_indirects;
+ int nb_ops;
/* goto_tb support */
tcg_insn_unit *code_buf;
/* Threshold to flush the translated code buffer. */
void *code_gen_highwater;
+ size_t tb_phys_invalidate_count;
+
/* Track which vCPU triggers events */
CPUState *cpu; /* *_trans */
/* These structures are private to tcg-target.inc.c. */
#ifdef TCG_TARGET_NEED_LDST_LABELS
- struct TCGLabelQemuLdst *ldst_labels;
+ QSIMPLEQ_HEAD(ldst_labels, TCGLabelQemuLdst) ldst_labels;
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
struct TCGLabelPoolData *pool_labels;
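/*
 * Illustrative sketch, not part of this patch: with ldst_labels now a
 * QSIMPLEQ, a backend initializes and walks the slow-path list with the
 * queue.h macros rather than a hand-rolled pointer chain:
 *
 *     QSIMPLEQ_INIT(&s->ldst_labels);
 *     ...
 *     TCGLabelQemuLdst *lb;
 *     QSIMPLEQ_FOREACH(lb, &s->ldst_labels, next) {
 *         ... patch the slow path for lb ...
 *     }
 *
 * This assumes TCGLabelQemuLdst gains a QSIMPLEQ_ENTRY field named "next".
 */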
/* Test for whether to terminate the TB for using too many opcodes. */
static inline bool tcg_op_buf_full(void)
{
- return false;
+ /* This is not a hard limit, it merely stops translation when
+ * we have produced "enough" opcodes. We want to limit TB size
+ * such that a RISC host can reasonably use a 16-bit signed
+ * branch within the TB. We also need to be mindful of the
+ * 16-bit unsigned offsets, TranslationBlock.jmp_reset_offset[]
+ * and TCGContext.gen_insn_end_off[].
+ */
+ return tcg_ctx->nb_ops >= 4000;
}
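/*
 * Illustrative sketch, not part of this patch: a translator loop is
 * expected to poll tcg_op_buf_full() between guest instructions and end
 * the TB gracefully once the soft limit is reached:
 *
 *     while (db->is_jmp == DISAS_NEXT) {
 *         translate_one_insn(db, cpu);   // hypothetical per-target hook
 *         if (tcg_op_buf_full()) {
 *             db->is_jmp = DISAS_TOO_MANY;
 *         }
 *     }
 *
 * DISAS_NEXT and DISAS_TOO_MANY are the generic translator dispositions;
 * translate_one_insn() is a placeholder name.
 */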
/* pool based memory allocation */
-/* user-mode: tb_lock must be held for tcg_malloc_internal. */
+/* user-mode: mmap_lock must be held for tcg_malloc_internal. */
void *tcg_malloc_internal(TCGContext *s, int size);
void tcg_pool_reset(TCGContext *s);
TranslationBlock *tcg_tb_alloc(TCGContext *s);
size_t tcg_code_size(void);
size_t tcg_code_capacity(void);
-/* user-mode: Called with tb_lock held. */
+void tcg_tb_insert(TranslationBlock *tb);
+void tcg_tb_remove(TranslationBlock *tb);
+size_t tcg_tb_phys_invalidate_count(void);
+TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr);
+void tcg_tb_foreach(GTraverseFunc func, gpointer user_data);
+size_t tcg_nb_tbs(void);
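/*
 * Illustrative sketch, not part of this patch: these entry points keep
 * the TB tree private to tcg/.  Mapping a host return address back to
 * its TranslationBlock, e.g. when unwinding from a helper, becomes:
 *
 *     TranslationBlock *tb = tcg_tb_lookup(retaddr);
 *     if (tb) {
 *         ... restore guest state from tb ...
 *     }
 *
 * and statistics code can visit every live TB with tcg_tb_foreach().
 */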
+
+/* user-mode: Called with mmap_lock held. */
static inline void *tcg_malloc(int size)
{
TCGContext *s = tcg_ctx;
TCGTemp *tcg_global_mem_new_internal(TCGType, TCGv_ptr,
intptr_t, const char *);
-
-TCGv_i32 tcg_temp_new_internal_i32(int temp_local);
-TCGv_i64 tcg_temp_new_internal_i64(int temp_local);
+TCGTemp *tcg_temp_new_internal(TCGType, bool);
+void tcg_temp_free_internal(TCGTemp *);
TCGv_vec tcg_temp_new_vec(TCGType type);
TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match);
-void tcg_temp_free_i32(TCGv_i32 arg);
-void tcg_temp_free_i64(TCGv_i64 arg);
-void tcg_temp_free_vec(TCGv_vec arg);
+static inline void tcg_temp_free_i32(TCGv_i32 arg)
+{
+ tcg_temp_free_internal(tcgv_i32_temp(arg));
+}
+
+static inline void tcg_temp_free_i64(TCGv_i64 arg)
+{
+ tcg_temp_free_internal(tcgv_i64_temp(arg));
+}
+
+static inline void tcg_temp_free_ptr(TCGv_ptr arg)
+{
+ tcg_temp_free_internal(tcgv_ptr_temp(arg));
+}
+
+static inline void tcg_temp_free_vec(TCGv_vec arg)
+{
+ tcg_temp_free_internal(tcgv_vec_temp(arg));
+}
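/*
 * Illustrative sketch, not part of this patch: the typed free functions
 * above are now thin inline wrappers, so the usual temp lifecycle in a
 * front end is unchanged:
 *
 *     TCGv_i32 t = tcg_temp_new_i32();
 *     tcg_gen_movi_i32(t, 0);
 *     ...
 *     tcg_temp_free_i32(t);   // funnels into tcg_temp_free_internal()
 */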
static inline TCGv_i32 tcg_global_mem_new_i32(TCGv_ptr reg, intptr_t offset,
const char *name)
static inline TCGv_i32 tcg_temp_new_i32(void)
{
- return tcg_temp_new_internal_i32(0);
+ TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, false);
+ return temp_tcgv_i32(t);
}
static inline TCGv_i32 tcg_temp_local_new_i32(void)
{
- return tcg_temp_new_internal_i32(1);
+ TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, true);
+ return temp_tcgv_i32(t);
}
static inline TCGv_i64 tcg_global_mem_new_i64(TCGv_ptr reg, intptr_t offset,
static inline TCGv_i64 tcg_temp_new_i64(void)
{
- return tcg_temp_new_internal_i64(0);
+ TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, false);
+ return temp_tcgv_i64(t);
}
static inline TCGv_i64 tcg_temp_local_new_i64(void)
{
- return tcg_temp_new_internal_i64(1);
+ TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, true);
+ return temp_tcgv_i64(t);
+}
+
+static inline TCGv_ptr tcg_global_mem_new_ptr(TCGv_ptr reg, intptr_t offset,
+ const char *name)
+{
+ TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_PTR, reg, offset, name);
+ return temp_tcgv_ptr(t);
+}
+
+static inline TCGv_ptr tcg_temp_new_ptr(void)
+{
+ TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, false);
+ return temp_tcgv_ptr(t);
+}
+
+static inline TCGv_ptr tcg_temp_local_new_ptr(void)
+{
+ TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, true);
+ return temp_tcgv_ptr(t);
}
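/*
 * Illustrative sketch, not part of this patch: TCGv_ptr values are now
 * created through the same typed helpers as i32/i64, with no casting
 * through the host word size:
 *
 *     TCGv_ptr p = tcg_temp_new_ptr();
 *     tcg_gen_addi_ptr(p, cpu_env, offsetof(CPUArchState, some_field));
 *     ...
 *     tcg_temp_free_ptr(p);
 *
 * "some_field" stands in for a real CPUArchState member.
 */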
#if defined(CONFIG_DEBUG_TCG)
abort();\
} while (0)
-#if UINTPTR_MAX == UINT32_MAX
-static inline TCGv_ptr TCGV_NAT_TO_PTR(TCGv_i32 n) { return (TCGv_ptr)n; }
-static inline TCGv_i32 TCGV_PTR_TO_NAT(TCGv_ptr n) { return (TCGv_i32)n; }
-
-#define tcg_const_ptr(V) TCGV_NAT_TO_PTR(tcg_const_i32((intptr_t)(V)))
-#define tcg_global_mem_new_ptr(R, O, N) \
- TCGV_NAT_TO_PTR(tcg_global_mem_new_i32((R), (O), (N)))
-#define tcg_temp_new_ptr() TCGV_NAT_TO_PTR(tcg_temp_new_i32())
-#define tcg_temp_free_ptr(T) tcg_temp_free_i32(TCGV_PTR_TO_NAT(T))
-#else
-static inline TCGv_ptr TCGV_NAT_TO_PTR(TCGv_i64 n) { return (TCGv_ptr)n; }
-static inline TCGv_i64 TCGV_PTR_TO_NAT(TCGv_ptr n) { return (TCGv_i64)n; }
-
-#define tcg_const_ptr(V) TCGV_NAT_TO_PTR(tcg_const_i64((intptr_t)(V)))
-#define tcg_global_mem_new_ptr(R, O, N) \
- TCGV_NAT_TO_PTR(tcg_global_mem_new_i64((R), (O), (N)))
-#define tcg_temp_new_ptr() TCGV_NAT_TO_PTR(tcg_temp_new_i64())
-#define tcg_temp_free_ptr(T) tcg_temp_free_i64(TCGV_PTR_TO_NAT(T))
-#endif
-
bool tcg_op_supported(TCGOpcode op);
void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args);
TCGv_vec tcg_const_zeros_vec_matching(TCGv_vec);
TCGv_vec tcg_const_ones_vec_matching(TCGv_vec);
+#if UINTPTR_MAX == UINT32_MAX
+# define tcg_const_ptr(x) ((TCGv_ptr)tcg_const_i32((intptr_t)(x)))
+# define tcg_const_local_ptr(x) ((TCGv_ptr)tcg_const_local_i32((intptr_t)(x)))
+#else
+# define tcg_const_ptr(x) ((TCGv_ptr)tcg_const_i64((intptr_t)(x)))
+# define tcg_const_local_ptr(x) ((TCGv_ptr)tcg_const_local_i64((intptr_t)(x)))
+#endif
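/*
 * Illustrative sketch, not part of this patch: tcg_const_ptr() yields a
 * TCGv_ptr holding a host address, typically used to hand a host
 * structure to a helper:
 *
 *     TCGv_ptr sp = tcg_const_ptr(some_host_ptr);   // placeholder pointer
 *     gen_helper_foo(cpu_env, sp);                  // hypothetical helper
 *     tcg_temp_free_ptr(sp);
 */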
+
TCGLabel *gen_new_label(void);
/**
* to this default (which just calls the prologue.code emitted by
* tcg_target_qemu_prologue()).
*/
-#define TB_EXIT_MASK 3
-#define TB_EXIT_IDX0 0
-#define TB_EXIT_IDX1 1
+#define TB_EXIT_MASK      3
+#define TB_EXIT_IDX0      0
+#define TB_EXIT_IDX1      1
+#define TB_EXIT_IDXMAX    1
#define TB_EXIT_REQUESTED 3
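/*
 * Illustrative sketch, not part of this patch: the uintptr_t returned by
 * tcg_qemu_tb_exec() packs the last executed TB pointer with one of the
 * TB_EXIT_* codes in its low bits:
 *
 *     uintptr_t ret = tcg_qemu_tb_exec(env, tb_ptr);
 *     TranslationBlock *last_tb = (void *)(ret & ~TB_EXIT_MASK);
 *     int exit_idx = ret & TB_EXIT_MASK;  // <= TB_EXIT_IDXMAX: goto_tb slot
 */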
#ifdef HAVE_TCG_QEMU_TB_EXEC
GEN_ATOMIC_HELPER_ALL(fetch_and)
GEN_ATOMIC_HELPER_ALL(fetch_or)
GEN_ATOMIC_HELPER_ALL(fetch_xor)
+GEN_ATOMIC_HELPER_ALL(fetch_smin)
+GEN_ATOMIC_HELPER_ALL(fetch_umin)
+GEN_ATOMIC_HELPER_ALL(fetch_smax)
+GEN_ATOMIC_HELPER_ALL(fetch_umax)
GEN_ATOMIC_HELPER_ALL(add_fetch)
GEN_ATOMIC_HELPER_ALL(sub_fetch)
GEN_ATOMIC_HELPER_ALL(and_fetch)
GEN_ATOMIC_HELPER_ALL(or_fetch)
GEN_ATOMIC_HELPER_ALL(xor_fetch)
+GEN_ATOMIC_HELPER_ALL(smin_fetch)
+GEN_ATOMIC_HELPER_ALL(umin_fetch)
+GEN_ATOMIC_HELPER_ALL(smax_fetch)
+GEN_ATOMIC_HELPER_ALL(umax_fetch)
GEN_ATOMIC_HELPER_ALL(xchg)
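/*
 * Illustrative sketch, not part of this patch: each GEN_ATOMIC_HELPER_ALL
 * line declares the size/endianness helper variants backing the matching
 * tcg_gen_atomic_* front end, so with the new min/max entries a front end
 * can emit, e.g.:
 *
 *     tcg_gen_atomic_fetch_umax_tl(ret, addr, val, mem_idx, MO_TEUL);
 *
 * assuming the corresponding tcg-op declarations are added alongside
 * these helpers.
 */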