#else
#define MAX_OPC_PARAM_PER_ARG 1
#endif
-#define MAX_OPC_PARAM_IARGS 5
+#define MAX_OPC_PARAM_IARGS 6
#define MAX_OPC_PARAM_OARGS 1
#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)
# error "Missing unsigned widening multiply"
#endif
+#if !defined(TCG_TARGET_HAS_v64) \
+ && !defined(TCG_TARGET_HAS_v128) \
+ && !defined(TCG_TARGET_HAS_v256)
+#define TCG_TARGET_MAYBE_vec 0
+#define TCG_TARGET_HAS_neg_vec 0
+#define TCG_TARGET_HAS_not_vec 0
+#define TCG_TARGET_HAS_andc_vec 0
+#define TCG_TARGET_HAS_orc_vec 0
+#define TCG_TARGET_HAS_shi_vec 0
+#define TCG_TARGET_HAS_shs_vec 0
+#define TCG_TARGET_HAS_shv_vec 0
+#define TCG_TARGET_HAS_mul_vec 0
+#else
+#define TCG_TARGET_MAYBE_vec 1
+#endif
+#ifndef TCG_TARGET_HAS_v64
+#define TCG_TARGET_HAS_v64 0
+#endif
+#ifndef TCG_TARGET_HAS_v128
+#define TCG_TARGET_HAS_v128 0
+#endif
+#ifndef TCG_TARGET_HAS_v256
+#define TCG_TARGET_HAS_v256 0
+#endif
+
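The defaults above exist so that generic code can test the vector capabilities without per-target guards; a minimal sketch of the intended pattern, with the guarded bodies purely illustrative:

    #if TCG_TARGET_HAS_v128
        /* emit 128-bit vector code */
    #else
        /* fall back to integer expansion */
    #endif
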
#ifndef TARGET_INSN_START_EXTRA_WORDS
# define TARGET_INSN_START_WORDS 1
#else
typedef enum TCGType {
TCG_TYPE_I32,
TCG_TYPE_I64,
+
+ TCG_TYPE_V64,
+ TCG_TYPE_V128,
+ TCG_TYPE_V256,
+
TCG_TYPE_COUNT, /* number of different types */
/* An alias for the size of the host register. */
* TCGv_i32 : 32 bit integer type
* TCGv_i64 : 64 bit integer type
* TCGv_ptr : a host pointer type
+ * TCGv_vec : a host vector type; the exact size is not exposed
+ to the CPU front-end code.
* TCGv : an integer type the same size as target_ulong
(an alias for either TCGv_i32 or TCGv_i64)
The compiler's type checking will complain if you mix them
typedef struct TCGv_i32_d *TCGv_i32;
typedef struct TCGv_i64_d *TCGv_i64;
typedef struct TCGv_ptr_d *TCGv_ptr;
+typedef struct TCGv_vec_d *TCGv_vec;
typedef TCGv_ptr TCGv_env;
#if TARGET_LONG_BITS == 32
#define TCGv TCGv_i32
return c & 2 ? (TCGCond)(c ^ 6) : c;
}
+/* Create a "signed" version of an "unsigned" comparison. */
+static inline TCGCond tcg_signed_cond(TCGCond c)
+{
+ return c & 4 ? (TCGCond)(c ^ 6) : c;
+}
+
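In the existing TCGCond encoding the bit with value 2 marks a signed comparison and the bit with value 4 marks an unsigned one, so XOR with 6 swaps between the two families. For example:

    tcg_signed_cond(TCG_COND_LTU) == TCG_COND_LT    /* 4 ^ 6 == 2 */
    tcg_signed_cond(TCG_COND_EQ)  == TCG_COND_EQ    /* bit 4 clear, unchanged */
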
/* Must a comparison be considered unsigned? */
static inline bool is_unsigned_cond(TCGCond c)
{
#define TCGOP_CALLI(X) (X)->param1
#define TCGOP_CALLO(X) (X)->param2
+#define TCGOP_VECL(X) (X)->param1
+#define TCGOP_VECE(X) (X)->param2
+
/* Make sure operands fit in the bitfields above. */
QEMU_BUILD_BUG_ON(NB_OPS > (1 << 8));
int nb_globals;
int nb_temps;
int nb_indirects;
+ int nb_ops;
/* goto_tb support */
tcg_insn_unit *code_buf;
/* Threshold to flush the translated code buffer. */
void *code_gen_highwater;
+ size_t tb_phys_invalidate_count;
+
/* Track which vCPU triggers events */
CPUState *cpu; /* *_trans */
/* These structures are private to tcg-target.inc.c. */
#ifdef TCG_TARGET_NEED_LDST_LABELS
- struct TCGLabelQemuLdst *ldst_labels;
+ QSIMPLEQ_HEAD(ldst_labels, TCGLabelQemuLdst) ldst_labels;
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
struct TCGLabelPoolData *pool_labels;
return tcgv_i32_temp((TCGv_i32)v);
}
+static inline TCGTemp *tcgv_vec_temp(TCGv_vec v)
+{
+ return tcgv_i32_temp((TCGv_i32)v);
+}
+
static inline TCGArg tcgv_i32_arg(TCGv_i32 v)
{
return temp_arg(tcgv_i32_temp(v));
return temp_arg(tcgv_ptr_temp(v));
}
+static inline TCGArg tcgv_vec_arg(TCGv_vec v)
+{
+ return temp_arg(tcgv_vec_temp(v));
+}
+
static inline TCGv_i32 temp_tcgv_i32(TCGTemp *t)
{
(void)temp_idx(t); /* trigger embedded assert */
return (TCGv_ptr)temp_tcgv_i32(t);
}
+static inline TCGv_vec temp_tcgv_vec(TCGTemp *t)
+{
+ return (TCGv_vec)temp_tcgv_i32(t);
+}
+
#if TCG_TARGET_REG_BITS == 32
static inline TCGv_i32 TCGV_LOW(TCGv_i64 t)
{
op->args[arg] = v;
}
+static inline void tcg_set_insn_start_param(TCGOp *op, int arg, target_ulong v)
+{
+#if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
+ tcg_set_insn_param(op, arg, v);
+#else
+ tcg_set_insn_param(op, arg * 2, v);
+ tcg_set_insn_param(op, arg * 2 + 1, v >> 32);
+#endif
+}
+
/* The last op that was emitted. */
static inline TCGOp *tcg_last_op(void)
{
/* Test for whether to terminate the TB for using too many opcodes. */
static inline bool tcg_op_buf_full(void)
{
- return false;
+ /* This is not a hard limit, it merely stops translation when
+ * we have produced "enough" opcodes. We want to limit TB size
+ * such that a RISC host can reasonably use a 16-bit signed
+ * branch within the TB. We also need to be mindful of the
+ * 16-bit unsigned offsets, TranslationBlock.jmp_reset_offset[]
+ * and TCGContext.gen_insn_end_off[].
+ */
+ return tcg_ctx->nb_ops >= 4000;
}
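As a rough sanity check on the 4000-op threshold: assuming on the order of 8 bytes of generated host code per op, a full TB comes to roughly 4000 x 8 = 32 KB, which is about the reach of a 16-bit signed byte-offset branch and within the 16-bit unsigned offsets mentioned in the comment.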
/* pool based memory allocation */
-/* user-mode: tb_lock must be held for tcg_malloc_internal. */
+/* user-mode: mmap_lock must be held for tcg_malloc_internal. */
void *tcg_malloc_internal(TCGContext *s, int size);
void tcg_pool_reset(TCGContext *s);
TranslationBlock *tcg_tb_alloc(TCGContext *s);
size_t tcg_code_size(void);
size_t tcg_code_capacity(void);
-/* user-mode: Called with tb_lock held. */
+void tcg_tb_insert(TranslationBlock *tb);
+void tcg_tb_remove(TranslationBlock *tb);
+size_t tcg_tb_phys_invalidate_count(void);
+TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr);
+void tcg_tb_foreach(GTraverseFunc func, gpointer user_data);
+size_t tcg_nb_tbs(void);
+
+/* user-mode: Called with mmap_lock held. */
static inline void *tcg_malloc(int size)
{
TCGContext *s = tcg_ctx;
TCGTemp *tcg_global_mem_new_internal(TCGType, TCGv_ptr,
intptr_t, const char *);
-TCGv_i32 tcg_temp_new_internal_i32(int temp_local);
-TCGv_i64 tcg_temp_new_internal_i64(int temp_local);
-void tcg_temp_free_i32(TCGv_i32 arg);
-void tcg_temp_free_i64(TCGv_i64 arg);
+TCGTemp *tcg_temp_new_internal(TCGType, bool);
+void tcg_temp_free_internal(TCGTemp *);
+TCGv_vec tcg_temp_new_vec(TCGType type);
+TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match);
+
+static inline void tcg_temp_free_i32(TCGv_i32 arg)
+{
+ tcg_temp_free_internal(tcgv_i32_temp(arg));
+}
+
+static inline void tcg_temp_free_i64(TCGv_i64 arg)
+{
+ tcg_temp_free_internal(tcgv_i64_temp(arg));
+}
+
+static inline void tcg_temp_free_ptr(TCGv_ptr arg)
+{
+ tcg_temp_free_internal(tcgv_ptr_temp(arg));
+}
+
+static inline void tcg_temp_free_vec(TCGv_vec arg)
+{
+ tcg_temp_free_internal(tcgv_vec_temp(arg));
+}
static inline TCGv_i32 tcg_global_mem_new_i32(TCGv_ptr reg, intptr_t offset,
const char *name)
static inline TCGv_i32 tcg_temp_new_i32(void)
{
- return tcg_temp_new_internal_i32(0);
+ TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, false);
+ return temp_tcgv_i32(t);
}
static inline TCGv_i32 tcg_temp_local_new_i32(void)
{
- return tcg_temp_new_internal_i32(1);
+ TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, true);
+ return temp_tcgv_i32(t);
}
static inline TCGv_i64 tcg_global_mem_new_i64(TCGv_ptr reg, intptr_t offset,
static inline TCGv_i64 tcg_temp_new_i64(void)
{
- return tcg_temp_new_internal_i64(0);
+ TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, false);
+ return temp_tcgv_i64(t);
}
static inline TCGv_i64 tcg_temp_local_new_i64(void)
{
- return tcg_temp_new_internal_i64(1);
+ TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, true);
+ return temp_tcgv_i64(t);
+}
+
+static inline TCGv_ptr tcg_global_mem_new_ptr(TCGv_ptr reg, intptr_t offset,
+ const char *name)
+{
+ TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_PTR, reg, offset, name);
+ return temp_tcgv_ptr(t);
+}
+
+static inline TCGv_ptr tcg_temp_new_ptr(void)
+{
+ TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, false);
+ return temp_tcgv_ptr(t);
+}
+
+static inline TCGv_ptr tcg_temp_local_new_ptr(void)
+{
+ TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, true);
+ return temp_tcgv_ptr(t);
}
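A minimal usage sketch of the allocation/free pairs introduced above (the surrounding translator context is hypothetical):

    TCGv_ptr p = tcg_temp_new_ptr();
    TCGv_vec v = tcg_temp_new_vec(TCG_TYPE_V128);
    /* ... emit ops that use p and v ... */
    tcg_temp_free_vec(v);
    tcg_temp_free_ptr(p);
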
#if defined(CONFIG_DEBUG_TCG)
/* Instruction is optional and not implemented by the host, or insn
is generic and should not be implemented by the host. */
TCG_OPF_NOT_PRESENT = 0x10,
+ /* Instruction operands are vectors. */
+ TCG_OPF_VECTOR = 0x20,
};
typedef struct TCGOpDef {
abort();\
} while (0)
-#if UINTPTR_MAX == UINT32_MAX
-static inline TCGv_ptr TCGV_NAT_TO_PTR(TCGv_i32 n) { return (TCGv_ptr)n; }
-static inline TCGv_i32 TCGV_PTR_TO_NAT(TCGv_ptr n) { return (TCGv_i32)n; }
-
-#define tcg_const_ptr(V) TCGV_NAT_TO_PTR(tcg_const_i32((intptr_t)(V)))
-#define tcg_global_mem_new_ptr(R, O, N) \
- TCGV_NAT_TO_PTR(tcg_global_mem_new_i32((R), (O), (N)))
-#define tcg_temp_new_ptr() TCGV_NAT_TO_PTR(tcg_temp_new_i32())
-#define tcg_temp_free_ptr(T) tcg_temp_free_i32(TCGV_PTR_TO_NAT(T))
-#else
-static inline TCGv_ptr TCGV_NAT_TO_PTR(TCGv_i64 n) { return (TCGv_ptr)n; }
-static inline TCGv_i64 TCGV_PTR_TO_NAT(TCGv_ptr n) { return (TCGv_i64)n; }
-
-#define tcg_const_ptr(V) TCGV_NAT_TO_PTR(tcg_const_i64((intptr_t)(V)))
-#define tcg_global_mem_new_ptr(R, O, N) \
- TCGV_NAT_TO_PTR(tcg_global_mem_new_i64((R), (O), (N)))
-#define tcg_temp_new_ptr() TCGV_NAT_TO_PTR(tcg_temp_new_i64())
-#define tcg_temp_free_ptr(T) tcg_temp_free_i64(TCGV_PTR_TO_NAT(T))
-#endif
-
bool tcg_op_supported(TCGOpcode op);
void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args);
TCGv_i64 tcg_const_i64(int64_t val);
TCGv_i32 tcg_const_local_i32(int32_t val);
TCGv_i64 tcg_const_local_i64(int64_t val);
+TCGv_vec tcg_const_zeros_vec(TCGType);
+TCGv_vec tcg_const_ones_vec(TCGType);
+TCGv_vec tcg_const_zeros_vec_matching(TCGv_vec);
+TCGv_vec tcg_const_ones_vec_matching(TCGv_vec);
+
+#if UINTPTR_MAX == UINT32_MAX
+# define tcg_const_ptr(x) ((TCGv_ptr)tcg_const_i32((intptr_t)(x)))
+# define tcg_const_local_ptr(x) ((TCGv_ptr)tcg_const_local_i32((intptr_t)(x)))
+#else
+# define tcg_const_ptr(x) ((TCGv_ptr)tcg_const_i64((intptr_t)(x)))
+# define tcg_const_local_ptr(x) ((TCGv_ptr)tcg_const_local_i64((intptr_t)(x)))
+#endif
TCGLabel *gen_new_label(void);
* to this default (which just calls the prologue.code emitted by
* tcg_target_qemu_prologue()).
*/
-#define TB_EXIT_MASK 3
-#define TB_EXIT_IDX0 0
-#define TB_EXIT_IDX1 1
+#define TB_EXIT_MASK 3
+#define TB_EXIT_IDX0 0
+#define TB_EXIT_IDX1 1
+#define TB_EXIT_IDXMAX 1
#define TB_EXIT_REQUESTED 3
#ifdef HAVE_TCG_QEMU_TB_EXEC
void tcg_register_jit(void *buf, size_t buf_size);
+#if TCG_TARGET_MAYBE_vec
+/* Return zero if the tuple (opc, type, vece) is unsupportable;
+ return > 0 if it is directly supportable;
+ return < 0 if we must call tcg_expand_vec_op. */
+int tcg_can_emit_vec_op(TCGOpcode, TCGType, unsigned);
+#else
+static inline int tcg_can_emit_vec_op(TCGOpcode o, TCGType t, unsigned ve)
+{
+ return 0;
+}
+#endif
+
+/* Expand the tuple (opc, type, vece) on the given arguments. */
+void tcg_expand_vec_op(TCGOpcode, TCGType, unsigned, TCGArg, ...);
+
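A sketch of how generic expansion code is expected to consult the hook above; the opcode and element size are only illustrative:

    int r = tcg_can_emit_vec_op(INDEX_op_mul_vec, TCG_TYPE_V128, MO_16);
    if (r > 0) {
        /* the backend supports the op directly */
    } else if (r < 0) {
        /* route through tcg_expand_vec_op() */
    } else {
        /* unsupported for this (type, vece) tuple */
    }
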
+/* Replicate a constant C according to the log2 of the element size. */
+uint64_t dup_const(unsigned vece, uint64_t c);
+
+#define dup_const(VECE, C) \
+ (__builtin_constant_p(VECE) \
+ ? ( (VECE) == MO_8 ? 0x0101010101010101ull * (uint8_t)(C) \
+ : (VECE) == MO_16 ? 0x0001000100010001ull * (uint16_t)(C) \
+ : (VECE) == MO_32 ? 0x0000000100000001ull * (uint32_t)(C) \
+ : dup_const(VECE, C)) \
+ : dup_const(VECE, C))
+
+
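Worked examples of the replication performed by dup_const above (the compile-time path multiplies by a mask with a 1 in each element position):

    dup_const(MO_8,  0xab)   == 0xababababababababull
    dup_const(MO_16, 0x1234) == 0x1234123412341234ull
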
/*
* Memory helpers that will be used by TCG generated code.
*/
GEN_ATOMIC_HELPER_ALL(fetch_and)
GEN_ATOMIC_HELPER_ALL(fetch_or)
GEN_ATOMIC_HELPER_ALL(fetch_xor)
+GEN_ATOMIC_HELPER_ALL(fetch_smin)
+GEN_ATOMIC_HELPER_ALL(fetch_umin)
+GEN_ATOMIC_HELPER_ALL(fetch_smax)
+GEN_ATOMIC_HELPER_ALL(fetch_umax)
GEN_ATOMIC_HELPER_ALL(add_fetch)
GEN_ATOMIC_HELPER_ALL(sub_fetch)
GEN_ATOMIC_HELPER_ALL(and_fetch)
GEN_ATOMIC_HELPER_ALL(or_fetch)
GEN_ATOMIC_HELPER_ALL(xor_fetch)
+GEN_ATOMIC_HELPER_ALL(smin_fetch)
+GEN_ATOMIC_HELPER_ALL(umin_fetch)
+GEN_ATOMIC_HELPER_ALL(smax_fetch)
+GEN_ATOMIC_HELPER_ALL(umax_fetch)
GEN_ATOMIC_HELPER_ALL(xchg)