#define TCGV_HIGH TCGV_HIGH_link_error
#endif
-/* Note that this is optimized for sequential allocation during translate.
- Up to and including filling in the forward link immediately. We'll do
- proper termination of the end of the list after we finish translation. */
-
-static inline TCGOp *tcg_emit_op(TCGOpcode opc)
-{
- TCGContext *ctx = tcg_ctx;
- int oi = ctx->gen_next_op_idx;
- int ni = oi + 1;
- int pi = oi - 1;
- TCGOp *op = &ctx->gen_op_buf[oi];
-
- tcg_debug_assert(oi < OPC_BUF_SIZE);
- ctx->gen_op_buf[0].prev = oi;
- ctx->gen_next_op_idx = ni;
-
- memset(op, 0, offsetof(TCGOp, args));
- op->opc = opc;
- op->prev = pi;
- op->next = ni;
-
- return op;
-}
-
void tcg_gen_op1(TCGOpcode opc, TCGArg a1)
{
    TCGOp *op = tcg_emit_op(opc);
    op->args[0] = a1;
}
}
-void tcg_gen_andi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2)
+void tcg_gen_andi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
TCGv_i32 t0;
/* Some cases can be optimized here. */
case 0:
tcg_gen_movi_i32(ret, 0);
return;
- case 0xffffffffu:
+ case -1:
tcg_gen_mov_i32(ret, arg1);
return;
- case 0xffu:
+ case 0xff:
/* Don't recurse with tcg_gen_ext8u_i32. */
if (TCG_TARGET_HAS_ext8u_i32) {
tcg_gen_op2_i32(INDEX_op_ext8u_i32, ret, arg1);
return;
}
break;
- case 0xffffu:
+ case 0xffff:
if (TCG_TARGET_HAS_ext16u_i32) {
tcg_gen_op2_i32(INDEX_op_ext16u_i32, ret, arg1);
return;
}
}
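
As a quick standalone sanity check of the identities this switch relies on (not part of the patch, and assuming ordinary two's-complement C semantics): with the immediate now signed, -1 sign-extends to all-ones, so "and -1" degenerates to a move, and the 0xff/0xffff masks are exactly the 8- and 16-bit zero-extensions.

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t x = 0x12345678;

        assert((x & (uint32_t)0)  == 0);       /* and 0  -> movi 0     */
        assert((x & (uint32_t)-1) == x);       /* and -1 -> mov        */
        assert((x & 0xffu)   == (uint8_t)x);   /* and 0xff   -> ext8u  */
        assert((x & 0xffffu) == (uint16_t)x);  /* and 0xffff -> ext16u */
        return 0;
    }
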
-void tcg_gen_shli_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2)
+void tcg_gen_shli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
- tcg_debug_assert(arg2 < 32);
+ tcg_debug_assert(arg2 >= 0 && arg2 < 32);
if (arg2 == 0) {
tcg_gen_mov_i32(ret, arg1);
} else {
}
}
-void tcg_gen_shri_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2)
+void tcg_gen_shri_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
- tcg_debug_assert(arg2 < 32);
+ tcg_debug_assert(arg2 >= 0 && arg2 < 32);
if (arg2 == 0) {
tcg_gen_mov_i32(ret, arg1);
} else {
}
}
-void tcg_gen_sari_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2)
+void tcg_gen_sari_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
- tcg_debug_assert(arg2 < 32);
+ tcg_debug_assert(arg2 >= 0 && arg2 < 32);
if (arg2 == 0) {
tcg_gen_mov_i32(ret, arg1);
} else {
void tcg_gen_muli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
- TCGv_i32 t0 = tcg_const_i32(arg2);
- tcg_gen_mul_i32(ret, arg1, t0);
- tcg_temp_free_i32(t0);
+ if (arg2 == 0) {
+ tcg_gen_movi_i32(ret, 0);
+ } else if (is_power_of_2(arg2)) {
+ tcg_gen_shli_i32(ret, arg1, ctz32(arg2));
+ } else {
+ TCGv_i32 t0 = tcg_const_i32(arg2);
+ tcg_gen_mul_i32(ret, arg1, t0);
+ tcg_temp_free_i32(t0);
+ }
}
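
The new power-of-two path rests on the identity x * 2^k == x << k under 32-bit wraparound. A minimal self-contained check of that claim (is_power_of_2 and ctz32 here are local stand-ins for the helpers QEMU takes from qemu/host-utils.h, and __builtin_ctz assumes GCC/Clang):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool is_power_of_2(uint64_t v)
    {
        return v != 0 && (v & (v - 1)) == 0;
    }

    static int ctz32(uint32_t v)
    {
        return __builtin_ctz(v);  /* caller guarantees v != 0 */
    }

    int main(void)
    {
        uint32_t x = 0xdeadbeef;

        /* Walk every power-of-two constant; uint32_t arithmetic wraps
           mod 2^32, so the shift and the multiply must agree. */
        for (uint32_t c = 1; c != 0; c <<= 1) {
            assert(is_power_of_2(c));
            assert(x * c == x << ctz32(c));
        }
        return 0;
    }
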
void tcg_gen_div_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
}
}
+void tcg_gen_smin_i32(TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b)
+{
+ tcg_gen_movcond_i32(TCG_COND_LT, ret, a, b, a, b);
+}
+
+void tcg_gen_umin_i32(TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b)
+{
+ tcg_gen_movcond_i32(TCG_COND_LTU, ret, a, b, a, b);
+}
+
+void tcg_gen_smax_i32(TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b)
+{
+ tcg_gen_movcond_i32(TCG_COND_LT, ret, a, b, b, a);
+}
+
+void tcg_gen_umax_i32(TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b)
+{
+ tcg_gen_movcond_i32(TCG_COND_LTU, ret, a, b, b, a);
+}
+
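
Each of these expands to a single movcond, i.e. ret = cond(a, b) ? v1 : v2 with v1/v2 drawn from a/b to select the minimum or maximum. A standalone sketch of those semantics, showing why the LT/LTU distinction matters (the same pattern holds for the _i64 variants below):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t smin32(uint32_t a, uint32_t b)
    {
        return (int32_t)a < (int32_t)b ? a : b;  /* TCG_COND_LT  */
    }

    static uint32_t umin32(uint32_t a, uint32_t b)
    {
        return a < b ? a : b;                    /* TCG_COND_LTU */
    }

    int main(void)
    {
        /* 0xffffffff is -1 signed but UINT32_MAX unsigned, so the two
           orderings disagree on it. */
        assert(smin32(0xffffffffu, 1) == 0xffffffffu);
        assert(umin32(0xffffffffu, 1) == 1);
        return 0;
    }
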
/* 64-bit ops */
#if TCG_TARGET_REG_BITS == 32
}
}
-void tcg_gen_andi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2)
+void tcg_gen_andi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
TCGv_i64 t0;
case 0:
tcg_gen_movi_i64(ret, 0);
return;
- case 0xffffffffffffffffull:
+ case -1:
tcg_gen_mov_i64(ret, arg1);
return;
- case 0xffull:
+ case 0xff:
/* Don't recurse with tcg_gen_ext8u_i64. */
if (TCG_TARGET_HAS_ext8u_i64) {
tcg_gen_op2_i64(INDEX_op_ext8u_i64, ret, arg1);
return;
}
break;
- case 0xffffu:
+ case 0xffff:
if (TCG_TARGET_HAS_ext16u_i64) {
tcg_gen_op2_i64(INDEX_op_ext16u_i64, ret, arg1);
return;
}
break;
- case 0xffffffffull:
+ case 0xffffffffu:
if (TCG_TARGET_HAS_ext32u_i64) {
tcg_gen_op2_i64(INDEX_op_ext32u_i64, ret, arg1);
return;
}
}
-void tcg_gen_shli_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2)
+void tcg_gen_shli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
- tcg_debug_assert(arg2 < 64);
+ tcg_debug_assert(arg2 >= 0 && arg2 < 64);
if (TCG_TARGET_REG_BITS == 32) {
tcg_gen_shifti_i64(ret, arg1, arg2, 0, 0);
} else if (arg2 == 0) {
}
}
-void tcg_gen_shri_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2)
+void tcg_gen_shri_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
- tcg_debug_assert(arg2 < 64);
+ tcg_debug_assert(arg2 >= 0 && arg2 < 64);
if (TCG_TARGET_REG_BITS == 32) {
tcg_gen_shifti_i64(ret, arg1, arg2, 1, 0);
} else if (arg2 == 0) {
}
}
-void tcg_gen_sari_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2)
+void tcg_gen_sari_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
- tcg_debug_assert(arg2 < 64);
+ tcg_debug_assert(arg2 >= 0 && arg2 < 64);
if (TCG_TARGET_REG_BITS == 32) {
tcg_gen_shifti_i64(ret, arg1, arg2, 1, 1);
} else if (arg2 == 0) {
void tcg_gen_muli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
- TCGv_i64 t0 = tcg_const_i64(arg2);
- tcg_gen_mul_i64(ret, arg1, t0);
- tcg_temp_free_i64(t0);
+ if (arg2 == 0) {
+ tcg_gen_movi_i64(ret, 0);
+ } else if (is_power_of_2(arg2)) {
+ tcg_gen_shli_i64(ret, arg1, ctz64(arg2));
+ } else {
+ TCGv_i64 t0 = tcg_const_i64(arg2);
+ tcg_gen_mul_i64(ret, arg1, t0);
+ tcg_temp_free_i64(t0);
+ }
}
void tcg_gen_div_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
tcg_temp_free_i64(t2);
}
+void tcg_gen_smin_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b)
+{
+ tcg_gen_movcond_i64(TCG_COND_LT, ret, a, b, a, b);
+}
+
+void tcg_gen_umin_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b)
+{
+ tcg_gen_movcond_i64(TCG_COND_LTU, ret, a, b, a, b);
+}
+
+void tcg_gen_smax_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b)
+{
+ tcg_gen_movcond_i64(TCG_COND_LT, ret, a, b, b, a);
+}
+
+void tcg_gen_umax_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b)
+{
+ tcg_gen_movcond_i64(TCG_COND_LTU, ret, a, b, b, a);
+}
+
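
For comparison, this is roughly what a frontend had to open-code before these helpers existed: an explicit compare-and-branch (hypothetical sketch; gen_umax_branchy is not a real function, and it assumes dst does not alias b). A single tcg_gen_umax_i64 call replaces the whole sequence and lets backends lower it to a native max instruction where one exists.

    static void gen_umax_branchy(TCGv_i64 dst, TCGv_i64 a, TCGv_i64 b)
    {
        TCGLabel *done = gen_new_label();

        tcg_gen_mov_i64(dst, a);
        tcg_gen_brcond_i64(TCG_COND_GEU, a, b, done);
        tcg_gen_mov_i64(dst, b);
        gen_set_label(done);
    }
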
/* Size changing operations. */
void tcg_gen_extrl_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
/* QEMU specific operations. */
+void tcg_gen_exit_tb(TranslationBlock *tb, unsigned idx)
+{
+ uintptr_t val = (uintptr_t)tb + idx;
+
+ if (tb == NULL) {
+ tcg_debug_assert(idx == 0);
+ } else if (idx <= TB_EXIT_IDXMAX) {
+#ifdef CONFIG_DEBUG_TCG
+ /* This is an exit following a goto_tb. Verify that we have
+ seen this numbered exit before, via tcg_gen_goto_tb. */
+ tcg_debug_assert(tcg_ctx->goto_tb_issue_mask & (1 << idx));
+#endif
+ } else {
+ /* This is an exit via the exitreq label. */
+ tcg_debug_assert(idx == TB_EXIT_REQUESTED);
+ }
+
+ tcg_gen_op1i(INDEX_op_exit_tb, val);
+}
+
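
The val computed above packs the exit index into the low bits of the TB pointer, which are free because TranslationBlock allocations are aligned. A standalone sketch of the round trip (mirroring how the dispatch loop in cpu_tb_exec splits the returned word; TB_EXIT_MASK is 3 in exec/exec-all.h):

    #include <assert.h>
    #include <stdint.h>

    #define TB_EXIT_MASK 3

    static uintptr_t pack_exit(uintptr_t tb, unsigned idx)
    {
        assert((tb & TB_EXIT_MASK) == 0 && idx <= TB_EXIT_MASK);
        return tb + idx;
    }

    int main(void)
    {
        uintptr_t tb = 0x401000;  /* hypothetical aligned TB address */
        uintptr_t val = pack_exit(tb, 1);

        assert((val & ~(uintptr_t)TB_EXIT_MASK) == tb);  /* recover pointer */
        assert((val & TB_EXIT_MASK) == 1);               /* recover index   */
        return 0;
    }
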
void tcg_gen_goto_tb(unsigned idx)
{
/* We only support two chained exits. */
- tcg_debug_assert(idx <= 1);
+ tcg_debug_assert(idx <= TB_EXIT_IDXMAX);
#ifdef CONFIG_DEBUG_TCG
    /* Verify that we haven't seen this numbered exit before. */
tcg_debug_assert((tcg_ctx->goto_tb_issue_mask & (1 << idx)) == 0);
tcg_gen_op1i(INDEX_op_goto_ptr, tcgv_ptr_arg(ptr));
tcg_temp_free_ptr(ptr);
} else {
- tcg_gen_exit_tb(0);
+ tcg_gen_exit_tb(NULL, 0);
}
}
GEN_ATOMIC_HELPER(fetch_and, and, 0)
GEN_ATOMIC_HELPER(fetch_or, or, 0)
GEN_ATOMIC_HELPER(fetch_xor, xor, 0)
+GEN_ATOMIC_HELPER(fetch_smin, smin, 0)
+GEN_ATOMIC_HELPER(fetch_umin, umin, 0)
+GEN_ATOMIC_HELPER(fetch_smax, smax, 0)
+GEN_ATOMIC_HELPER(fetch_umax, umax, 0)
GEN_ATOMIC_HELPER(add_fetch, add, 1)
GEN_ATOMIC_HELPER(and_fetch, and, 1)
GEN_ATOMIC_HELPER(or_fetch, or, 1)
GEN_ATOMIC_HELPER(xor_fetch, xor, 1)
+GEN_ATOMIC_HELPER(smin_fetch, smin, 1)
+GEN_ATOMIC_HELPER(umin_fetch, umin, 1)
+GEN_ATOMIC_HELPER(smax_fetch, smax, 1)
+GEN_ATOMIC_HELPER(umax_fetch, umax, 1)
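
Per the existing naming convention, the fetch_* forms return the value before the update and the *_fetch forms the value after it. C11 has no atomic min/max, so a host fallback for, say, fetch_umin boils down to a compare-exchange loop like this sketch (fetch_umin_u32 is an illustrative name, not a QEMU function):

    #include <stdatomic.h>
    #include <stdint.h>

    static uint32_t fetch_umin_u32(_Atomic uint32_t *p, uint32_t val)
    {
        uint32_t old = atomic_load(p);

        /* Retry until min(old, val) is installed; on failure the CAS
           refreshes 'old' with the current contents of *p. */
        while (old > val &&
               !atomic_compare_exchange_weak(p, &old, val)) {
            /* retry */
        }
        return old;  /* value before the update */
    }
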
static void tcg_gen_mov2_i32(TCGv_i32 r, TCGv_i32 a, TCGv_i32 b)
{