*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg.h"
#define TCGV_HIGH TCGV_HIGH_link_error
#endif
-/* Note that this is optimized for sequential allocation during translate.
- Up to and including filling in the forward link immediately. We'll do
- proper termination of the end of the list after we finish translation. */
-
-static void tcg_emit_op(TCGContext *ctx, TCGOpcode opc, int args)
+void tcg_gen_op1(TCGOpcode opc, TCGArg a1)
{
- int oi = ctx->gen_next_op_idx;
- int ni = oi + 1;
- int pi = oi - 1;
-
- tcg_debug_assert(oi < OPC_BUF_SIZE);
- ctx->gen_op_buf[0].prev = oi;
- ctx->gen_next_op_idx = ni;
-
- ctx->gen_op_buf[oi] = (TCGOp){
- .opc = opc,
- .args = args,
- .prev = pi,
- .next = ni
- };
+ TCGOp *op = tcg_emit_op(opc);
+ op->args[0] = a1;
}
-void tcg_gen_op1(TCGContext *ctx, TCGOpcode opc, TCGArg a1)
+void tcg_gen_op2(TCGOpcode opc, TCGArg a1, TCGArg a2)
{
- int pi = ctx->gen_next_parm_idx;
-
- tcg_debug_assert(pi + 1 <= OPPARAM_BUF_SIZE);
- ctx->gen_next_parm_idx = pi + 1;
- ctx->gen_opparam_buf[pi] = a1;
-
- tcg_emit_op(ctx, opc, pi);
+ TCGOp *op = tcg_emit_op(opc);
+ op->args[0] = a1;
+ op->args[1] = a2;
}
-void tcg_gen_op2(TCGContext *ctx, TCGOpcode opc, TCGArg a1, TCGArg a2)
+void tcg_gen_op3(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3)
{
- int pi = ctx->gen_next_parm_idx;
-
- tcg_debug_assert(pi + 2 <= OPPARAM_BUF_SIZE);
- ctx->gen_next_parm_idx = pi + 2;
- ctx->gen_opparam_buf[pi + 0] = a1;
- ctx->gen_opparam_buf[pi + 1] = a2;
-
- tcg_emit_op(ctx, opc, pi);
+ TCGOp *op = tcg_emit_op(opc);
+ op->args[0] = a1;
+ op->args[1] = a2;
+ op->args[2] = a3;
}
-void tcg_gen_op3(TCGContext *ctx, TCGOpcode opc, TCGArg a1,
- TCGArg a2, TCGArg a3)
+void tcg_gen_op4(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3, TCGArg a4)
{
- int pi = ctx->gen_next_parm_idx;
-
- tcg_debug_assert(pi + 3 <= OPPARAM_BUF_SIZE);
- ctx->gen_next_parm_idx = pi + 3;
- ctx->gen_opparam_buf[pi + 0] = a1;
- ctx->gen_opparam_buf[pi + 1] = a2;
- ctx->gen_opparam_buf[pi + 2] = a3;
-
- tcg_emit_op(ctx, opc, pi);
-}
-
-void tcg_gen_op4(TCGContext *ctx, TCGOpcode opc, TCGArg a1,
- TCGArg a2, TCGArg a3, TCGArg a4)
-{
- int pi = ctx->gen_next_parm_idx;
-
- tcg_debug_assert(pi + 4 <= OPPARAM_BUF_SIZE);
- ctx->gen_next_parm_idx = pi + 4;
- ctx->gen_opparam_buf[pi + 0] = a1;
- ctx->gen_opparam_buf[pi + 1] = a2;
- ctx->gen_opparam_buf[pi + 2] = a3;
- ctx->gen_opparam_buf[pi + 3] = a4;
-
- tcg_emit_op(ctx, opc, pi);
+ TCGOp *op = tcg_emit_op(opc);
+ op->args[0] = a1;
+ op->args[1] = a2;
+ op->args[2] = a3;
+ op->args[3] = a4;
}
-void tcg_gen_op5(TCGContext *ctx, TCGOpcode opc, TCGArg a1,
- TCGArg a2, TCGArg a3, TCGArg a4, TCGArg a5)
+void tcg_gen_op5(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3,
+ TCGArg a4, TCGArg a5)
{
- int pi = ctx->gen_next_parm_idx;
-
- tcg_debug_assert(pi + 5 <= OPPARAM_BUF_SIZE);
- ctx->gen_next_parm_idx = pi + 5;
- ctx->gen_opparam_buf[pi + 0] = a1;
- ctx->gen_opparam_buf[pi + 1] = a2;
- ctx->gen_opparam_buf[pi + 2] = a3;
- ctx->gen_opparam_buf[pi + 3] = a4;
- ctx->gen_opparam_buf[pi + 4] = a5;
-
- tcg_emit_op(ctx, opc, pi);
+ TCGOp *op = tcg_emit_op(opc);
+ op->args[0] = a1;
+ op->args[1] = a2;
+ op->args[2] = a3;
+ op->args[3] = a4;
+ op->args[4] = a5;
}
-void tcg_gen_op6(TCGContext *ctx, TCGOpcode opc, TCGArg a1, TCGArg a2,
- TCGArg a3, TCGArg a4, TCGArg a5, TCGArg a6)
+void tcg_gen_op6(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3,
+ TCGArg a4, TCGArg a5, TCGArg a6)
{
- int pi = ctx->gen_next_parm_idx;
-
- tcg_debug_assert(pi + 6 <= OPPARAM_BUF_SIZE);
- ctx->gen_next_parm_idx = pi + 6;
- ctx->gen_opparam_buf[pi + 0] = a1;
- ctx->gen_opparam_buf[pi + 1] = a2;
- ctx->gen_opparam_buf[pi + 2] = a3;
- ctx->gen_opparam_buf[pi + 3] = a4;
- ctx->gen_opparam_buf[pi + 4] = a5;
- ctx->gen_opparam_buf[pi + 5] = a6;
-
- tcg_emit_op(ctx, opc, pi);
+ TCGOp *op = tcg_emit_op(opc);
+ op->args[0] = a1;
+ op->args[1] = a2;
+ op->args[2] = a3;
+ op->args[3] = a4;
+ op->args[4] = a5;
+ op->args[5] = a6;
}
void tcg_gen_mb(TCGBar mb_type)
{
- if (parallel_cpus) {
- tcg_gen_op1(&tcg_ctx, INDEX_op_mb, mb_type);
+ if (tcg_ctx->tb_cflags & CF_PARALLEL) {
+ tcg_gen_op1(INDEX_op_mb, mb_type);
}
}
}
}
-void tcg_gen_andi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2)
+void tcg_gen_andi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
TCGv_i32 t0;
/* Some cases can be optimized here. */
case 0:
tcg_gen_movi_i32(ret, 0);
return;
- case 0xffffffffu:
+ case -1:
tcg_gen_mov_i32(ret, arg1);
return;
- case 0xffu:
+ case 0xff:
/* Don't recurse with tcg_gen_ext8u_i32. */
if (TCG_TARGET_HAS_ext8u_i32) {
tcg_gen_op2_i32(INDEX_op_ext8u_i32, ret, arg1);
return;
}
break;
- case 0xffffu:
+ case 0xffff:
if (TCG_TARGET_HAS_ext16u_i32) {
tcg_gen_op2_i32(INDEX_op_ext16u_i32, ret, arg1);
return;
}
}
-void tcg_gen_shli_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2)
+void tcg_gen_shli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
- tcg_debug_assert(arg2 < 32);
+ tcg_debug_assert(arg2 >= 0 && arg2 < 32);
if (arg2 == 0) {
tcg_gen_mov_i32(ret, arg1);
} else {
}
}
-void tcg_gen_shri_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2)
+void tcg_gen_shri_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
- tcg_debug_assert(arg2 < 32);
+ tcg_debug_assert(arg2 >= 0 && arg2 < 32);
if (arg2 == 0) {
tcg_gen_mov_i32(ret, arg1);
} else {
}
}
-void tcg_gen_sari_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2)
+void tcg_gen_sari_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
- tcg_debug_assert(arg2 < 32);
+ tcg_debug_assert(arg2 >= 0 && arg2 < 32);
if (arg2 == 0) {
tcg_gen_mov_i32(ret, arg1);
} else {
if (cond == TCG_COND_ALWAYS) {
tcg_gen_br(l);
} else if (cond != TCG_COND_NEVER) {
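+        /* Count this branch as a reference to the label.  */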
+ l->refs++;
tcg_gen_op4ii_i32(INDEX_op_brcond_i32, arg1, arg2, cond, label_arg(l));
}
}
void tcg_gen_muli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
- TCGv_i32 t0 = tcg_const_i32(arg2);
- tcg_gen_mul_i32(ret, arg1, t0);
- tcg_temp_free_i32(t0);
+ if (arg2 == 0) {
+ tcg_gen_movi_i32(ret, 0);
+ } else if (is_power_of_2(arg2)) {
+ tcg_gen_shli_i32(ret, arg1, ctz32(arg2));
+ } else {
+ TCGv_i32 t0 = tcg_const_i32(arg2);
+ tcg_gen_mul_i32(ret, arg1, t0);
+ tcg_temp_free_i32(t0);
+ }
}
void tcg_gen_div_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
return;
}
- mask = (1u << len) - 1;
t1 = tcg_temp_new_i32();
+ if (TCG_TARGET_HAS_extract2_i32) {
+ if (ofs + len == 32) {
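+            /* Keep arg1's low ofs bits; fill the top len bits from arg2.  */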
+ tcg_gen_shli_i32(t1, arg1, len);
+ tcg_gen_extract2_i32(ret, t1, arg2, len);
+ goto done;
+ }
+ if (ofs == 0) {
+ tcg_gen_extract2_i32(ret, arg1, arg2, len);
+ tcg_gen_rotli_i32(ret, ret, len);
+ goto done;
+ }
+ }
+
+ mask = (1u << len) - 1;
if (ofs + len < 32) {
tcg_gen_andi_i32(t1, arg2, mask);
tcg_gen_shli_i32(t1, t1, ofs);
}
tcg_gen_andi_i32(ret, arg1, ~(mask << ofs));
tcg_gen_or_i32(ret, ret, t1);
-
+ done:
tcg_temp_free_i32(t1);
}
tcg_gen_sari_i32(ret, ret, 32 - len);
}
+/*
+ * Extract 32 bits from a 64-bit input, ah:al, starting from ofs.
+ * Unlike tcg_gen_extract_i32 above, len is fixed at 32.
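+ * Equivalently, ret = (al >> ofs) | (ah << (32 - ofs)) for 0 < ofs < 32.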
+ */
+void tcg_gen_extract2_i32(TCGv_i32 ret, TCGv_i32 al, TCGv_i32 ah,
+ unsigned int ofs)
+{
+ tcg_debug_assert(ofs <= 32);
+ if (ofs == 0) {
+ tcg_gen_mov_i32(ret, al);
+ } else if (ofs == 32) {
+ tcg_gen_mov_i32(ret, ah);
+ } else if (al == ah) {
+ tcg_gen_rotri_i32(ret, al, ofs);
+ } else if (TCG_TARGET_HAS_extract2_i32) {
+ tcg_gen_op4i_i32(INDEX_op_extract2_i32, ret, al, ah, ofs);
+ } else {
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ tcg_gen_shri_i32(t0, al, ofs);
+ tcg_gen_deposit_i32(ret, t0, ah, 32 - ofs, ofs);
+ tcg_temp_free_i32(t0);
+ }
+}
+
void tcg_gen_movcond_i32(TCGCond cond, TCGv_i32 ret, TCGv_i32 c1,
TCGv_i32 c2, TCGv_i32 v1, TCGv_i32 v2)
{
if (TCG_TARGET_HAS_bswap32_i32) {
tcg_gen_op2_i32(INDEX_op_bswap32_i32, ret, arg);
} else {
- TCGv_i32 t0, t1;
- t0 = tcg_temp_new_i32();
- t1 = tcg_temp_new_i32();
-
- tcg_gen_shli_i32(t0, arg, 24);
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ TCGv_i32 t2 = tcg_const_i32(0x00ff00ff);
- tcg_gen_andi_i32(t1, arg, 0x0000ff00);
- tcg_gen_shli_i32(t1, t1, 8);
- tcg_gen_or_i32(t0, t0, t1);
+ /* arg = abcd */
+ tcg_gen_shri_i32(t0, arg, 8); /* t0 = .abc */
+ tcg_gen_and_i32(t1, arg, t2); /* t1 = .b.d */
+ tcg_gen_and_i32(t0, t0, t2); /* t0 = .a.c */
+ tcg_gen_shli_i32(t1, t1, 8); /* t1 = b.d. */
+ tcg_gen_or_i32(ret, t0, t1); /* ret = badc */
- tcg_gen_shri_i32(t1, arg, 8);
- tcg_gen_andi_i32(t1, t1, 0x0000ff00);
- tcg_gen_or_i32(t0, t0, t1);
+ tcg_gen_shri_i32(t0, ret, 16); /* t0 = ..ba */
+ tcg_gen_shli_i32(t1, ret, 16); /* t1 = dc.. */
+ tcg_gen_or_i32(ret, t0, t1); /* ret = dcba */
- tcg_gen_shri_i32(t1, arg, 24);
- tcg_gen_or_i32(ret, t0, t1);
tcg_temp_free_i32(t0);
tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
}
}
+void tcg_gen_smin_i32(TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b)
+{
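+    /* ret = (a < b ? a : b), using a signed comparison.  */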
+ tcg_gen_movcond_i32(TCG_COND_LT, ret, a, b, a, b);
+}
+
+void tcg_gen_umin_i32(TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b)
+{
+ tcg_gen_movcond_i32(TCG_COND_LTU, ret, a, b, a, b);
+}
+
+void tcg_gen_smax_i32(TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b)
+{
+ tcg_gen_movcond_i32(TCG_COND_LT, ret, a, b, b, a);
+}
+
+void tcg_gen_umax_i32(TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b)
+{
+ tcg_gen_movcond_i32(TCG_COND_LTU, ret, a, b, b, a);
+}
+
+void tcg_gen_abs_i32(TCGv_i32 ret, TCGv_i32 a)
+{
+ TCGv_i32 t = tcg_temp_new_i32();
+
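+    /* t is -1 if a is negative, else 0; (a ^ t) - t then negates
+       a exactly when it is negative, yielding the absolute value.  */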
+ tcg_gen_sari_i32(t, a, 31);
+ tcg_gen_xor_i32(ret, a, t);
+ tcg_gen_sub_i32(ret, ret, t);
+ tcg_temp_free_i32(t);
+}
+
/* 64-bit ops */
#if TCG_TARGET_REG_BITS == 32
}
}
-void tcg_gen_andi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2)
+void tcg_gen_andi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
TCGv_i64 t0;
case 0:
tcg_gen_movi_i64(ret, 0);
return;
- case 0xffffffffffffffffull:
+ case -1:
tcg_gen_mov_i64(ret, arg1);
return;
- case 0xffull:
+ case 0xff:
/* Don't recurse with tcg_gen_ext8u_i64. */
if (TCG_TARGET_HAS_ext8u_i64) {
tcg_gen_op2_i64(INDEX_op_ext8u_i64, ret, arg1);
return;
}
break;
- case 0xffffu:
+ case 0xffff:
if (TCG_TARGET_HAS_ext16u_i64) {
tcg_gen_op2_i64(INDEX_op_ext16u_i64, ret, arg1);
return;
}
break;
- case 0xffffffffull:
+ case 0xffffffffu:
if (TCG_TARGET_HAS_ext32u_i64) {
tcg_gen_op2_i64(INDEX_op_ext32u_i64, ret, arg1);
return;
tcg_gen_shli_i32(TCGV_HIGH(ret), TCGV_LOW(arg1), c);
tcg_gen_movi_i32(TCGV_LOW(ret), 0);
}
- } else {
- TCGv_i32 t0, t1;
-
- t0 = tcg_temp_new_i32();
- t1 = tcg_temp_new_i32();
- if (right) {
- tcg_gen_shli_i32(t0, TCGV_HIGH(arg1), 32 - c);
- if (arith) {
- tcg_gen_sari_i32(t1, TCGV_HIGH(arg1), c);
- } else {
- tcg_gen_shri_i32(t1, TCGV_HIGH(arg1), c);
- }
+ } else if (right) {
+ if (TCG_TARGET_HAS_extract2_i32) {
+ tcg_gen_extract2_i32(TCGV_LOW(ret),
+ TCGV_LOW(arg1), TCGV_HIGH(arg1), c);
+ } else {
tcg_gen_shri_i32(TCGV_LOW(ret), TCGV_LOW(arg1), c);
- tcg_gen_or_i32(TCGV_LOW(ret), TCGV_LOW(ret), t0);
- tcg_gen_mov_i32(TCGV_HIGH(ret), t1);
+ tcg_gen_deposit_i32(TCGV_LOW(ret), TCGV_LOW(ret),
+ TCGV_HIGH(arg1), 32 - c, c);
+ }
+ if (arith) {
+ tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), c);
} else {
+ tcg_gen_shri_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), c);
+ }
+ } else {
+ if (TCG_TARGET_HAS_extract2_i32) {
+ tcg_gen_extract2_i32(TCGV_HIGH(ret),
+ TCGV_LOW(arg1), TCGV_HIGH(arg1), 32 - c);
+ } else {
+ TCGv_i32 t0 = tcg_temp_new_i32();
tcg_gen_shri_i32(t0, TCGV_LOW(arg1), 32 - c);
- /* Note: ret can be the same as arg1, so we use t1 */
- tcg_gen_shli_i32(t1, TCGV_LOW(arg1), c);
- tcg_gen_shli_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), c);
- tcg_gen_or_i32(TCGV_HIGH(ret), TCGV_HIGH(ret), t0);
- tcg_gen_mov_i32(TCGV_LOW(ret), t1);
+ tcg_gen_deposit_i32(TCGV_HIGH(ret), t0,
+ TCGV_HIGH(arg1), c, 32 - c);
+ tcg_temp_free_i32(t0);
}
- tcg_temp_free_i32(t0);
- tcg_temp_free_i32(t1);
+ tcg_gen_shli_i32(TCGV_LOW(ret), TCGV_LOW(arg1), c);
}
}
-void tcg_gen_shli_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2)
+void tcg_gen_shli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
- tcg_debug_assert(arg2 < 64);
+ tcg_debug_assert(arg2 >= 0 && arg2 < 64);
if (TCG_TARGET_REG_BITS == 32) {
tcg_gen_shifti_i64(ret, arg1, arg2, 0, 0);
} else if (arg2 == 0) {
}
}
-void tcg_gen_shri_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2)
+void tcg_gen_shri_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
- tcg_debug_assert(arg2 < 64);
+ tcg_debug_assert(arg2 >= 0 && arg2 < 64);
if (TCG_TARGET_REG_BITS == 32) {
tcg_gen_shifti_i64(ret, arg1, arg2, 1, 0);
} else if (arg2 == 0) {
}
}
-void tcg_gen_sari_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2)
+void tcg_gen_sari_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
- tcg_debug_assert(arg2 < 64);
+ tcg_debug_assert(arg2 >= 0 && arg2 < 64);
if (TCG_TARGET_REG_BITS == 32) {
tcg_gen_shifti_i64(ret, arg1, arg2, 1, 1);
} else if (arg2 == 0) {
if (cond == TCG_COND_ALWAYS) {
tcg_gen_br(l);
} else if (cond != TCG_COND_NEVER) {
+ l->refs++;
if (TCG_TARGET_REG_BITS == 32) {
tcg_gen_op6ii_i32(INDEX_op_brcond2_i32, TCGV_LOW(arg1),
TCGV_HIGH(arg1), TCGV_LOW(arg2),
void tcg_gen_muli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
- TCGv_i64 t0 = tcg_const_i64(arg2);
- tcg_gen_mul_i64(ret, arg1, t0);
- tcg_temp_free_i64(t0);
+ if (arg2 == 0) {
+ tcg_gen_movi_i64(ret, 0);
+ } else if (is_power_of_2(arg2)) {
+ tcg_gen_shli_i64(ret, arg1, ctz64(arg2));
+ } else {
+ TCGv_i64 t0 = tcg_const_i64(arg2);
+ tcg_gen_mul_i64(ret, arg1, t0);
+ tcg_temp_free_i64(t0);
+ }
}
void tcg_gen_div_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
} else if (TCG_TARGET_HAS_bswap32_i64) {
tcg_gen_op2_i64(INDEX_op_bswap32_i64, ret, arg);
} else {
- TCGv_i64 t0, t1;
- t0 = tcg_temp_new_i64();
- t1 = tcg_temp_new_i64();
-
- tcg_gen_shli_i64(t0, arg, 24);
- tcg_gen_ext32u_i64(t0, t0);
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_const_i64(0x00ff00ff);
- tcg_gen_andi_i64(t1, arg, 0x0000ff00);
- tcg_gen_shli_i64(t1, t1, 8);
- tcg_gen_or_i64(t0, t0, t1);
+ /* arg = ....abcd */
+ tcg_gen_shri_i64(t0, arg, 8); /* t0 = .....abc */
+ tcg_gen_and_i64(t1, arg, t2); /* t1 = .....b.d */
+ tcg_gen_and_i64(t0, t0, t2); /* t0 = .....a.c */
+ tcg_gen_shli_i64(t1, t1, 8); /* t1 = ....b.d. */
+ tcg_gen_or_i64(ret, t0, t1); /* ret = ....badc */
- tcg_gen_shri_i64(t1, arg, 8);
- tcg_gen_andi_i64(t1, t1, 0x0000ff00);
- tcg_gen_or_i64(t0, t0, t1);
+ tcg_gen_shli_i64(t1, ret, 48); /* t1 = dc...... */
+ tcg_gen_shri_i64(t0, ret, 16); /* t0 = ......ba */
+ tcg_gen_shri_i64(t1, t1, 32); /* t1 = ....dc.. */
+ tcg_gen_or_i64(ret, t0, t1); /* ret = ....dcba */
- tcg_gen_shri_i64(t1, arg, 24);
- tcg_gen_or_i64(ret, t0, t1);
tcg_temp_free_i64(t0);
tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
}
}
} else {
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
- tcg_gen_shli_i64(t0, arg, 56);
-
- tcg_gen_andi_i64(t1, arg, 0x0000ff00);
- tcg_gen_shli_i64(t1, t1, 40);
- tcg_gen_or_i64(t0, t0, t1);
-
- tcg_gen_andi_i64(t1, arg, 0x00ff0000);
- tcg_gen_shli_i64(t1, t1, 24);
- tcg_gen_or_i64(t0, t0, t1);
-
- tcg_gen_andi_i64(t1, arg, 0xff000000);
- tcg_gen_shli_i64(t1, t1, 8);
- tcg_gen_or_i64(t0, t0, t1);
-
- tcg_gen_shri_i64(t1, arg, 8);
- tcg_gen_andi_i64(t1, t1, 0xff000000);
- tcg_gen_or_i64(t0, t0, t1);
-
- tcg_gen_shri_i64(t1, arg, 24);
- tcg_gen_andi_i64(t1, t1, 0x00ff0000);
- tcg_gen_or_i64(t0, t0, t1);
-
- tcg_gen_shri_i64(t1, arg, 40);
- tcg_gen_andi_i64(t1, t1, 0x0000ff00);
- tcg_gen_or_i64(t0, t0, t1);
+ /* arg = abcdefgh */
+ tcg_gen_movi_i64(t2, 0x00ff00ff00ff00ffull);
+ tcg_gen_shri_i64(t0, arg, 8); /* t0 = .abcdefg */
+ tcg_gen_and_i64(t1, arg, t2); /* t1 = .b.d.f.h */
+ tcg_gen_and_i64(t0, t0, t2); /* t0 = .a.c.e.g */
+ tcg_gen_shli_i64(t1, t1, 8); /* t1 = b.d.f.h. */
+ tcg_gen_or_i64(ret, t0, t1); /* ret = badcfehg */
+
+ tcg_gen_movi_i64(t2, 0x0000ffff0000ffffull);
+ tcg_gen_shri_i64(t0, ret, 16); /* t0 = ..badcfe */
+ tcg_gen_and_i64(t1, ret, t2); /* t1 = ..dc..hg */
+ tcg_gen_and_i64(t0, t0, t2); /* t0 = ..ba..fe */
+ tcg_gen_shli_i64(t1, t1, 16); /* t1 = dc..hg.. */
+ tcg_gen_or_i64(ret, t0, t1); /* ret = dcbahgfe */
+
+ tcg_gen_shri_i64(t0, ret, 32); /* t0 = ....dcba */
+ tcg_gen_shli_i64(t1, ret, 32); /* t1 = hgfe.... */
+ tcg_gen_or_i64(ret, t0, t1); /* ret = hgfedcba */
- tcg_gen_shri_i64(t1, arg, 56);
- tcg_gen_or_i64(ret, t0, t1);
tcg_temp_free_i64(t0);
tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
}
}
}
}
- mask = (1ull << len) - 1;
t1 = tcg_temp_new_i64();
+ if (TCG_TARGET_HAS_extract2_i64) {
+ if (ofs + len == 64) {
+ tcg_gen_shli_i64(t1, arg1, len);
+ tcg_gen_extract2_i64(ret, t1, arg2, len);
+ goto done;
+ }
+ if (ofs == 0) {
+ tcg_gen_extract2_i64(ret, arg1, arg2, len);
+ tcg_gen_rotli_i64(ret, ret, len);
+ goto done;
+ }
+ }
+
+ mask = (1ull << len) - 1;
if (ofs + len < 64) {
tcg_gen_andi_i64(t1, arg2, mask);
tcg_gen_shli_i64(t1, t1, ofs);
}
tcg_gen_andi_i64(ret, arg1, ~(mask << ofs));
tcg_gen_or_i64(ret, ret, t1);
-
+ done:
tcg_temp_free_i64(t1);
}
tcg_gen_sari_i64(ret, ret, 64 - len);
}
+/*
+ * Extract 64 bits from a 128-bit input, ah:al, starting from ofs.
+ * Unlike tcg_gen_extract_i64 above, len is fixed at 64.
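+ * Equivalently, ret = (al >> ofs) | (ah << (64 - ofs)) for 0 < ofs < 64.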
+ */
+void tcg_gen_extract2_i64(TCGv_i64 ret, TCGv_i64 al, TCGv_i64 ah,
+ unsigned int ofs)
+{
+ tcg_debug_assert(ofs <= 64);
+ if (ofs == 0) {
+ tcg_gen_mov_i64(ret, al);
+ } else if (ofs == 64) {
+ tcg_gen_mov_i64(ret, ah);
+ } else if (al == ah) {
+ tcg_gen_rotri_i64(ret, al, ofs);
+ } else if (TCG_TARGET_HAS_extract2_i64) {
+ tcg_gen_op4i_i64(INDEX_op_extract2_i64, ret, al, ah, ofs);
+ } else {
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ tcg_gen_shri_i64(t0, al, ofs);
+ tcg_gen_deposit_i64(ret, t0, ah, 64 - ofs, ofs);
+ tcg_temp_free_i64(t0);
+ }
+}
+
void tcg_gen_movcond_i64(TCGCond cond, TCGv_i64 ret, TCGv_i64 c1,
TCGv_i64 c2, TCGv_i64 v1, TCGv_i64 v2)
{
tcg_temp_free_i64(t2);
}
+void tcg_gen_smin_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b)
+{
+ tcg_gen_movcond_i64(TCG_COND_LT, ret, a, b, a, b);
+}
+
+void tcg_gen_umin_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b)
+{
+ tcg_gen_movcond_i64(TCG_COND_LTU, ret, a, b, a, b);
+}
+
+void tcg_gen_smax_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b)
+{
+ tcg_gen_movcond_i64(TCG_COND_LT, ret, a, b, b, a);
+}
+
+void tcg_gen_umax_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b)
+{
+ tcg_gen_movcond_i64(TCG_COND_LTU, ret, a, b, b, a);
+}
+
+void tcg_gen_abs_i64(TCGv_i64 ret, TCGv_i64 a)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_sari_i64(t, a, 63);
+ tcg_gen_xor_i64(ret, a, t);
+ tcg_gen_sub_i64(ret, ret, t);
+ tcg_temp_free_i64(t);
+}
+
/* Size changing operations. */
void tcg_gen_extrl_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
if (TCG_TARGET_REG_BITS == 32) {
tcg_gen_mov_i32(ret, TCGV_LOW(arg));
} else if (TCG_TARGET_HAS_extrl_i64_i32) {
- tcg_gen_op2(&tcg_ctx, INDEX_op_extrl_i64_i32,
- GET_TCGV_I32(ret), GET_TCGV_I64(arg));
+ tcg_gen_op2(INDEX_op_extrl_i64_i32,
+ tcgv_i32_arg(ret), tcgv_i64_arg(arg));
} else {
- tcg_gen_mov_i32(ret, MAKE_TCGV_I32(GET_TCGV_I64(arg)));
+ tcg_gen_mov_i32(ret, (TCGv_i32)arg);
}
}
if (TCG_TARGET_REG_BITS == 32) {
tcg_gen_mov_i32(ret, TCGV_HIGH(arg));
} else if (TCG_TARGET_HAS_extrh_i64_i32) {
- tcg_gen_op2(&tcg_ctx, INDEX_op_extrh_i64_i32,
- GET_TCGV_I32(ret), GET_TCGV_I64(arg));
+ tcg_gen_op2(INDEX_op_extrh_i64_i32,
+ tcgv_i32_arg(ret), tcgv_i64_arg(arg));
} else {
TCGv_i64 t = tcg_temp_new_i64();
tcg_gen_shri_i64(t, arg, 32);
- tcg_gen_mov_i32(ret, MAKE_TCGV_I32(GET_TCGV_I64(t)));
+ tcg_gen_mov_i32(ret, (TCGv_i32)t);
tcg_temp_free_i64(t);
}
}
tcg_gen_mov_i32(TCGV_LOW(ret), arg);
tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
} else {
- tcg_gen_op2(&tcg_ctx, INDEX_op_extu_i32_i64,
- GET_TCGV_I64(ret), GET_TCGV_I32(arg));
+ tcg_gen_op2(INDEX_op_extu_i32_i64,
+ tcgv_i64_arg(ret), tcgv_i32_arg(arg));
}
}
tcg_gen_mov_i32(TCGV_LOW(ret), arg);
tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
} else {
- tcg_gen_op2(&tcg_ctx, INDEX_op_ext_i32_i64,
- GET_TCGV_I64(ret), GET_TCGV_I32(arg));
+ tcg_gen_op2(INDEX_op_ext_i32_i64,
+ tcgv_i64_arg(ret), tcgv_i32_arg(arg));
}
}
/* QEMU specific operations. */
+void tcg_gen_exit_tb(TranslationBlock *tb, unsigned idx)
+{
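+    /* Pack the exit index into the low bits of the TB pointer.  */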
+ uintptr_t val = (uintptr_t)tb + idx;
+
+ if (tb == NULL) {
+ tcg_debug_assert(idx == 0);
+ } else if (idx <= TB_EXIT_IDXMAX) {
+#ifdef CONFIG_DEBUG_TCG
+ /* This is an exit following a goto_tb. Verify that we have
+ seen this numbered exit before, via tcg_gen_goto_tb. */
+ tcg_debug_assert(tcg_ctx->goto_tb_issue_mask & (1 << idx));
+#endif
+ /* When not chaining, exit without indicating a link. */
+ if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
+ val = 0;
+ }
+ } else {
+ /* This is an exit via the exitreq label. */
+ tcg_debug_assert(idx == TB_EXIT_REQUESTED);
+ }
+
+ tcg_gen_op1i(INDEX_op_exit_tb, val);
+}
+
void tcg_gen_goto_tb(unsigned idx)
{
/* We only support two chained exits. */
- tcg_debug_assert(idx <= 1);
+ tcg_debug_assert(idx <= TB_EXIT_IDXMAX);
#ifdef CONFIG_DEBUG_TCG
    /* Verify that we haven't seen this numbered exit before. */
- tcg_debug_assert((tcg_ctx.goto_tb_issue_mask & (1 << idx)) == 0);
- tcg_ctx.goto_tb_issue_mask |= 1 << idx;
+ tcg_debug_assert((tcg_ctx->goto_tb_issue_mask & (1 << idx)) == 0);
+ tcg_ctx->goto_tb_issue_mask |= 1 << idx;
#endif
- tcg_gen_op1i(INDEX_op_goto_tb, idx);
+ /* When not chaining, we simply fall through to the "fallback" exit. */
+ if (!qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
+ tcg_gen_op1i(INDEX_op_goto_tb, idx);
+ }
}
void tcg_gen_lookup_and_goto_ptr(void)
{
if (TCG_TARGET_HAS_goto_ptr && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
TCGv_ptr ptr = tcg_temp_new_ptr();
- gen_helper_lookup_tb_ptr(ptr, tcg_ctx.tcg_env);
- tcg_gen_op1i(INDEX_op_goto_ptr, GET_TCGV_PTR(ptr));
+ gen_helper_lookup_tb_ptr(ptr, cpu_env);
+ tcg_gen_op1i(INDEX_op_goto_ptr, tcgv_ptr_arg(ptr));
tcg_temp_free_ptr(ptr);
} else {
- tcg_gen_exit_tb(0);
+ tcg_gen_exit_tb(NULL, 0);
}
}
if (TCG_TARGET_REG_BITS == 32) {
tcg_gen_op4i_i32(opc, val, TCGV_LOW(addr), TCGV_HIGH(addr), oi);
} else {
- tcg_gen_op3(&tcg_ctx, opc, GET_TCGV_I32(val), GET_TCGV_I64(addr), oi);
+ tcg_gen_op3(opc, tcgv_i32_arg(val), tcgv_i64_arg(addr), oi);
}
#endif
}
if (TCG_TARGET_REG_BITS == 32) {
tcg_gen_op4i_i32(opc, TCGV_LOW(val), TCGV_HIGH(val), addr, oi);
} else {
- tcg_gen_op3(&tcg_ctx, opc, GET_TCGV_I64(val), GET_TCGV_I32(addr), oi);
+ tcg_gen_op3(opc, tcgv_i64_arg(val), tcgv_i32_arg(addr), oi);
}
#else
if (TCG_TARGET_REG_BITS == 32) {
void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
+ TCGMemOp orig_memop;
+
tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
memop = tcg_canonicalize_memop(memop, 0, 0);
- trace_guest_mem_before_tcg(tcg_ctx.cpu, tcg_ctx.tcg_env,
+ trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env,
addr, trace_mem_get_info(memop, 0));
+
+ orig_memop = memop;
+ if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
+ memop &= ~MO_BSWAP;
+ /* The bswap primitive requires zero-extended input. */
+ if ((memop & MO_SSIZE) == MO_SW) {
+ memop &= ~MO_SIGN;
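+            /* Sign-extend again after the bswap, below.  */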
+ }
+ }
+
gen_ldst_i32(INDEX_op_qemu_ld_i32, val, addr, memop, idx);
+
+ if ((orig_memop ^ memop) & MO_BSWAP) {
+ switch (orig_memop & MO_SIZE) {
+ case MO_16:
+ tcg_gen_bswap16_i32(val, val);
+ if (orig_memop & MO_SIGN) {
+ tcg_gen_ext16s_i32(val, val);
+ }
+ break;
+ case MO_32:
+ tcg_gen_bswap32_i32(val, val);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
}
void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
+ TCGv_i32 swap = NULL;
+
tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
memop = tcg_canonicalize_memop(memop, 0, 1);
- trace_guest_mem_before_tcg(tcg_ctx.cpu, tcg_ctx.tcg_env,
+ trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env,
addr, trace_mem_get_info(memop, 1));
+
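+    /* Byte-swap the value into a temporary before the store when the
+       backend cannot swap as part of the memory operation itself.  */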
+ if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
+ swap = tcg_temp_new_i32();
+ switch (memop & MO_SIZE) {
+ case MO_16:
+ tcg_gen_ext16u_i32(swap, val);
+ tcg_gen_bswap16_i32(swap, swap);
+ break;
+ case MO_32:
+ tcg_gen_bswap32_i32(swap, val);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ val = swap;
+ memop &= ~MO_BSWAP;
+ }
+
gen_ldst_i32(INDEX_op_qemu_st_i32, val, addr, memop, idx);
+
+ if (swap) {
+ tcg_temp_free_i32(swap);
+ }
}
void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
- tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+ TCGMemOp orig_memop;
+
if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop);
if (memop & MO_SIGN) {
return;
}
+ tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
memop = tcg_canonicalize_memop(memop, 1, 0);
- trace_guest_mem_before_tcg(tcg_ctx.cpu, tcg_ctx.tcg_env,
+ trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env,
addr, trace_mem_get_info(memop, 0));
+
+ orig_memop = memop;
+ if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
+ memop &= ~MO_BSWAP;
+ /* The bswap primitive requires zero-extended input. */
+ if ((memop & MO_SIGN) && (memop & MO_SIZE) < MO_64) {
+ memop &= ~MO_SIGN;
+ }
+ }
+
gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, memop, idx);
+
+ if ((orig_memop ^ memop) & MO_BSWAP) {
+ switch (orig_memop & MO_SIZE) {
+ case MO_16:
+ tcg_gen_bswap16_i64(val, val);
+ if (orig_memop & MO_SIGN) {
+ tcg_gen_ext16s_i64(val, val);
+ }
+ break;
+ case MO_32:
+ tcg_gen_bswap32_i64(val, val);
+ if (orig_memop & MO_SIGN) {
+ tcg_gen_ext32s_i64(val, val);
+ }
+ break;
+ case MO_64:
+ tcg_gen_bswap64_i64(val, val);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
}
void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
- tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+ TCGv_i64 swap = NULL;
+
if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
tcg_gen_qemu_st_i32(TCGV_LOW(val), addr, idx, memop);
return;
}
+ tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
memop = tcg_canonicalize_memop(memop, 1, 1);
- trace_guest_mem_before_tcg(tcg_ctx.cpu, tcg_ctx.tcg_env,
+ trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env,
addr, trace_mem_get_info(memop, 1));
+
+ if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
+ swap = tcg_temp_new_i64();
+ switch (memop & MO_SIZE) {
+ case MO_16:
+ tcg_gen_ext16u_i64(swap, val);
+ tcg_gen_bswap16_i64(swap, swap);
+ break;
+ case MO_32:
+ tcg_gen_ext32u_i64(swap, val);
+ tcg_gen_bswap32_i64(swap, swap);
+ break;
+ case MO_64:
+ tcg_gen_bswap64_i64(swap, val);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ val = swap;
+ memop &= ~MO_BSWAP;
+ }
+
gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, memop, idx);
+
+ if (swap) {
+ tcg_temp_free_i64(swap);
+ }
}
static void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, TCGMemOp opc)
{
memop = tcg_canonicalize_memop(memop, 0, 0);
- if (!parallel_cpus) {
+ if (!(tcg_ctx->tb_cflags & CF_PARALLEL)) {
TCGv_i32 t1 = tcg_temp_new_i32();
TCGv_i32 t2 = tcg_temp_new_i32();
#ifdef CONFIG_SOFTMMU
{
TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop & ~MO_SIGN, idx));
- gen(retv, tcg_ctx.tcg_env, addr, cmpv, newv, oi);
+ gen(retv, cpu_env, addr, cmpv, newv, oi);
tcg_temp_free_i32(oi);
}
#else
- gen(retv, tcg_ctx.tcg_env, addr, cmpv, newv);
+ gen(retv, cpu_env, addr, cmpv, newv);
#endif
if (memop & MO_SIGN) {
{
memop = tcg_canonicalize_memop(memop, 1, 0);
- if (!parallel_cpus) {
+ if (!(tcg_ctx->tb_cflags & CF_PARALLEL)) {
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
#ifdef CONFIG_SOFTMMU
{
TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop, idx));
- gen(retv, tcg_ctx.tcg_env, addr, cmpv, newv, oi);
+ gen(retv, cpu_env, addr, cmpv, newv, oi);
tcg_temp_free_i32(oi);
}
#else
- gen(retv, tcg_ctx.tcg_env, addr, cmpv, newv);
+ gen(retv, cpu_env, addr, cmpv, newv);
#endif
#else
- gen_helper_exit_atomic(tcg_ctx.tcg_env);
+ gen_helper_exit_atomic(cpu_env);
/* Produce a result, so that we have a well-formed opcode stream
with respect to uses of the result in the (dead) code following. */
tcg_gen_movi_i64(retv, 0);
#ifdef CONFIG_SOFTMMU
{
TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop & ~MO_SIGN, idx));
- gen(ret, tcg_ctx.tcg_env, addr, val, oi);
+ gen(ret, cpu_env, addr, val, oi);
tcg_temp_free_i32(oi);
}
#else
- gen(ret, tcg_ctx.tcg_env, addr, val);
+ gen(ret, cpu_env, addr, val);
#endif
if (memop & MO_SIGN) {
#ifdef CONFIG_SOFTMMU
{
TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop & ~MO_SIGN, idx));
- gen(ret, tcg_ctx.tcg_env, addr, val, oi);
+ gen(ret, cpu_env, addr, val, oi);
tcg_temp_free_i32(oi);
}
#else
- gen(ret, tcg_ctx.tcg_env, addr, val);
+ gen(ret, cpu_env, addr, val);
#endif
#else
- gen_helper_exit_atomic(tcg_ctx.tcg_env);
+ gen_helper_exit_atomic(cpu_env);
/* Produce a result, so that we have a well-formed opcode stream
with respect to uses of the result in the (dead) code following. */
tcg_gen_movi_i64(ret, 0);
void tcg_gen_atomic_##NAME##_i32 \
(TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, TCGMemOp memop) \
{ \
- if (parallel_cpus) { \
+ if (tcg_ctx->tb_cflags & CF_PARALLEL) { \
do_atomic_op_i32(ret, addr, val, idx, memop, table_##NAME); \
} else { \
do_nonatomic_op_i32(ret, addr, val, idx, memop, NEW, \
void tcg_gen_atomic_##NAME##_i64 \
(TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, TCGMemOp memop) \
{ \
- if (parallel_cpus) { \
+ if (tcg_ctx->tb_cflags & CF_PARALLEL) { \
do_atomic_op_i64(ret, addr, val, idx, memop, table_##NAME); \
} else { \
do_nonatomic_op_i64(ret, addr, val, idx, memop, NEW, \
GEN_ATOMIC_HELPER(fetch_and, and, 0)
GEN_ATOMIC_HELPER(fetch_or, or, 0)
GEN_ATOMIC_HELPER(fetch_xor, xor, 0)
+GEN_ATOMIC_HELPER(fetch_smin, smin, 0)
+GEN_ATOMIC_HELPER(fetch_umin, umin, 0)
+GEN_ATOMIC_HELPER(fetch_smax, smax, 0)
+GEN_ATOMIC_HELPER(fetch_umax, umax, 0)
GEN_ATOMIC_HELPER(add_fetch, add, 1)
GEN_ATOMIC_HELPER(and_fetch, and, 1)
GEN_ATOMIC_HELPER(or_fetch, or, 1)
GEN_ATOMIC_HELPER(xor_fetch, xor, 1)
+GEN_ATOMIC_HELPER(smin_fetch, smin, 1)
+GEN_ATOMIC_HELPER(umin_fetch, umin, 1)
+GEN_ATOMIC_HELPER(smax_fetch, smax, 1)
+GEN_ATOMIC_HELPER(umax_fetch, umax, 1)
static void tcg_gen_mov2_i32(TCGv_i32 r, TCGv_i32 a, TCGv_i32 b)
{