*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg.h"
if (cond == TCG_COND_ALWAYS) {
tcg_gen_br(l);
} else if (cond != TCG_COND_NEVER) {
+ l->refs++;
tcg_gen_op4ii_i32(INDEX_op_brcond_i32, arg1, arg2, cond, label_arg(l));
}
}
return;
}
- mask = (1u << len) - 1;
t1 = tcg_temp_new_i32();
+ if (TCG_TARGET_HAS_extract2_i32) {
+ if (ofs + len == 32) {
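+ /* Deposit into the top of the word: shift arg1 up by len, then
+    funnel-shift arg2's low len bits back in from the high side. */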
+ tcg_gen_shli_i32(t1, arg1, len);
+ tcg_gen_extract2_i32(ret, t1, arg2, len);
+ goto done;
+ }
+ if (ofs == 0) {
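+ /* Deposit at bit 0: the funnel shift leaves arg2's low len bits
+    at the top of ret; rotate them back down into place. */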
+ tcg_gen_extract2_i32(ret, arg1, arg2, len);
+ tcg_gen_rotli_i32(ret, ret, len);
+ goto done;
+ }
+ }
+
+ mask = (1u << len) - 1;
if (ofs + len < 32) {
tcg_gen_andi_i32(t1, arg2, mask);
tcg_gen_shli_i32(t1, t1, ofs);
}
tcg_gen_andi_i32(ret, arg1, ~(mask << ofs));
tcg_gen_or_i32(ret, ret, t1);
-
+ done:
tcg_temp_free_i32(t1);
}
tcg_gen_sari_i32(ret, ret, 32 - len);
}
+/*
+ * Extract 32 bits from a 64-bit input, ah:al, starting from ofs.
+ * Unlike tcg_gen_extract_i32 above, len is fixed at 32.
+ */
+void tcg_gen_extract2_i32(TCGv_i32 ret, TCGv_i32 al, TCGv_i32 ah,
+ unsigned int ofs)
+{
+ tcg_debug_assert(ofs <= 32);
+ if (ofs == 0) {
+ tcg_gen_mov_i32(ret, al);
+ } else if (ofs == 32) {
+ tcg_gen_mov_i32(ret, ah);
+ } else if (al == ah) {
+ tcg_gen_rotri_i32(ret, al, ofs);
+ } else if (TCG_TARGET_HAS_extract2_i32) {
+ tcg_gen_op4i_i32(INDEX_op_extract2_i32, ret, al, ah, ofs);
+ } else {
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ tcg_gen_shri_i32(t0, al, ofs);
+ tcg_gen_deposit_i32(ret, t0, ah, 32 - ofs, ofs);
+ tcg_temp_free_i32(t0);
+ }
+}
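+
+/*
+ * For example: al = 0xAABBCCDD, ah = 0x11223344, ofs = 8.  The 64-bit
+ * view ah:al is 0x11223344AABBCCDD, whose bits [39:8] are 0x44AABBCC,
+ * so tcg_gen_extract2_i32(ret, al, ah, 8) leaves ret = 0x44AABBCC.
+ */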
+
void tcg_gen_movcond_i32(TCGCond cond, TCGv_i32 ret, TCGv_i32 c1,
TCGv_i32 c2, TCGv_i32 v1, TCGv_i32 v2)
{
tcg_gen_movcond_i32(TCG_COND_LTU, ret, a, b, b, a);
}
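+/*
+ * Branchless absolute value: t = a >> 31 (arithmetic shift) is 0 for
+ * non-negative a and -1 otherwise, so (a ^ t) - t yields a or -a.
+ * For example a = -5: t = -1, a ^ t = 4, 4 - (-1) = 5.
+ */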
+void tcg_gen_abs_i32(TCGv_i32 ret, TCGv_i32 a)
+{
+ TCGv_i32 t = tcg_temp_new_i32();
+
+ tcg_gen_sari_i32(t, a, 31);
+ tcg_gen_xor_i32(ret, a, t);
+ tcg_gen_sub_i32(ret, ret, t);
+ tcg_temp_free_i32(t);
+}
+
/* 64-bit ops */
#if TCG_TARGET_REG_BITS == 32
tcg_gen_shli_i32(TCGV_HIGH(ret), TCGV_LOW(arg1), c);
tcg_gen_movi_i32(TCGV_LOW(ret), 0);
}
- } else {
- TCGv_i32 t0, t1;
-
- t0 = tcg_temp_new_i32();
- t1 = tcg_temp_new_i32();
- if (right) {
- tcg_gen_shli_i32(t0, TCGV_HIGH(arg1), 32 - c);
- if (arith) {
- tcg_gen_sari_i32(t1, TCGV_HIGH(arg1), c);
- } else {
- tcg_gen_shri_i32(t1, TCGV_HIGH(arg1), c);
- }
+ } else if (right) {
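+ /* Bits [c+31:c] of the 64-bit value high:low form the new low
+    word, which is exactly a funnel shift (extract2) by c. */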
+ if (TCG_TARGET_HAS_extract2_i32) {
+ tcg_gen_extract2_i32(TCGV_LOW(ret),
+ TCGV_LOW(arg1), TCGV_HIGH(arg1), c);
+ } else {
tcg_gen_shri_i32(TCGV_LOW(ret), TCGV_LOW(arg1), c);
- tcg_gen_or_i32(TCGV_LOW(ret), TCGV_LOW(ret), t0);
- tcg_gen_mov_i32(TCGV_HIGH(ret), t1);
+ tcg_gen_deposit_i32(TCGV_LOW(ret), TCGV_LOW(ret),
+ TCGV_HIGH(arg1), 32 - c, c);
+ }
+ if (arith) {
+ tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), c);
} else {
+ tcg_gen_shri_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), c);
+ }
+ } else {
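+ /* For a left shift, the new high word is bits [63-c:32-c] of
+    high:low, i.e. a funnel shift (extract2) by 32 - c. */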
+ if (TCG_TARGET_HAS_extract2_i32) {
+ tcg_gen_extract2_i32(TCGV_HIGH(ret),
+ TCGV_LOW(arg1), TCGV_HIGH(arg1), 32 - c);
+ } else {
+ TCGv_i32 t0 = tcg_temp_new_i32();
tcg_gen_shri_i32(t0, TCGV_LOW(arg1), 32 - c);
- /* Note: ret can be the same as arg1, so we use t1 */
- tcg_gen_shli_i32(t1, TCGV_LOW(arg1), c);
- tcg_gen_shli_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), c);
- tcg_gen_or_i32(TCGV_HIGH(ret), TCGV_HIGH(ret), t0);
- tcg_gen_mov_i32(TCGV_LOW(ret), t1);
+ tcg_gen_deposit_i32(TCGV_HIGH(ret), t0,
+ TCGV_HIGH(arg1), c, 32 - c);
+ tcg_temp_free_i32(t0);
}
- tcg_temp_free_i32(t0);
- tcg_temp_free_i32(t1);
+ tcg_gen_shli_i32(TCGV_LOW(ret), TCGV_LOW(arg1), c);
}
}
if (cond == TCG_COND_ALWAYS) {
tcg_gen_br(l);
} else if (cond != TCG_COND_NEVER) {
+ l->refs++;
if (TCG_TARGET_REG_BITS == 32) {
tcg_gen_op6ii_i32(INDEX_op_brcond2_i32, TCGV_LOW(arg1),
TCGV_HIGH(arg1), TCGV_LOW(arg2),
}
}
- mask = (1ull << len) - 1;
t1 = tcg_temp_new_i64();
+ if (TCG_TARGET_HAS_extract2_i64) {
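+ /* Same fast paths as the 32-bit deposit above: a field that
+    reaches bit 63, or one that starts at bit 0. */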
+ if (ofs + len == 64) {
+ tcg_gen_shli_i64(t1, arg1, len);
+ tcg_gen_extract2_i64(ret, t1, arg2, len);
+ goto done;
+ }
+ if (ofs == 0) {
+ tcg_gen_extract2_i64(ret, arg1, arg2, len);
+ tcg_gen_rotli_i64(ret, ret, len);
+ goto done;
+ }
+ }
+
+ mask = (1ull << len) - 1;
if (ofs + len < 64) {
tcg_gen_andi_i64(t1, arg2, mask);
tcg_gen_shli_i64(t1, t1, ofs);
}
tcg_gen_andi_i64(ret, arg1, ~(mask << ofs));
tcg_gen_or_i64(ret, ret, t1);
-
+ done:
tcg_temp_free_i64(t1);
}
tcg_gen_sari_i64(ret, ret, 64 - len);
}
+/*
+ * Extract 64 bits from a 128-bit input, ah:al, starting from ofs.
+ * Unlike tcg_gen_extract_i64 above, len is fixed at 64.
+ */
+void tcg_gen_extract2_i64(TCGv_i64 ret, TCGv_i64 al, TCGv_i64 ah,
+ unsigned int ofs)
+{
+ tcg_debug_assert(ofs <= 64);
+ if (ofs == 0) {
+ tcg_gen_mov_i64(ret, al);
+ } else if (ofs == 64) {
+ tcg_gen_mov_i64(ret, ah);
+ } else if (al == ah) {
+ tcg_gen_rotri_i64(ret, al, ofs);
+ } else if (TCG_TARGET_HAS_extract2_i64) {
+ tcg_gen_op4i_i64(INDEX_op_extract2_i64, ret, al, ah, ofs);
+ } else {
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ tcg_gen_shri_i64(t0, al, ofs);
+ tcg_gen_deposit_i64(ret, t0, ah, 64 - ofs, ofs);
+ tcg_temp_free_i64(t0);
+ }
+}
+
void tcg_gen_movcond_i64(TCGCond cond, TCGv_i64 ret, TCGv_i64 c1,
TCGv_i64 c2, TCGv_i64 v1, TCGv_i64 v2)
{
tcg_gen_movcond_i64(TCG_COND_LTU, ret, a, b, b, a);
}
+void tcg_gen_abs_i64(TCGv_i64 ret, TCGv_i64 a)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_sari_i64(t, a, 63);
+ tcg_gen_xor_i64(ret, a, t);
+ tcg_gen_sub_i64(ret, ret, t);
+ tcg_temp_free_i64(t);
+}
+
/* Size changing operations. */
void tcg_gen_extrl_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
+ TCGMemOp orig_memop;
+
tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
memop = tcg_canonicalize_memop(memop, 0, 0);
trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env,
addr, trace_mem_get_info(memop, 0));
+
+ orig_memop = memop;
+ if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
+ memop &= ~MO_BSWAP;
+ /* The bswap primitive requires zero-extended input. */
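+ /* bswap16 assumes the high bits are zero, so do the load
+    zero-extended and redo the sign extension after the swap. */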
+ if ((memop & MO_SSIZE) == MO_SW) {
+ memop &= ~MO_SIGN;
+ }
+ }
+
gen_ldst_i32(INDEX_op_qemu_ld_i32, val, addr, memop, idx);
+
+ if ((orig_memop ^ memop) & MO_BSWAP) {
+ switch (orig_memop & MO_SIZE) {
+ case MO_16:
+ tcg_gen_bswap16_i32(val, val);
+ if (orig_memop & MO_SIGN) {
+ tcg_gen_ext16s_i32(val, val);
+ }
+ break;
+ case MO_32:
+ tcg_gen_bswap32_i32(val, val);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
}
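+/*
+ * For example, assuming a little-endian host without
+ * TCG_TARGET_HAS_MEMORY_BSWAP: a big-endian 16-bit sign-extending load
+ * (MO_BSWAP | MO_SW) is emitted as a plain MO_UW load in host order,
+ * followed by bswap16_i32 and ext16s_i32 as above.
+ */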
void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
+ TCGv_i32 swap = NULL;
+
tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
memop = tcg_canonicalize_memop(memop, 0, 1);
trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env,
addr, trace_mem_get_info(memop, 1));
+
+ if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
+ swap = tcg_temp_new_i32();
+ switch (memop & MO_SIZE) {
+ case MO_16:
+ tcg_gen_ext16u_i32(swap, val);
+ tcg_gen_bswap16_i32(swap, swap);
+ break;
+ case MO_32:
+ tcg_gen_bswap32_i32(swap, val);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ val = swap;
+ memop &= ~MO_BSWAP;
+ }
+
gen_ldst_i32(INDEX_op_qemu_st_i32, val, addr, memop, idx);
+
+ if (swap) {
+ tcg_temp_free_i32(swap);
+ }
}
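+/*
+ * The store side mirrors the load: e.g. a MO_BSWAP | MO_UW store on
+ * such a host becomes ext16u + bswap16 into a scratch temp, followed
+ * by a plain MO_UW store.
+ */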
void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
- tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+ TCGMemOp orig_memop;
+
if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop);
if (memop & MO_SIGN) {
return;
}
+ tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
memop = tcg_canonicalize_memop(memop, 1, 0);
trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env,
addr, trace_mem_get_info(memop, 0));
+
+ orig_memop = memop;
+ if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
+ memop &= ~MO_BSWAP;
+ /* The bswap primitive requires zero-extended input. */
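+ /* Only sub-64-bit sign-extending loads need this fixup; a 64-bit
+    swap consumes all 64 bits, so MO_SIGN is irrelevant there. */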
+ if ((memop & MO_SIGN) && (memop & MO_SIZE) < MO_64) {
+ memop &= ~MO_SIGN;
+ }
+ }
+
gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, memop, idx);
+
+ if ((orig_memop ^ memop) & MO_BSWAP) {
+ switch (orig_memop & MO_SIZE) {
+ case MO_16:
+ tcg_gen_bswap16_i64(val, val);
+ if (orig_memop & MO_SIGN) {
+ tcg_gen_ext16s_i64(val, val);
+ }
+ break;
+ case MO_32:
+ tcg_gen_bswap32_i64(val, val);
+ if (orig_memop & MO_SIGN) {
+ tcg_gen_ext32s_i64(val, val);
+ }
+ break;
+ case MO_64:
+ tcg_gen_bswap64_i64(val, val);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
}
void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
- tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+ TCGv_i64 swap = NULL;
+
if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
tcg_gen_qemu_st_i32(TCGV_LOW(val), addr, idx, memop);
return;
}
+ tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
memop = tcg_canonicalize_memop(memop, 1, 1);
trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env,
addr, trace_mem_get_info(memop, 1));
+
+ if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
+ swap = tcg_temp_new_i64();
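+ /* As on the load side, the narrow swap helpers require
+    zero-extended input, hence the ext16u/ext32u below. */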
+ switch (memop & MO_SIZE) {
+ case MO_16:
+ tcg_gen_ext16u_i64(swap, val);
+ tcg_gen_bswap16_i64(swap, swap);
+ break;
+ case MO_32:
+ tcg_gen_ext32u_i64(swap, val);
+ tcg_gen_bswap32_i64(swap, swap);
+ break;
+ case MO_64:
+ tcg_gen_bswap64_i64(swap, val);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ val = swap;
+ memop &= ~MO_BSWAP;
+ }
+
gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, memop, idx);
+
+ if (swap) {
+ tcg_temp_free_i64(swap);
+ }
}
static void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, TCGMemOp opc)