tcg_gen_extrl_i64_i32(ret, t1);
tcg_temp_free_i64(t1);
tcg_temp_free_i64(t2);
- } else if (TCG_TARGET_HAS_clz_i32) {
- TCGv_i32 t1 = tcg_temp_new_i32();
- TCGv_i32 t2 = tcg_temp_new_i32();
- tcg_gen_neg_i32(t1, arg1);
- tcg_gen_xori_i32(t2, arg2, 31);
- tcg_gen_and_i32(t1, t1, arg1);
- tcg_gen_clz_i32(ret, t1, t2);
- tcg_temp_free_i32(t1);
- tcg_temp_free_i32(t2);
- tcg_gen_xori_i32(ret, ret, 31);
- } else if (TCG_TARGET_HAS_clz_i64) {
- TCGv_i32 t1 = tcg_temp_new_i32();
- TCGv_i32 t2 = tcg_temp_new_i32();
- TCGv_i64 x1 = tcg_temp_new_i64();
- TCGv_i64 x2 = tcg_temp_new_i64();
- tcg_gen_neg_i32(t1, arg1);
- tcg_gen_xori_i32(t2, arg2, 63);
- tcg_gen_and_i32(t1, t1, arg1);
- tcg_gen_extu_i32_i64(x1, t1);
- tcg_gen_extu_i32_i64(x2, t2);
- tcg_temp_free_i32(t1);
- tcg_temp_free_i32(t2);
- tcg_gen_clz_i64(x1, x1, x2);
- tcg_gen_extrl_i64_i32(ret, x1);
- tcg_temp_free_i64(x1);
- tcg_temp_free_i64(x2);
- tcg_gen_xori_i32(ret, ret, 63);
+ } else if (TCG_TARGET_HAS_ctpop_i32
+ || TCG_TARGET_HAS_ctpop_i64
+ || TCG_TARGET_HAS_clz_i32
+ || TCG_TARGET_HAS_clz_i64) {
+ TCGv_i32 z, t = tcg_temp_new_i32();
+
+ if (TCG_TARGET_HAS_ctpop_i32 || TCG_TARGET_HAS_ctpop_i64) {
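+ /* ctz(x) == ctpop((x - 1) & ~x): subtracting 1 turns the
+ trailing zeros into ones, and the andc keeps only those. */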
+ tcg_gen_subi_i32(t, arg1, 1);
+ tcg_gen_andc_i32(t, t, arg1);
+ tcg_gen_ctpop_i32(t, t);
+ } else {
+ /* Since all non-x86 hosts have clz(0) == 32, don't fight it. */
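+ /* x & -x isolates the least significant set bit, for which
+ clz returns 31 - ctz(x); xoring with 31 recovers ctz(x). */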
+ tcg_gen_neg_i32(t, arg1);
+ tcg_gen_and_i32(t, t, arg1);
+ tcg_gen_clzi_i32(t, t, 32);
+ tcg_gen_xori_i32(t, t, 31);
+ }
+ z = tcg_const_i32(0);
+ tcg_gen_movcond_i32(TCG_COND_EQ, ret, arg1, z, arg2, t);
+ tcg_temp_free_i32(t);
+ tcg_temp_free_i32(z);
} else {
gen_helper_ctz_i32(ret, arg1, arg2);
}
void tcg_gen_ctzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2)
{
- TCGv_i32 t = tcg_const_i32(arg2);
- tcg_gen_ctz_i32(ret, arg1, t);
- tcg_temp_free_i32(t);
+ if (!TCG_TARGET_HAS_ctz_i32 && TCG_TARGET_HAS_ctpop_i32 && arg2 == 32) {
+ /* This equivalence has the advantage of not requiring a fixup. */
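+ /* When arg1 == 0, (arg1 - 1) & ~arg1 is all ones, so ctpop
+ returns 32, which is exactly the arg2 == 32 value wanted. */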
+ TCGv_i32 t = tcg_temp_new_i32();
+ tcg_gen_subi_i32(t, arg1, 1);
+ tcg_gen_andc_i32(t, t, arg1);
+ tcg_gen_ctpop_i32(ret, t);
+ tcg_temp_free_i32(t);
+ } else {
+ TCGv_i32 t = tcg_const_i32(arg2);
+ tcg_gen_ctz_i32(ret, arg1, t);
+ tcg_temp_free_i32(t);
+ }
+}
+
+void tcg_gen_clrsb_i32(TCGv_i32 ret, TCGv_i32 arg)
+{
+ if (TCG_TARGET_HAS_clz_i32) {
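+ /* Xoring with the replicated sign bit clears each leading bit
+ that matches the sign; clz then counts the sign bit plus its
+ redundant copies, and the subtract excludes the sign bit. */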
+ TCGv_i32 t = tcg_temp_new_i32();
+ tcg_gen_sari_i32(t, arg, 31);
+ tcg_gen_xor_i32(t, t, arg);
+ tcg_gen_clzi_i32(t, t, 32);
+ tcg_gen_subi_i32(ret, t, 1);
+ tcg_temp_free_i32(t);
+ } else {
+ gen_helper_clrsb_i32(ret, arg);
+ }
+}
+
+void tcg_gen_ctpop_i32(TCGv_i32 ret, TCGv_i32 arg1)
+{
+ if (TCG_TARGET_HAS_ctpop_i32) {
+ tcg_gen_op2_i32(INDEX_op_ctpop_i32, ret, arg1);
+ } else if (TCG_TARGET_HAS_ctpop_i64) {
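+ /* Count in 64 bits; the zero-extended high half adds nothing. */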
+ TCGv_i64 t = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(t, arg1);
+ tcg_gen_ctpop_i64(t, t);
+ tcg_gen_extrl_i64_i32(ret, t);
+ tcg_temp_free_i64(t);
+ } else {
+ gen_helper_ctpop_i32(ret, arg1);
+ }
+}
+
void tcg_gen_rotl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
if (TCG_TARGET_HAS_ctz_i64) {
tcg_gen_op3_i64(INDEX_op_ctz_i64, ret, arg1, arg2);
- } else if (TCG_TARGET_HAS_clz_i64) {
- TCGv_i64 t1 = tcg_temp_new_i64();
- TCGv_i64 t2 = tcg_temp_new_i64();
- tcg_gen_neg_i64(t1, arg1);
- tcg_gen_xori_i64(t2, arg2, 63);
- tcg_gen_and_i64(t1, t1, arg1);
- tcg_gen_clz_i64(ret, t1, t2);
- tcg_temp_free_i64(t1);
- tcg_temp_free_i64(t2);
- tcg_gen_xori_i64(ret, ret, 63);
+ } else if (TCG_TARGET_HAS_ctpop_i64 || TCG_TARGET_HAS_clz_i64) {
+ TCGv_i64 z, t = tcg_temp_new_i64();
+
+ if (TCG_TARGET_HAS_ctpop_i64) {
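+ /* Same ctpop((x - 1) & ~x) identity as the 32-bit case above. */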
+ tcg_gen_subi_i64(t, arg1, 1);
+ tcg_gen_andc_i64(t, t, arg1);
+ tcg_gen_ctpop_i64(t, t);
+ } else {
+ /* Since all non-x86 hosts have clz(0) == 64, don't fight it. */
+ tcg_gen_neg_i64(t, arg1);
+ tcg_gen_and_i64(t, t, arg1);
+ tcg_gen_clzi_i64(t, t, 64);
+ tcg_gen_xori_i64(t, t, 63);
+ }
+ z = tcg_const_i64(0);
+ tcg_gen_movcond_i64(TCG_COND_EQ, ret, arg1, z, arg2, t);
+ tcg_temp_free_i64(t);
+ tcg_temp_free_i64(z);
} else {
gen_helper_ctz_i64(ret, arg1, arg2);
}
tcg_gen_ctz_i32(TCGV_LOW(ret), TCGV_LOW(arg1), t32);
tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
tcg_temp_free_i32(t32);
+ } else if (!TCG_TARGET_HAS_ctz_i64
+ && TCG_TARGET_HAS_ctpop_i64
+ && arg2 == 64) {
+ /* This equivalence has the advantage of not requiring a fixup. */
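+ /* When arg1 == 0, ctpop sees all ones and returns 64 == arg2. */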
+ TCGv_i64 t = tcg_temp_new_i64();
+ tcg_gen_subi_i64(t, arg1, 1);
+ tcg_gen_andc_i64(t, t, arg1);
+ tcg_gen_ctpop_i64(ret, t);
+ tcg_temp_free_i64(t);
} else {
TCGv_i64 t64 = tcg_const_i64(arg2);
tcg_gen_ctz_i64(ret, arg1, t64);
tcg_temp_free_i64(t64);
}
}
+
+void tcg_gen_clrsb_i64(TCGv_i64 ret, TCGv_i64 arg)
+{
+ if (TCG_TARGET_HAS_clz_i64 || TCG_TARGET_HAS_clz_i32) {
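+ /* Same xor-with-sign derivation as the 32-bit case above. */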
+ TCGv_i64 t = tcg_temp_new_i64();
+ tcg_gen_sari_i64(t, arg, 63);
+ tcg_gen_xor_i64(t, t, arg);
+ tcg_gen_clzi_i64(t, t, 64);
+ tcg_gen_subi_i64(ret, t, 1);
+ tcg_temp_free_i64(t);
+ } else {
+ gen_helper_clrsb_i64(ret, arg);
+ }
+}
+
+void tcg_gen_ctpop_i64(TCGv_i64 ret, TCGv_i64 arg1)
+{
+ if (TCG_TARGET_HAS_ctpop_i64) {
+ tcg_gen_op2_i64(INDEX_op_ctpop_i64, ret, arg1);
+ } else if (TCG_TARGET_REG_BITS == 32 && TCG_TARGET_HAS_ctpop_i32) {
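+ /* Sum the per-half counts; at most 64, so the add cannot carry. */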
+ tcg_gen_ctpop_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1));
+ tcg_gen_ctpop_i32(TCGV_LOW(ret), TCGV_LOW(arg1));
+ tcg_gen_add_i32(TCGV_LOW(ret), TCGV_LOW(ret), TCGV_HIGH(ret));
+ tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
+ } else {
+ gen_helper_ctpop_i64(ret, arg1);
+ }
+}
+
void tcg_gen_rotl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
if (TCG_TARGET_HAS_rot_i64) {
tcg_gen_op1i(INDEX_op_goto_tb, idx);
}
+
+void tcg_gen_lookup_and_goto_ptr(TCGv addr)
+{
+ if (TCG_TARGET_HAS_goto_ptr && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
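+ /* The helper returns a host code pointer: the translated block
+ for addr if one exists, otherwise a path that exits the TB. */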
+ TCGv_ptr ptr = tcg_temp_new_ptr();
+ gen_helper_lookup_tb_ptr(ptr, tcg_ctx.tcg_env, addr);
+ tcg_gen_op1i(INDEX_op_goto_ptr, GET_TCGV_PTR(ptr));
+ tcg_temp_free_ptr(ptr);
+ } else {
+ tcg_gen_exit_tb(0);
+ }
+}
+
static inline TCGMemOp tcg_canonicalize_memop(TCGMemOp op, bool is64, bool st)
{
/* Trigger the asserts within as early as possible. */
#endif
#else
gen_helper_exit_atomic(tcg_ctx.tcg_env);
+ /* Produce a result, so that we have a well-formed opcode stream
+ with respect to uses of the result in the (dead) code following. */
+ tcg_gen_movi_i64(retv, 0);
#endif /* CONFIG_ATOMIC64 */
} else {
TCGv_i32 c32 = tcg_temp_new_i32();
#endif
#else
gen_helper_exit_atomic(tcg_ctx.tcg_env);
+ /* Produce a result, so that we have a well-formed opcode stream
+ with respect to uses of the result in the (dead) code following. */
+ tcg_gen_movi_i64(ret, 0);
#endif /* CONFIG_ATOMIC64 */
} else {
TCGv_i32 v32 = tcg_temp_new_i32();