*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
-#include "exec/cpu-common.h"
-#include "tcg-op.h"
+#include "tcg/tcg-op.h"
#define CASE_OP_32_64(x) \
glue(glue(case INDEX_op_, x), _i32): \
glue(glue(case INDEX_op_, x), _i64)
+#define CASE_OP_32_64_VEC(x) \
+ glue(glue(case INDEX_op_, x), _i32): \
+ glue(glue(case INDEX_op_, x), _i64): \
+ glue(glue(case INDEX_op_, x), _vec)
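+/* For example, CASE_OP_32_64_VEC(add) expands to
+       case INDEX_op_add_i32:
+       case INDEX_op_add_i64:
+       case INDEX_op_add_vec
+   so one label covers the 32-bit, 64-bit and vector variants. */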
+
struct tcg_temp_info {
bool is_const;
- uint16_t prev_copy;
- uint16_t next_copy;
+ TCGTemp *prev_copy;
+ TCGTemp *next_copy;
tcg_target_ulong val;
tcg_target_ulong mask;
};
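+/* next_copy/prev_copy link all temps known to hold the same value into
+   a circular doubly-linked list; a temp with no copies points both
+   links at itself, which is what ts_is_copy() below tests. */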
-static struct tcg_temp_info temps[TCG_MAX_TEMPS];
-static TCGTempSet temps_used;
+static inline struct tcg_temp_info *ts_info(TCGTemp *ts)
+{
+ return ts->state_ptr;
+}
-static inline bool temp_is_const(TCGArg arg)
+static inline struct tcg_temp_info *arg_info(TCGArg arg)
{
- return temps[arg].is_const;
+ return ts_info(arg_temp(arg));
}
-static inline bool temp_is_copy(TCGArg arg)
+static inline bool ts_is_const(TCGTemp *ts)
{
- return temps[arg].next_copy != arg;
+ return ts_info(ts)->is_const;
}
-/* Reset TEMP's state, possibly removing the temp for the list of copies. */
-static void reset_temp(TCGArg temp)
+static inline bool arg_is_const(TCGArg arg)
{
- temps[temps[temp].next_copy].prev_copy = temps[temp].prev_copy;
- temps[temps[temp].prev_copy].next_copy = temps[temp].next_copy;
- temps[temp].next_copy = temp;
- temps[temp].prev_copy = temp;
- temps[temp].is_const = false;
- temps[temp].mask = -1;
+ return ts_is_const(arg_temp(arg));
}
-/* Reset all temporaries, given that there are NB_TEMPS of them. */
-static void reset_all_temps(int nb_temps)
+static inline bool ts_is_copy(TCGTemp *ts)
{
- bitmap_zero(temps_used.l, nb_temps);
+ return ts_info(ts)->next_copy != ts;
}
-/* Initialize and activate a temporary. */
-static void init_temp_info(TCGArg temp)
+/* Reset TEMP's state, possibly removing the temp from the list of copies. */
+static void reset_ts(TCGTemp *ts)
{
- if (!test_bit(temp, temps_used.l)) {
- temps[temp].next_copy = temp;
- temps[temp].prev_copy = temp;
- temps[temp].is_const = false;
- temps[temp].mask = -1;
- set_bit(temp, temps_used.l);
- }
+ struct tcg_temp_info *ti = ts_info(ts);
+ struct tcg_temp_info *pi = ts_info(ti->prev_copy);
+ struct tcg_temp_info *ni = ts_info(ti->next_copy);
+
+ ni->prev_copy = ti->prev_copy;
+ pi->next_copy = ti->next_copy;
+ ti->next_copy = ts;
+ ti->prev_copy = ts;
+ ti->is_const = false;
+ ti->mask = -1;
}
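+/* Illustration: with copies A <-> B <-> C circularly linked,
+   reset_ts(B) relinks A <-> C and leaves B pointing at itself. */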
-static int op_bits(TCGOpcode op)
+static void reset_temp(TCGArg arg)
{
- const TCGOpDef *def = &tcg_op_defs[op];
- return def->flags & TCG_OPF_64BIT ? 64 : 32;
+ reset_ts(arg_temp(arg));
}
-static TCGOpcode op_to_mov(TCGOpcode op)
+/* Initialize and activate a temporary. */
+static void init_ts_info(struct tcg_temp_info *infos,
+ TCGTempSet *temps_used, TCGTemp *ts)
{
- switch (op_bits(op)) {
- case 32:
- return INDEX_op_mov_i32;
- case 64:
- return INDEX_op_mov_i64;
- default:
- fprintf(stderr, "op_to_mov: unexpected return value of "
- "function op_bits.\n");
- tcg_abort();
+ size_t idx = temp_idx(ts);
+ if (!test_bit(idx, temps_used->l)) {
+ struct tcg_temp_info *ti = &infos[idx];
+
+ ts->state_ptr = ti;
+ ti->next_copy = ts;
+ ti->prev_copy = ts;
+ ti->is_const = false;
+ ti->mask = -1;
+ set_bit(idx, temps_used->l);
}
}
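+/* infos[] is indexed by temp_idx() and temps_used marks which entries
+   have been set up, so per-temp state is initialized lazily the first
+   time a temp is seen in the opcode stream. */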
-static TCGOpcode op_to_movi(TCGOpcode op)
+static void init_arg_info(struct tcg_temp_info *infos,
+ TCGTempSet *temps_used, TCGArg arg)
{
- switch (op_bits(op)) {
- case 32:
- return INDEX_op_movi_i32;
- case 64:
- return INDEX_op_movi_i64;
- default:
- fprintf(stderr, "op_to_movi: unexpected return value of "
- "function op_bits.\n");
- tcg_abort();
- }
+ init_ts_info(infos, temps_used, arg_temp(arg));
}
-static TCGArg find_better_copy(TCGContext *s, TCGArg arg)
+static TCGTemp *find_better_copy(TCGContext *s, TCGTemp *ts)
{
- TCGTemp *ts = arg_temp(arg);
- TCGArg i;
+ TCGTemp *i;
/* If this is already a global, we can't do better. */
if (ts->temp_global) {
- return arg;
+ return ts;
}
/* Search for a global first. */
- for (i = temps[arg].next_copy ; i != arg; i = temps[i].next_copy) {
- if (i < s->nb_globals) {
+ for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) {
+ if (i->temp_global) {
return i;
}
}
/* If it is a temp, search for a temp local. */
if (!ts->temp_local) {
- for (i = temps[arg].next_copy ; i != arg; i = temps[i].next_copy) {
- if (s->temps[i].temp_local) {
+ for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) {
+ if (i->temp_local) {
return i;
}
}
}
/* Failure to find a better representation, return the same temp. */
- return arg;
+ return ts;
}
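+/* Preference order above: a global (canonical for the whole TB), then
+   a temp local (survives basic-block ends), then the temp itself. */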
-static bool temps_are_copies(TCGArg arg1, TCGArg arg2)
+static bool ts_are_copies(TCGTemp *ts1, TCGTemp *ts2)
{
- TCGArg i;
+ TCGTemp *i;
- if (arg1 == arg2) {
+ if (ts1 == ts2) {
return true;
}
- if (!temp_is_copy(arg1) || !temp_is_copy(arg2)) {
+ if (!ts_is_copy(ts1) || !ts_is_copy(ts2)) {
return false;
}
- for (i = temps[arg1].next_copy ; i != arg1 ; i = temps[i].next_copy) {
- if (i == arg2) {
+ for (i = ts_info(ts1)->next_copy; i != ts1; i = ts_info(i)->next_copy) {
+ if (i == ts2) {
return true;
}
}
return false;
}
+static bool args_are_copies(TCGArg arg1, TCGArg arg2)
+{
+ return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
+}
+
static void tcg_opt_gen_movi(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg val)
{
- TCGOpcode new_op = op_to_movi(op->opc);
+ const TCGOpDef *def;
+ TCGOpcode new_op;
tcg_target_ulong mask;
-
+ struct tcg_temp_info *di = arg_info(dst);
+
+ def = &tcg_op_defs[op->opc];
+ if (def->flags & TCG_OPF_VECTOR) {
+ new_op = INDEX_op_dupi_vec;
+ } else if (def->flags & TCG_OPF_64BIT) {
+ new_op = INDEX_op_movi_i64;
+ } else {
+ new_op = INDEX_op_movi_i32;
+ }
op->opc = new_op;
+ /* TCGOP_VECL and TCGOP_VECE remain unchanged. */
+ op->args[0] = dst;
+ op->args[1] = val;
reset_temp(dst);
- temps[dst].is_const = true;
- temps[dst].val = val;
+ di->is_const = true;
+ di->val = val;
mask = val;
if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_movi_i32) {
/* High bits of the destination are now garbage. */
mask |= ~0xffffffffull;
}
- temps[dst].mask = mask;
-
- op->args[0] = dst;
- op->args[1] = val;
+ di->mask = mask;
}
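+/* Worked example: folding to movi_i32 0x80000000 on a 64-bit host
+   gives mask 0xffffffff80000000: the low 32 bits mirror the constant
+   and the high 32 bits are declared garbage. */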
static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
{
- if (temps_are_copies(dst, src)) {
+ TCGTemp *dst_ts = arg_temp(dst);
+ TCGTemp *src_ts = arg_temp(src);
+ const TCGOpDef *def;
+ struct tcg_temp_info *di;
+ struct tcg_temp_info *si;
+ tcg_target_ulong mask;
+ TCGOpcode new_op;
+
+ if (ts_are_copies(dst_ts, src_ts)) {
tcg_op_remove(s, op);
return;
}
- TCGOpcode new_op = op_to_mov(op->opc);
- tcg_target_ulong mask;
-
+ reset_ts(dst_ts);
+ di = ts_info(dst_ts);
+ si = ts_info(src_ts);
+ def = &tcg_op_defs[op->opc];
+ if (def->flags & TCG_OPF_VECTOR) {
+ new_op = INDEX_op_mov_vec;
+ } else if (def->flags & TCG_OPF_64BIT) {
+ new_op = INDEX_op_mov_i64;
+ } else {
+ new_op = INDEX_op_mov_i32;
+ }
op->opc = new_op;
+ /* TCGOP_VECL and TCGOP_VECE remain unchanged. */
+ op->args[0] = dst;
+ op->args[1] = src;
- reset_temp(dst);
- mask = temps[src].mask;
+ mask = si->mask;
if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) {
/* High bits of the destination are now garbage. */
mask |= ~0xffffffffull;
}
- temps[dst].mask = mask;
-
- if (arg_temp(src)->type == arg_temp(dst)->type) {
- temps[dst].next_copy = temps[src].next_copy;
- temps[dst].prev_copy = src;
- temps[temps[dst].next_copy].prev_copy = dst;
- temps[src].next_copy = dst;
- temps[dst].is_const = temps[src].is_const;
- temps[dst].val = temps[src].val;
- }
+ di->mask = mask;
- op->args[0] = dst;
- op->args[1] = src;
+ if (src_ts->type == dst_ts->type) {
+ struct tcg_temp_info *ni = ts_info(si->next_copy);
+
+ di->next_copy = si->next_copy;
+ di->prev_copy = src_ts;
+ ni->prev_copy = dst_ts;
+ si->next_copy = dst_ts;
+ di->is_const = si->is_const;
+ di->val = si->val;
+ }
}
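+/* dst is recorded as a copy of src only when the two temps have the
+   same type; a type-changing mov still propagates the known-zero mask
+   but not the const/copy information. */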
static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y)
CASE_OP_32_64(ext16u):
return (uint16_t)x;
+ CASE_OP_32_64(bswap16):
+ return bswap16(x);
+
+ CASE_OP_32_64(bswap32):
+ return bswap32(x);
+
+ case INDEX_op_bswap64_i64:
+ return bswap64(x);
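+ /* e.g. bswap32(0x12345678) folds to 0x78563412; bswap16 consumes
+    only the low 16 bits of x. */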
+
case INDEX_op_ext_i32_i64:
case INDEX_op_ext32s_i64:
return (int32_t)x;
static TCGArg do_constant_folding(TCGOpcode op, TCGArg x, TCGArg y)
{
+ const TCGOpDef *def = &tcg_op_defs[op];
TCGArg res = do_constant_folding_2(op, x, y);
- if (op_bits(op) == 32) {
+ if (!(def->flags & TCG_OPF_64BIT)) {
res = (int32_t)res;
}
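+ /* e.g. add_i32 with 0xffffffff and 1 computes 0x100000000 above;
+    the cast canonicalizes the result to 0. */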
return res;
static TCGArg do_constant_folding_cond(TCGOpcode op, TCGArg x,
TCGArg y, TCGCond c)
{
- if (temp_is_const(x) && temp_is_const(y)) {
- switch (op_bits(op)) {
- case 32:
- return do_constant_folding_cond_32(temps[x].val, temps[y].val, c);
- case 64:
- return do_constant_folding_cond_64(temps[x].val, temps[y].val, c);
- default:
- tcg_abort();
+ tcg_target_ulong xv = arg_info(x)->val;
+ tcg_target_ulong yv = arg_info(y)->val;
+ if (arg_is_const(x) && arg_is_const(y)) {
+ const TCGOpDef *def = &tcg_op_defs[op];
+ tcg_debug_assert(!(def->flags & TCG_OPF_VECTOR));
+ if (def->flags & TCG_OPF_64BIT) {
+ return do_constant_folding_cond_64(xv, yv, c);
+ } else {
+ return do_constant_folding_cond_32(xv, yv, c);
}
- } else if (temps_are_copies(x, y)) {
+ } else if (args_are_copies(x, y)) {
return do_constant_folding_cond_eq(c);
- } else if (temp_is_const(y) && temps[y].val == 0) {
+ } else if (arg_is_const(y) && yv == 0) {
switch (c) {
case TCG_COND_LTU:
return 0;
TCGArg al = p1[0], ah = p1[1];
TCGArg bl = p2[0], bh = p2[1];
- if (temp_is_const(bl) && temp_is_const(bh)) {
- uint64_t b = ((uint64_t)temps[bh].val << 32) | (uint32_t)temps[bl].val;
+ if (arg_is_const(bl) && arg_is_const(bh)) {
+ tcg_target_ulong blv = arg_info(bl)->val;
+ tcg_target_ulong bhv = arg_info(bh)->val;
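+ /* deposit64(lo, 32, 32, hi) == ((uint64_t)hi << 32) | (uint32_t)lo,
+    the same double-word value the open-coded shift/or built. */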
+ uint64_t b = deposit64(blv, 32, 32, bhv);
- if (temp_is_const(al) && temp_is_const(ah)) {
- uint64_t a;
- a = ((uint64_t)temps[ah].val << 32) | (uint32_t)temps[al].val;
+ if (arg_is_const(al) && arg_is_const(ah)) {
+ tcg_target_ulong alv = arg_info(al)->val;
+ tcg_target_ulong ahv = arg_info(ah)->val;
+ uint64_t a = deposit64(alv, 32, 32, ahv);
return do_constant_folding_cond_64(a, b, c);
}
if (b == 0) {
}
}
}
- if (temps_are_copies(al, bl) && temps_are_copies(ah, bh)) {
+ if (args_are_copies(al, bl) && args_are_copies(ah, bh)) {
return do_constant_folding_cond_eq(c);
}
return 2;
{
TCGArg a1 = *p1, a2 = *p2;
int sum = 0;
- sum += temp_is_const(a1);
- sum -= temp_is_const(a2);
+ sum += arg_is_const(a1);
+ sum -= arg_is_const(a2);
/* Prefer the constant in second argument, and then the form
op a, a, b, which is better handled on non-RISC hosts. */
static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
{
int sum = 0;
- sum += temp_is_const(p1[0]);
- sum += temp_is_const(p1[1]);
- sum -= temp_is_const(p2[0]);
- sum -= temp_is_const(p2[1]);
+ sum += arg_is_const(p1[0]);
+ sum += arg_is_const(p1[1]);
+ sum -= arg_is_const(p2[0]);
+ sum -= arg_is_const(p2[1]);
if (sum > 0) {
TCGArg t;
t = p1[0], p1[0] = p2[0], p2[0] = t;
/* Propagate constants and copies, fold constant expressions. */
void tcg_optimize(TCGContext *s)
{
- int oi, oi_next, nb_temps, nb_globals;
- TCGOp *prev_mb = NULL;
+ int nb_temps, nb_globals;
+ TCGOp *op, *op_next, *prev_mb = NULL;
+ struct tcg_temp_info *infos;
+ TCGTempSet temps_used;
/* Array VALS has an element for each temp.
If this temp holds a constant then its value is kept in VALS' element.
nb_temps = s->nb_temps;
nb_globals = s->nb_globals;
- reset_all_temps(nb_temps);
+ bitmap_zero(temps_used.l, nb_temps);
+ infos = tcg_malloc(sizeof(struct tcg_temp_info) * nb_temps);
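+ /* tcg_malloc allocates from the per-translation pool; the array is
+    reclaimed when the pool is reset, so no explicit free is needed. */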
- for (oi = s->gen_op_buf[0].next; oi != 0; oi = oi_next) {
+ QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
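+ /* The _SAFE variant caches the next op before the body runs, so the
+    current op can be removed and new ops inserted while iterating,
+    without the old index-chain fixups. */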
tcg_target_ulong mask, partmask, affected;
int nb_oargs, nb_iargs, i;
TCGArg tmp;
-
- TCGOp * const op = &s->gen_op_buf[oi];
TCGOpcode opc = op->opc;
const TCGOpDef *def = &tcg_op_defs[opc];
- oi_next = op->next;
-
/* Count the arguments, and initialize the temps that are
going to be used */
if (opc == INDEX_op_call) {
- nb_oargs = op->callo;
- nb_iargs = op->calli;
+ nb_oargs = TCGOP_CALLO(op);
+ nb_iargs = TCGOP_CALLI(op);
for (i = 0; i < nb_oargs + nb_iargs; i++) {
- tmp = op->args[i];
- if (tmp != TCG_CALL_DUMMY_ARG) {
- init_temp_info(tmp);
+ TCGTemp *ts = arg_temp(op->args[i]);
+ if (ts) {
+ init_ts_info(infos, &temps_used, ts);
}
}
} else {
nb_oargs = def->nb_oargs;
nb_iargs = def->nb_iargs;
for (i = 0; i < nb_oargs + nb_iargs; i++) {
- init_temp_info(op->args[i]);
+ init_arg_info(infos, &temps_used, op->args[i]);
}
}
/* Do copy propagation */
for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
- if (temp_is_copy(op->args[i])) {
- op->args[i] = find_better_copy(s, op->args[i]);
+ TCGTemp *ts = arg_temp(op->args[i]);
+ if (ts && ts_is_copy(ts)) {
+ op->args[i] = temp_arg(find_better_copy(s, ts));
}
}
/* For commutative operations make constant second argument */
switch (opc) {
- CASE_OP_32_64(add):
- CASE_OP_32_64(mul):
- CASE_OP_32_64(and):
- CASE_OP_32_64(or):
- CASE_OP_32_64(xor):
+ CASE_OP_32_64_VEC(add):
+ CASE_OP_32_64_VEC(mul):
+ CASE_OP_32_64_VEC(and):
+ CASE_OP_32_64_VEC(or):
+ CASE_OP_32_64_VEC(xor):
CASE_OP_32_64(eqv):
CASE_OP_32_64(nand):
CASE_OP_32_64(nor):
CASE_OP_32_64(sar):
CASE_OP_32_64(rotl):
CASE_OP_32_64(rotr):
- if (temp_is_const(op->args[1]) && temps[op->args[1]].val == 0) {
+ if (arg_is_const(op->args[1])
+ && arg_info(op->args[1])->val == 0) {
tcg_opt_gen_movi(s, op, op->args[0], 0);
continue;
}
break;
- CASE_OP_32_64(sub):
+ CASE_OP_32_64_VEC(sub):
{
TCGOpcode neg_op;
bool have_neg;
- if (temp_is_const(op->args[2])) {
+ if (arg_is_const(op->args[2])) {
/* Proceed with possible constant folding. */
break;
}
if (opc == INDEX_op_sub_i32) {
neg_op = INDEX_op_neg_i32;
have_neg = TCG_TARGET_HAS_neg_i32;
- } else {
+ } else if (opc == INDEX_op_sub_i64) {
neg_op = INDEX_op_neg_i64;
have_neg = TCG_TARGET_HAS_neg_i64;
+ } else if (TCG_TARGET_HAS_neg_vec) {
+ TCGType type = TCGOP_VECL(op) + TCG_TYPE_V64;
+ unsigned vece = TCGOP_VECE(op);
+ neg_op = INDEX_op_neg_vec;
+ have_neg = tcg_can_emit_vec_op(neg_op, type, vece) > 0;
+ } else {
+ break;
}
if (!have_neg) {
break;
}
- if (temp_is_const(op->args[1])
- && temps[op->args[1]].val == 0) {
+ if (arg_is_const(op->args[1])
+ && arg_info(op->args[1])->val == 0) {
op->opc = neg_op;
reset_temp(op->args[0]);
op->args[1] = op->args[2];
}
}
break;
- CASE_OP_32_64(xor):
+ CASE_OP_32_64_VEC(xor):
CASE_OP_32_64(nand):
- if (!temp_is_const(op->args[1])
- && temp_is_const(op->args[2])
- && temps[op->args[2]].val == -1) {
+ if (!arg_is_const(op->args[1])
+ && arg_is_const(op->args[2])
+ && arg_info(op->args[2])->val == -1) {
i = 1;
goto try_not;
}
break;
CASE_OP_32_64(nor):
- if (!temp_is_const(op->args[1])
- && temp_is_const(op->args[2])
- && temps[op->args[2]].val == 0) {
+ if (!arg_is_const(op->args[1])
+ && arg_is_const(op->args[2])
+ && arg_info(op->args[2])->val == 0) {
i = 1;
goto try_not;
}
break;
- CASE_OP_32_64(andc):
- if (!temp_is_const(op->args[2])
- && temp_is_const(op->args[1])
- && temps[op->args[1]].val == -1) {
+ CASE_OP_32_64_VEC(andc):
+ if (!arg_is_const(op->args[2])
+ && arg_is_const(op->args[1])
+ && arg_info(op->args[1])->val == -1) {
i = 2;
goto try_not;
}
break;
- CASE_OP_32_64(orc):
+ CASE_OP_32_64_VEC(orc):
CASE_OP_32_64(eqv):
- if (!temp_is_const(op->args[2])
- && temp_is_const(op->args[1])
- && temps[op->args[1]].val == 0) {
+ if (!arg_is_const(op->args[2])
+ && arg_is_const(op->args[1])
+ && arg_info(op->args[1])->val == 0) {
i = 2;
goto try_not;
}
TCGOpcode not_op;
bool have_not;
- if (def->flags & TCG_OPF_64BIT) {
+ if (def->flags & TCG_OPF_VECTOR) {
+ not_op = INDEX_op_not_vec;
+ have_not = TCG_TARGET_HAS_not_vec;
+ } else if (def->flags & TCG_OPF_64BIT) {
not_op = INDEX_op_not_i64;
have_not = TCG_TARGET_HAS_not_i64;
} else {
/* Simplify expression for "op r, a, const => mov r, a" cases */
switch (opc) {
- CASE_OP_32_64(add):
- CASE_OP_32_64(sub):
+ CASE_OP_32_64_VEC(add):
+ CASE_OP_32_64_VEC(sub):
+ CASE_OP_32_64_VEC(or):
+ CASE_OP_32_64_VEC(xor):
+ CASE_OP_32_64_VEC(andc):
CASE_OP_32_64(shl):
CASE_OP_32_64(shr):
CASE_OP_32_64(sar):
CASE_OP_32_64(rotl):
CASE_OP_32_64(rotr):
- CASE_OP_32_64(or):
- CASE_OP_32_64(xor):
- CASE_OP_32_64(andc):
- if (!temp_is_const(op->args[1])
- && temp_is_const(op->args[2])
- && temps[op->args[2]].val == 0) {
+ if (!arg_is_const(op->args[1])
+ && arg_is_const(op->args[2])
+ && arg_info(op->args[2])->val == 0) {
tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
continue;
}
break;
- CASE_OP_32_64(and):
- CASE_OP_32_64(orc):
+ CASE_OP_32_64_VEC(and):
+ CASE_OP_32_64_VEC(orc):
CASE_OP_32_64(eqv):
- if (!temp_is_const(op->args[1])
- && temp_is_const(op->args[2])
- && temps[op->args[2]].val == -1) {
+ if (!arg_is_const(op->args[1])
+ && arg_is_const(op->args[2])
+ && arg_info(op->args[2])->val == -1) {
tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
continue;
}
affected = -1;
switch (opc) {
CASE_OP_32_64(ext8s):
- if ((temps[op->args[1]].mask & 0x80) != 0) {
+ if ((arg_info(op->args[1])->mask & 0x80) != 0) {
break;
}
CASE_OP_32_64(ext8u):
mask = 0xff;
goto and_const;
CASE_OP_32_64(ext16s):
- if ((temps[op->args[1]].mask & 0x8000) != 0) {
+ if ((arg_info(op->args[1])->mask & 0x8000) != 0) {
break;
}
CASE_OP_32_64(ext16u):
mask = 0xffff;
goto and_const;
case INDEX_op_ext32s_i64:
- if ((temps[op->args[1]].mask & 0x80000000) != 0) {
+ if ((arg_info(op->args[1])->mask & 0x80000000) != 0) {
break;
}
case INDEX_op_ext32u_i64:
goto and_const;
CASE_OP_32_64(and):
- mask = temps[op->args[2]].mask;
- if (temp_is_const(op->args[2])) {
+ mask = arg_info(op->args[2])->mask;
+ if (arg_is_const(op->args[2])) {
and_const:
- affected = temps[op->args[1]].mask & ~mask;
+ affected = arg_info(op->args[1])->mask & ~mask;
}
- mask = temps[op->args[1]].mask & mask;
+ mask = arg_info(op->args[1])->mask & mask;
break;
case INDEX_op_ext_i32_i64:
- if ((temps[op->args[1]].mask & 0x80000000) != 0) {
+ if ((arg_info(op->args[1])->mask & 0x80000000) != 0) {
break;
}
case INDEX_op_extu_i32_i64:
/* We do not compute affected as it is a size changing op. */
- mask = (uint32_t)temps[op->args[1]].mask;
+ mask = (uint32_t)arg_info(op->args[1])->mask;
break;
CASE_OP_32_64(andc):
/* Known-zeros does not imply known-ones. Therefore unless
op->args[2] is constant, we can't infer anything from it. */
- if (temp_is_const(op->args[2])) {
- mask = ~temps[op->args[2]].mask;
+ if (arg_is_const(op->args[2])) {
+ mask = ~arg_info(op->args[2])->mask;
goto and_const;
}
- /* But we certainly know nothing outside op->args[1] may be set. */
- mask = temps[op->args[1]].mask;
+ /* But we certainly know nothing outside args[1] may be set. */
+ mask = arg_info(op->args[1])->mask;
break;
case INDEX_op_sar_i32:
- if (temp_is_const(op->args[2])) {
- tmp = temps[op->args[2]].val & 31;
- mask = (int32_t)temps[op->args[1]].mask >> tmp;
+ if (arg_is_const(op->args[2])) {
+ tmp = arg_info(op->args[2])->val & 31;
+ mask = (int32_t)arg_info(op->args[1])->mask >> tmp;
}
break;
case INDEX_op_sar_i64:
- if (temp_is_const(op->args[2])) {
- tmp = temps[op->args[2]].val & 63;
- mask = (int64_t)temps[op->args[1]].mask >> tmp;
+ if (arg_is_const(op->args[2])) {
+ tmp = arg_info(op->args[2])->val & 63;
+ mask = (int64_t)arg_info(op->args[1])->mask >> tmp;
}
break;
case INDEX_op_shr_i32:
- if (temp_is_const(op->args[2])) {
- tmp = temps[op->args[2]].val & 31;
- mask = (uint32_t)temps[op->args[1]].mask >> tmp;
+ if (arg_is_const(op->args[2])) {
+ tmp = arg_info(op->args[2])->val & 31;
+ mask = (uint32_t)arg_info(op->args[1])->mask >> tmp;
}
break;
case INDEX_op_shr_i64:
- if (temp_is_const(op->args[2])) {
- tmp = temps[op->args[2]].val & 63;
- mask = (uint64_t)temps[op->args[1]].mask >> tmp;
+ if (arg_is_const(op->args[2])) {
+ tmp = arg_info(op->args[2])->val & 63;
+ mask = (uint64_t)arg_info(op->args[1])->mask >> tmp;
}
break;
case INDEX_op_extrl_i64_i32:
- mask = (uint32_t)temps[op->args[1]].mask;
+ mask = (uint32_t)arg_info(op->args[1])->mask;
break;
case INDEX_op_extrh_i64_i32:
- mask = (uint64_t)temps[op->args[1]].mask >> 32;
+ mask = (uint64_t)arg_info(op->args[1])->mask >> 32;
break;
CASE_OP_32_64(shl):
- if (temp_is_const(op->args[2])) {
- tmp = temps[op->args[2]].val & (TCG_TARGET_REG_BITS - 1);
- mask = temps[op->args[1]].mask << tmp;
+ if (arg_is_const(op->args[2])) {
+ tmp = arg_info(op->args[2])->val & (TCG_TARGET_REG_BITS - 1);
+ mask = arg_info(op->args[1])->mask << tmp;
}
break;
CASE_OP_32_64(neg):
/* Set to 1 all bits to the left of the rightmost. */
- mask = -(temps[op->args[1]].mask & -temps[op->args[1]].mask);
+ mask = -(arg_info(op->args[1])->mask
+ & -arg_info(op->args[1])->mask);
break;
CASE_OP_32_64(deposit):
- mask = deposit64(temps[op->args[1]].mask, op->args[3],
- op->args[4], temps[op->args[2]].mask);
+ mask = deposit64(arg_info(op->args[1])->mask,
+ op->args[3], op->args[4],
+ arg_info(op->args[2])->mask);
break;
CASE_OP_32_64(extract):
- mask = extract64(temps[op->args[1]].mask, op->args[2], op->args[3]);
+ mask = extract64(arg_info(op->args[1])->mask,
+ op->args[2], op->args[3]);
if (op->args[2] == 0) {
- affected = temps[op->args[1]].mask & ~mask;
+ affected = arg_info(op->args[1])->mask & ~mask;
}
break;
CASE_OP_32_64(sextract):
- mask = sextract64(temps[op->args[1]].mask,
+ mask = sextract64(arg_info(op->args[1])->mask,
op->args[2], op->args[3]);
if (op->args[2] == 0 && (tcg_target_long)mask >= 0) {
- affected = temps[op->args[1]].mask & ~mask;
+ affected = arg_info(op->args[1])->mask & ~mask;
}
break;
CASE_OP_32_64(or):
CASE_OP_32_64(xor):
- mask = temps[op->args[1]].mask | temps[op->args[2]].mask;
+ mask = arg_info(op->args[1])->mask | arg_info(op->args[2])->mask;
break;
case INDEX_op_clz_i32:
case INDEX_op_ctz_i32:
- mask = temps[op->args[2]].mask | 31;
+ mask = arg_info(op->args[2])->mask | 31;
break;
case INDEX_op_clz_i64:
case INDEX_op_ctz_i64:
- mask = temps[op->args[2]].mask | 63;
+ mask = arg_info(op->args[2])->mask | 63;
break;
case INDEX_op_ctpop_i32:
break;
CASE_OP_32_64(movcond):
- mask = temps[op->args[3]].mask | temps[op->args[4]].mask;
+ mask = arg_info(op->args[3])->mask | arg_info(op->args[4])->mask;
break;
CASE_OP_32_64(ld8u):
CASE_OP_32_64(qemu_ld):
{
TCGMemOpIdx oi = op->args[nb_oargs + nb_iargs];
- TCGMemOp mop = get_memop(oi);
+ MemOp mop = get_memop(oi);
if (!(mop & MO_SIGN)) {
mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
}
/* Simplify expression for "op r, a, 0 => movi r, 0" cases */
switch (opc) {
- CASE_OP_32_64(and):
- CASE_OP_32_64(mul):
+ CASE_OP_32_64_VEC(and):
+ CASE_OP_32_64_VEC(mul):
CASE_OP_32_64(muluh):
CASE_OP_32_64(mulsh):
- if ((temp_is_const(op->args[2]) && temps[op->args[2]].val == 0)) {
+ if (arg_is_const(op->args[2])
+ && arg_info(op->args[2])->val == 0) {
tcg_opt_gen_movi(s, op, op->args[0], 0);
continue;
}
/* Simplify expression for "op r, a, a => mov r, a" cases */
switch (opc) {
- CASE_OP_32_64(or):
- CASE_OP_32_64(and):
- if (temps_are_copies(op->args[1], op->args[2])) {
+ CASE_OP_32_64_VEC(or):
+ CASE_OP_32_64_VEC(and):
+ if (args_are_copies(op->args[1], op->args[2])) {
tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
continue;
}
/* Simplify expression for "op r, a, a => movi r, 0" cases */
switch (opc) {
- CASE_OP_32_64(andc):
- CASE_OP_32_64(sub):
- CASE_OP_32_64(xor):
- if (temps_are_copies(op->args[1], op->args[2])) {
+ CASE_OP_32_64_VEC(andc):
+ CASE_OP_32_64_VEC(sub):
+ CASE_OP_32_64_VEC(xor):
+ if (args_are_copies(op->args[1], op->args[2])) {
tcg_opt_gen_movi(s, op, op->args[0], 0);
continue;
}
folding. Constants will be substituted to arguments by register
allocator where needed and possible. Also detect copies. */
switch (opc) {
- CASE_OP_32_64(mov):
+ CASE_OP_32_64_VEC(mov):
tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
break;
CASE_OP_32_64(movi):
+ case INDEX_op_dupi_vec:
tcg_opt_gen_movi(s, op, op->args[0], op->args[1]);
break;
+ case INDEX_op_dup_vec:
+ if (arg_is_const(op->args[1])) {
+ tmp = arg_info(op->args[1])->val;
+ tmp = dup_const(TCGOP_VECE(op), tmp);
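+ /* e.g. with VECE == MO_8 and tmp == 0x5a this yields
+    0x5a5a5a5a5a5a5a5a, the element replicated across the 64-bit
+    immediate that dupi_vec expects. */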
+ tcg_opt_gen_movi(s, op, op->args[0], tmp);
+ break;
+ }
+ goto do_default;
+
CASE_OP_32_64(not):
CASE_OP_32_64(neg):
CASE_OP_32_64(ext8s):
CASE_OP_32_64(ext16s):
CASE_OP_32_64(ext16u):
CASE_OP_32_64(ctpop):
+ CASE_OP_32_64(bswap16):
+ CASE_OP_32_64(bswap32):
+ case INDEX_op_bswap64_i64:
case INDEX_op_ext32s_i64:
case INDEX_op_ext32u_i64:
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
case INDEX_op_extrl_i64_i32:
case INDEX_op_extrh_i64_i32:
- if (temp_is_const(op->args[1])) {
- tmp = do_constant_folding(opc, temps[op->args[1]].val, 0);
+ if (arg_is_const(op->args[1])) {
+ tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0);
tcg_opt_gen_movi(s, op, op->args[0], tmp);
break;
}
CASE_OP_32_64(divu):
CASE_OP_32_64(rem):
CASE_OP_32_64(remu):
- if (temp_is_const(op->args[1]) && temp_is_const(op->args[2])) {
- tmp = do_constant_folding(opc, temps[op->args[1]].val,
- temps[op->args[2]].val);
+ if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
+ tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
+ arg_info(op->args[2])->val);
tcg_opt_gen_movi(s, op, op->args[0], tmp);
break;
}
CASE_OP_32_64(clz):
CASE_OP_32_64(ctz):
- if (temp_is_const(op->args[1])) {
- TCGArg v = temps[op->args[1]].val;
+ if (arg_is_const(op->args[1])) {
+ TCGArg v = arg_info(op->args[1])->val;
if (v != 0) {
tmp = do_constant_folding(opc, v, 0);
tcg_opt_gen_movi(s, op, op->args[0], tmp);
goto do_default;
CASE_OP_32_64(deposit):
- if (temp_is_const(op->args[1]) && temp_is_const(op->args[2])) {
- tmp = deposit64(temps[op->args[1]].val, op->args[3],
- op->args[4], temps[op->args[2]].val);
+ if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
+ tmp = deposit64(arg_info(op->args[1])->val,
+ op->args[3], op->args[4],
+ arg_info(op->args[2])->val);
tcg_opt_gen_movi(s, op, op->args[0], tmp);
break;
}
goto do_default;
CASE_OP_32_64(extract):
- if (temp_is_const(op->args[1])) {
- tmp = extract64(temps[op->args[1]].val,
+ if (arg_is_const(op->args[1])) {
+ tmp = extract64(arg_info(op->args[1])->val,
op->args[2], op->args[3]);
tcg_opt_gen_movi(s, op, op->args[0], tmp);
break;
goto do_default;
CASE_OP_32_64(sextract):
- if (temp_is_const(op->args[1])) {
- tmp = sextract64(temps[op->args[1]].val,
+ if (arg_is_const(op->args[1])) {
+ tmp = sextract64(arg_info(op->args[1])->val,
op->args[2], op->args[3]);
tcg_opt_gen_movi(s, op, op->args[0], tmp);
break;
}
goto do_default;
+ CASE_OP_32_64(extract2):
+ if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
+ TCGArg v1 = arg_info(op->args[1])->val;
+ TCGArg v2 = arg_info(op->args[2])->val;
+
+ if (opc == INDEX_op_extract2_i64) {
+ tmp = (v1 >> op->args[3]) | (v2 << (64 - op->args[3]));
+ } else {
+ tmp = (int32_t)(((uint32_t)v1 >> op->args[3]) |
+ ((uint32_t)v2 << (32 - op->args[3])));
+ }
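+ /* e.g. in the i32 case, v1 = 0xaabbccdd, v2 = 0x11223344 and a
+    shift of 8 produce 0x44aabbcc. */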
+ tcg_opt_gen_movi(s, op, op->args[0], tmp);
+ break;
+ }
+ goto do_default;
+
CASE_OP_32_64(setcond):
tmp = do_constant_folding_cond(opc, op->args[1],
op->args[2], op->args[3]);
op->args[1], op->args[2]);
if (tmp != 2) {
if (tmp) {
- reset_all_temps(nb_temps);
+ bitmap_zero(temps_used.l, nb_temps);
op->opc = INDEX_op_br;
op->args[0] = op->args[3];
} else {
tcg_opt_gen_mov(s, op, op->args[0], op->args[4-tmp]);
break;
}
- if (temp_is_const(op->args[3]) && temp_is_const(op->args[4])) {
- tcg_target_ulong tv = temps[op->args[3]].val;
- tcg_target_ulong fv = temps[op->args[4]].val;
+ if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
+ tcg_target_ulong tv = arg_info(op->args[3])->val;
+ tcg_target_ulong fv = arg_info(op->args[4])->val;
TCGCond cond = op->args[5];
if (fv == 1 && tv == 0) {
cond = tcg_invert_cond(cond);
case INDEX_op_add2_i32:
case INDEX_op_sub2_i32:
- if (temp_is_const(op->args[2]) && temp_is_const(op->args[3])
- && temp_is_const(op->args[4]) && temp_is_const(op->args[5])) {
- uint32_t al = temps[op->args[2]].val;
- uint32_t ah = temps[op->args[3]].val;
- uint32_t bl = temps[op->args[4]].val;
- uint32_t bh = temps[op->args[5]].val;
+ if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])
+ && arg_is_const(op->args[4]) && arg_is_const(op->args[5])) {
+ uint32_t al = arg_info(op->args[2])->val;
+ uint32_t ah = arg_info(op->args[3])->val;
+ uint32_t bl = arg_info(op->args[4])->val;
+ uint32_t bh = arg_info(op->args[5])->val;
uint64_t a = ((uint64_t)ah << 32) | al;
uint64_t b = ((uint64_t)bh << 32) | bl;
TCGArg rl, rh;
- TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_movi_i32, 2);
+ TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_movi_i32);
if (opc == INDEX_op_add2_i32) {
a += b;
rh = op->args[1];
tcg_opt_gen_movi(s, op, rl, (int32_t)a);
tcg_opt_gen_movi(s, op2, rh, (int32_t)(a >> 32));
-
- /* We've done all we need to do with the movi. Skip it. */
- oi_next = op2->next;
break;
}
goto do_default;
case INDEX_op_mulu2_i32:
- if (temp_is_const(op->args[2]) && temp_is_const(op->args[3])) {
- uint32_t a = temps[op->args[2]].val;
- uint32_t b = temps[op->args[3]].val;
+ if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
+ uint32_t a = arg_info(op->args[2])->val;
+ uint32_t b = arg_info(op->args[3])->val;
uint64_t r = (uint64_t)a * b;
TCGArg rl, rh;
- TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_movi_i32, 2);
+ TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_movi_i32);
rl = op->args[0];
rh = op->args[1];
tcg_opt_gen_movi(s, op, rl, (int32_t)r);
tcg_opt_gen_movi(s, op2, rh, (int32_t)(r >> 32));
-
- /* We've done all we need to do with the movi. Skip it. */
- oi_next = op2->next;
break;
}
goto do_default;
if (tmp != 2) {
if (tmp) {
do_brcond_true:
- reset_all_temps(nb_temps);
+ bitmap_zero(temps_used.l, nb_temps);
op->opc = INDEX_op_br;
op->args[0] = op->args[5];
} else {
}
} else if ((op->args[4] == TCG_COND_LT
|| op->args[4] == TCG_COND_GE)
- && temp_is_const(op->args[2])
- && temps[op->args[2]].val == 0
- && temp_is_const(op->args[3])
- && temps[op->args[3]].val == 0) {
+ && arg_is_const(op->args[2])
+ && arg_info(op->args[2])->val == 0
+ && arg_is_const(op->args[3])
+ && arg_info(op->args[3])->val == 0) {
/* Simplify LT/GE comparisons vs zero to a single compare
vs the high word of the input. */
do_brcond_high:
- reset_all_temps(nb_temps);
+ bitmap_zero(temps_used.l, nb_temps);
op->opc = INDEX_op_brcond_i32;
op->args[0] = op->args[1];
op->args[1] = op->args[3];
goto do_default;
}
do_brcond_low:
- reset_all_temps(nb_temps);
+ bitmap_zero(temps_used.l, nb_temps);
op->opc = INDEX_op_brcond_i32;
op->args[1] = op->args[2];
op->args[2] = op->args[4];
tcg_opt_gen_movi(s, op, op->args[0], tmp);
} else if ((op->args[5] == TCG_COND_LT
|| op->args[5] == TCG_COND_GE)
- && temp_is_const(op->args[3])
- && temps[op->args[3]].val == 0
- && temp_is_const(op->args[4])
- && temps[op->args[4]].val == 0) {
+ && arg_is_const(op->args[3])
+ && arg_info(op->args[3])->val == 0
+ && arg_is_const(op->args[4])
+ && arg_info(op->args[4])->val == 0) {
/* Simplify LT/GE comparisons vs zero to a single compare
vs the high word of the input. */
do_setcond_high:
reset_temp(op->args[0]);
- temps[op->args[0]].mask = 1;
+ arg_info(op->args[0])->mask = 1;
op->opc = INDEX_op_setcond_i32;
op->args[1] = op->args[2];
op->args[2] = op->args[4];
}
do_setcond_low:
reset_temp(op->args[0]);
- temps[op->args[0]].mask = 1;
+ arg_info(op->args[0])->mask = 1;
op->opc = INDEX_op_setcond_i32;
op->args[2] = op->args[3];
op->args[3] = op->args[5];
& (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
for (i = 0; i < nb_globals; i++) {
if (test_bit(i, temps_used.l)) {
- reset_temp(i);
+ reset_ts(&s->temps[i]);
}
}
}
block, otherwise we only trash the output args. "mask" is
the non-zero bits mask for the first output arg. */
if (def->flags & TCG_OPF_BB_END) {
- reset_all_temps(nb_temps);
+ bitmap_zero(temps_used.l, nb_temps);
} else {
do_reset_output:
for (i = 0; i < nb_oargs; i++) {
/* Save the corresponding known-zero bits mask for the
first output argument (only one supported so far). */
if (i == 0) {
- temps[op->args[i]].mask = mask;
+ arg_info(op->args[i])->mask = mask;
}
}
}