/*
 * Optimizations for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2010 Samsung Electronics.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "exec/cpu-common.h"
#include "tcg-op.h"
#define CASE_OP_32_64(x)                        \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

#define CASE_OP_32_64_VEC(x)                    \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64):    \
        glue(glue(case INDEX_op_, x), _vec)
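/* For illustration: CASE_OP_32_64_VEC(add) expands to
 *   case INDEX_op_add_i32: case INDEX_op_add_i64: case INDEX_op_add_vec
 * so a single label covers all widths of one logical operation.
 */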
struct tcg_temp_info {
    bool is_const;
    TCGTemp *prev_copy;
    TCGTemp *next_copy;
    tcg_target_ulong val;
    tcg_target_ulong mask;
};
static inline struct tcg_temp_info *ts_info(TCGTemp *ts)
{
    return ts->state_ptr;
}

static inline struct tcg_temp_info *arg_info(TCGArg arg)
{
    return ts_info(arg_temp(arg));
}

static inline bool ts_is_const(TCGTemp *ts)
{
    return ts_info(ts)->is_const;
}

static inline bool arg_is_const(TCGArg arg)
{
    return ts_is_const(arg_temp(arg));
}

static inline bool ts_is_copy(TCGTemp *ts)
{
    return ts_info(ts)->next_copy != ts;
}
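/* Copies are tracked on a circular doubly linked list threaded through
 * tcg_temp_info: a temp that is not a copy of anything points back at
 * itself.  For example, after "mov t1, t0; mov t2, t0", t0, t1 and t2
 * all sit on one ring and ts_is_copy() is true for each of them.
 */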
/* Reset TEMP's state, possibly removing the temp from the list of copies. */
static void reset_ts(TCGTemp *ts)
{
    struct tcg_temp_info *ti = ts_info(ts);
    struct tcg_temp_info *pi = ts_info(ti->prev_copy);
    struct tcg_temp_info *ni = ts_info(ti->next_copy);

    ni->prev_copy = ti->prev_copy;
    pi->next_copy = ti->next_copy;
    ti->next_copy = ts;
    ti->prev_copy = ts;
    ti->is_const = false;
    ti->mask = -1;
}

static void reset_temp(TCGArg arg)
{
    reset_ts(arg_temp(arg));
}
/* Initialize and activate a temporary. */
static void init_ts_info(struct tcg_temp_info *infos,
                         TCGTempSet *temps_used, TCGTemp *ts)
{
    size_t idx = temp_idx(ts);
    if (!test_bit(idx, temps_used->l)) {
        struct tcg_temp_info *ti = &infos[idx];

        ts->state_ptr = ti;
        ti->next_copy = ts;
        ti->prev_copy = ts;
        ti->is_const = false;
        ti->mask = -1;
        set_bit(idx, temps_used->l);
    }
}

static void init_arg_info(struct tcg_temp_info *infos,
                          TCGTempSet *temps_used, TCGArg arg)
{
    init_ts_info(infos, temps_used, arg_temp(arg));
}
static TCGTemp *find_better_copy(TCGContext *s, TCGTemp *ts)
{
    TCGTemp *i;

    /* If this is already a global, we can't do better. */
    if (ts->temp_global) {
        return ts;
    }

    /* Search for a global first. */
    for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) {
        if (i->temp_global) {
            return i;
        }
    }

    /* If it is a temp, search for a temp local. */
    if (!ts->temp_local) {
        for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) {
            if (i->temp_local) {
                return i;
            }
        }
    }
    /* Failing to find a better representation, return the same temp. */
    return ts;
}
static bool ts_are_copies(TCGTemp *ts1, TCGTemp *ts2)
{
    TCGTemp *i;

    if (ts1 == ts2) {
        return true;
    }

    if (!ts_is_copy(ts1) || !ts_is_copy(ts2)) {
        return false;
    }

    for (i = ts_info(ts1)->next_copy; i != ts1; i = ts_info(i)->next_copy) {
        if (i == ts2) {
            return true;
        }
    }

    return false;
}

static bool args_are_copies(TCGArg arg1, TCGArg arg2)
{
    return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
}
static void tcg_opt_gen_movi(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg val)
{
    const TCGOpDef *def;
    TCGOpcode new_op;
    tcg_target_ulong mask;
    struct tcg_temp_info *di = arg_info(dst);

    def = &tcg_op_defs[op->opc];
    if (def->flags & TCG_OPF_VECTOR) {
        new_op = INDEX_op_dupi_vec;
    } else if (def->flags & TCG_OPF_64BIT) {
        new_op = INDEX_op_movi_i64;
    } else {
        new_op = INDEX_op_movi_i32;
    }
    op->opc = new_op;
    /* TCGOP_VECL and TCGOP_VECE remain unchanged. */
    op->args[0] = dst;
    op->args[1] = val;

    reset_temp(dst);
    di->is_const = true;
    di->val = val;
    mask = val;
    if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_movi_i32) {
        /* High bits of the destination are now garbage. */
        mask |= ~0xffffffffull;
    }
    di->mask = mask;
}
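/* Example: on a 64-bit host, "movi_i32 t0, $0x80000000" defines only the
 * low 32 bits of the backing register, so the tracked non-zero mask must
 * become 0xffffffff80000000 rather than 0x80000000.
 */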
static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
{
    TCGTemp *dst_ts = arg_temp(dst);
    TCGTemp *src_ts = arg_temp(src);
    const TCGOpDef *def;
    struct tcg_temp_info *di;
    struct tcg_temp_info *si;
    tcg_target_ulong mask;
    TCGOpcode new_op;

    if (ts_are_copies(dst_ts, src_ts)) {
        tcg_op_remove(s, op);
        return;
    }

    reset_ts(dst_ts);
    di = ts_info(dst_ts);
    si = ts_info(src_ts);
    def = &tcg_op_defs[op->opc];
    if (def->flags & TCG_OPF_VECTOR) {
        new_op = INDEX_op_mov_vec;
    } else if (def->flags & TCG_OPF_64BIT) {
        new_op = INDEX_op_mov_i64;
    } else {
        new_op = INDEX_op_mov_i32;
    }
    op->opc = new_op;
    /* TCGOP_VECL and TCGOP_VECE remain unchanged. */
    op->args[0] = dst;
    op->args[1] = src;

    mask = si->mask;
    if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) {
        /* High bits of the destination are now garbage. */
        mask |= ~0xffffffffull;
    }
    di->mask = mask;

    if (src_ts->type == dst_ts->type) {
        struct tcg_temp_info *ni = ts_info(si->next_copy);

        di->next_copy = si->next_copy;
        di->prev_copy = src_ts;
        ni->prev_copy = dst_ts;
        si->next_copy = dst_ts;
        di->is_const = si->is_const;
        di->val = si->val;
    }
}
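/* Note on the splice above: the new copy is linked immediately after the
 * source, so a ring (src) becomes (src dst), and a subsequent
 * "mov t2, src" yields (src t2 dst).  The copy is only recorded when both
 * temps have the same type, since a 32-bit and a 64-bit temp holding "the
 * same" value are not interchangeable for the known-bits tracking.
 */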
static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y)
{
    uint64_t l64, h64;

    switch (op) {
    CASE_OP_32_64(add):
        return x + y;

    CASE_OP_32_64(sub):
        return x - y;

    CASE_OP_32_64(mul):
        return x * y;

    CASE_OP_32_64(and):
        return x & y;

    CASE_OP_32_64(or):
        return x | y;

    CASE_OP_32_64(xor):
        return x ^ y;

    case INDEX_op_shl_i32:
        return (uint32_t)x << (y & 31);

    case INDEX_op_shl_i64:
        return (uint64_t)x << (y & 63);

    case INDEX_op_shr_i32:
        return (uint32_t)x >> (y & 31);

    case INDEX_op_shr_i64:
        return (uint64_t)x >> (y & 63);

    case INDEX_op_sar_i32:
        return (int32_t)x >> (y & 31);

    case INDEX_op_sar_i64:
        return (int64_t)x >> (y & 63);

    case INDEX_op_rotr_i32:
        return ror32(x, y & 31);

    case INDEX_op_rotr_i64:
        return ror64(x, y & 63);

    case INDEX_op_rotl_i32:
        return rol32(x, y & 31);

    case INDEX_op_rotl_i64:
        return rol64(x, y & 63);

    CASE_OP_32_64(not):
        return ~x;

    CASE_OP_32_64(neg):
        return -x;

    CASE_OP_32_64(andc):
        return x & ~y;

    CASE_OP_32_64(orc):
        return x | ~y;

    CASE_OP_32_64(eqv):
        return ~(x ^ y);

    CASE_OP_32_64(nand):
        return ~(x & y);

    CASE_OP_32_64(nor):
        return ~(x | y);

    case INDEX_op_clz_i32:
        return (uint32_t)x ? clz32(x) : y;

    case INDEX_op_clz_i64:
        return x ? clz64(x) : y;

    case INDEX_op_ctz_i32:
        return (uint32_t)x ? ctz32(x) : y;

    case INDEX_op_ctz_i64:
        return x ? ctz64(x) : y;

    case INDEX_op_ctpop_i32:
        return ctpop32(x);

    case INDEX_op_ctpop_i64:
        return ctpop64(x);

    CASE_OP_32_64(ext8s):
        return (int8_t)x;

    CASE_OP_32_64(ext16s):
        return (int16_t)x;

    CASE_OP_32_64(ext8u):
        return (uint8_t)x;

    CASE_OP_32_64(ext16u):
        return (uint16_t)x;

    CASE_OP_32_64(bswap16):
        return bswap16(x);

    CASE_OP_32_64(bswap32):
        return bswap32(x);

    case INDEX_op_bswap64_i64:
        return bswap64(x);

    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        return (int32_t)x;

    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_ext32u_i64:
        return (uint32_t)x;

    case INDEX_op_extrh_i64_i32:
        return (uint64_t)x >> 32;

    case INDEX_op_muluh_i32:
        return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
    case INDEX_op_mulsh_i32:
        return ((int64_t)(int32_t)x * (int32_t)y) >> 32;

    case INDEX_op_muluh_i64:
        mulu64(&l64, &h64, x, y);
        return h64;
    case INDEX_op_mulsh_i64:
        muls64(&l64, &h64, x, y);
        return h64;

    case INDEX_op_div_i32:
        /* Avoid crashing on divide by zero, otherwise undefined. */
        return (int32_t)x / ((int32_t)y ? : 1);
    case INDEX_op_divu_i32:
        return (uint32_t)x / ((uint32_t)y ? : 1);
    case INDEX_op_div_i64:
        return (int64_t)x / ((int64_t)y ? : 1);
    case INDEX_op_divu_i64:
        return (uint64_t)x / ((uint64_t)y ? : 1);

    case INDEX_op_rem_i32:
        return (int32_t)x % ((int32_t)y ? : 1);
    case INDEX_op_remu_i32:
        return (uint32_t)x % ((uint32_t)y ? : 1);
    case INDEX_op_rem_i64:
        return (int64_t)x % ((int64_t)y ? : 1);
    case INDEX_op_remu_i64:
        return (uint64_t)x % ((uint64_t)y ? : 1);
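    /* Note: "y ? : 1" is the GNU C conditional with omitted middle operand,
       equivalent to "y ? y : 1".  Guest division by zero is undefined at
       this level anyway; folding with a divisor of 1 merely keeps the host
       from taking a SIGFPE while constant folding. */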
    default:
        fprintf(stderr,
                "Unrecognized operation %d in do_constant_folding.\n", op);
        tcg_abort();
    }
}
static TCGArg do_constant_folding(TCGOpcode op, TCGArg x, TCGArg y)
{
    const TCGOpDef *def = &tcg_op_defs[op];
    TCGArg res = do_constant_folding_2(op, x, y);
    if (!(def->flags & TCG_OPF_64BIT)) {
        res = (int32_t)res;
    }
    return res;
}
static bool do_constant_folding_cond_32(uint32_t x, uint32_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int32_t)x < (int32_t)y;
    case TCG_COND_GE:
        return (int32_t)x >= (int32_t)y;
    case TCG_COND_LE:
        return (int32_t)x <= (int32_t)y;
    case TCG_COND_GT:
        return (int32_t)x > (int32_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    default:
        tcg_abort();
    }
}
static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int64_t)x < (int64_t)y;
    case TCG_COND_GE:
        return (int64_t)x >= (int64_t)y;
    case TCG_COND_LE:
        return (int64_t)x <= (int64_t)y;
    case TCG_COND_GT:
        return (int64_t)x > (int64_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    default:
        tcg_abort();
    }
}
static bool do_constant_folding_cond_eq(TCGCond c)
{
    switch (c) {
    case TCG_COND_GT:
    case TCG_COND_LTU:
    case TCG_COND_LT:
    case TCG_COND_GTU:
    case TCG_COND_NE:
        return 0;
    case TCG_COND_GE:
    case TCG_COND_GEU:
    case TCG_COND_LE:
    case TCG_COND_LEU:
    case TCG_COND_EQ:
        return 1;
    default:
        tcg_abort();
    }
}
/* Return 2 if the condition can't be simplified, and the result
   of the condition (0 or 1) if it can. */
static TCGArg do_constant_folding_cond(TCGOpcode op, TCGArg x,
                                       TCGArg y, TCGCond c)
{
    tcg_target_ulong xv = arg_info(x)->val;
    tcg_target_ulong yv = arg_info(y)->val;

    if (arg_is_const(x) && arg_is_const(y)) {
        const TCGOpDef *def = &tcg_op_defs[op];
        tcg_debug_assert(!(def->flags & TCG_OPF_VECTOR));
        if (def->flags & TCG_OPF_64BIT) {
            return do_constant_folding_cond_64(xv, yv, c);
        } else {
            return do_constant_folding_cond_32(xv, yv, c);
        }
    } else if (args_are_copies(x, y)) {
        return do_constant_folding_cond_eq(c);
    } else if (arg_is_const(y) && yv == 0) {
        switch (c) {
        case TCG_COND_LTU:
            return 0;
        case TCG_COND_GEU:
            return 1;
        default:
            return 2;
        }
    }
    return 2;
}
/* Return 2 if the condition can't be simplified, and the result
   of the condition (0 or 1) if it can. */
static TCGArg do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
{
    TCGArg al = p1[0], ah = p1[1];
    TCGArg bl = p2[0], bh = p2[1];

    if (arg_is_const(bl) && arg_is_const(bh)) {
        tcg_target_ulong blv = arg_info(bl)->val;
        tcg_target_ulong bhv = arg_info(bh)->val;
        uint64_t b = deposit64(blv, 32, 32, bhv);

        if (arg_is_const(al) && arg_is_const(ah)) {
            tcg_target_ulong alv = arg_info(al)->val;
            tcg_target_ulong ahv = arg_info(ah)->val;
            uint64_t a = deposit64(alv, 32, 32, ahv);
            return do_constant_folding_cond_64(a, b, c);
        }
        if (b == 0) {
            switch (c) {
            case TCG_COND_LTU:
                return 0;
            case TCG_COND_GEU:
                return 1;
            default:
                break;
            }
        }
    }
    if (args_are_copies(al, bl) && args_are_copies(ah, bh)) {
        return do_constant_folding_cond_eq(c);
    }
    return 2;
}
static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
{
    TCGArg a1 = *p1, a2 = *p2;
    int sum = 0;

    sum += arg_is_const(a1);
    sum -= arg_is_const(a2);

    /* Prefer the constant in the second argument, and then the form
       op a, a, b, which is better handled on non-RISC hosts. */
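    /* Example: "add t0, $5, t1" becomes "add t0, t1, $5"; with no constants
       at all, "add t2, t3, t2" becomes "add t2, t2, t3". */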
    if (sum > 0 || (sum == 0 && dest == a2)) {
        *p1 = a2;
        *p2 = a1;
        return true;
    }
    return false;
}
static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
{
    int sum = 0;

    sum += arg_is_const(p1[0]);
    sum += arg_is_const(p1[1]);
    sum -= arg_is_const(p2[0]);
    sum -= arg_is_const(p2[1]);
    if (sum > 0) {
        TCGArg t;
        t = p1[0], p1[0] = p2[0], p2[0] = t;
        t = p1[1], p1[1] = p2[1], p2[1] = t;
        return true;
    }
    return false;
}
/* Propagate constants and copies, fold constant expressions. */
void tcg_optimize(TCGContext *s)
{
    int nb_temps, nb_globals;
    TCGOp *op, *op_next, *prev_mb = NULL;
    struct tcg_temp_info *infos;
    TCGTempSet temps_used;

    /* The infos array has an element for each temp.  If the temp holds
       a constant, its value is kept in that element.  If the temp is a
       copy of other temps, the other copies are reachable through the
       element's doubly linked circular list. */
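    /* Worked example: given
     *   movi_i32 t0, $0x3
     *   add_i32  t1, t0, t0
     * constant propagation folds the add into "movi_i32 t1, $0x6"; the
     * later liveness pass can then drop the first movi if t0 is unused.
     */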
    nb_temps = s->nb_temps;
    nb_globals = s->nb_globals;
    bitmap_zero(temps_used.l, nb_temps);
    infos = tcg_malloc(sizeof(struct tcg_temp_info) * nb_temps);

    QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
        tcg_target_ulong mask, partmask, affected;
        int nb_oargs, nb_iargs, i;
        TCGArg tmp;
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];
        /* Count the arguments, and initialize the temps that are
           going to be used. */
        if (opc == INDEX_op_call) {
            nb_oargs = TCGOP_CALLO(op);
            nb_iargs = TCGOP_CALLI(op);
            for (i = 0; i < nb_oargs + nb_iargs; i++) {
                TCGTemp *ts = arg_temp(op->args[i]);
                if (ts) {
                    init_ts_info(infos, &temps_used, ts);
                }
            }
        } else {
            nb_oargs = def->nb_oargs;
            nb_iargs = def->nb_iargs;
            for (i = 0; i < nb_oargs + nb_iargs; i++) {
                init_arg_info(infos, &temps_used, op->args[i]);
            }
        }
        /* Do copy propagation. */
        for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
            TCGTemp *ts = arg_temp(op->args[i]);
            if (ts && ts_is_copy(ts)) {
                op->args[i] = temp_arg(find_better_copy(s, ts));
            }
        }
        /* For commutative operations, make the constant the second argument. */
        switch (opc) {
        CASE_OP_32_64_VEC(add):
        CASE_OP_32_64_VEC(mul):
        CASE_OP_32_64_VEC(and):
        CASE_OP_32_64_VEC(or):
        CASE_OP_32_64_VEC(xor):
        CASE_OP_32_64(eqv):
        CASE_OP_32_64(nand):
        CASE_OP_32_64(nor):
        CASE_OP_32_64(muluh):
        CASE_OP_32_64(mulsh):
            swap_commutative(op->args[0], &op->args[1], &op->args[2]);
            break;
        CASE_OP_32_64(brcond):
            if (swap_commutative(-1, &op->args[0], &op->args[1])) {
                op->args[2] = tcg_swap_cond(op->args[2]);
            }
            break;
        CASE_OP_32_64(setcond):
            if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) {
                op->args[3] = tcg_swap_cond(op->args[3]);
            }
            break;
        CASE_OP_32_64(movcond):
            if (swap_commutative(-1, &op->args[1], &op->args[2])) {
                op->args[5] = tcg_swap_cond(op->args[5]);
            }
            /* For movcond, we canonicalize the "false" input reg to match
               the destination reg so that the tcg backend can implement
               a "move if true" operation. */
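            /* Example: when dest aliases the "true" input, as in
             *   movcond_i32 t0, c1, c2, t0, t1, lt
             * the inputs are swapped and the condition inverted:
             *   movcond_i32 t0, c1, c2, t1, t0, ge
             * which a backend can emit as one conditional move into t0.
             */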
            if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
                op->args[5] = tcg_invert_cond(op->args[5]);
            }
            break;
        case INDEX_op_add2_i32:
            swap_commutative(op->args[0], &op->args[2], &op->args[4]);
            swap_commutative(op->args[1], &op->args[3], &op->args[5]);
            break;
        CASE_OP_32_64(mulu2):
        CASE_OP_32_64(muls2):
            swap_commutative(op->args[0], &op->args[2], &op->args[3]);
            break;
        case INDEX_op_brcond2_i32:
            if (swap_commutative2(&op->args[0], &op->args[2])) {
                op->args[4] = tcg_swap_cond(op->args[4]);
            }
            break;
        case INDEX_op_setcond2_i32:
            if (swap_commutative2(&op->args[1], &op->args[3])) {
                op->args[5] = tcg_swap_cond(op->args[5]);
            }
            break;
        default:
            break;
        }
        /* Simplify expressions for "shift/rot r, 0, a => movi r, 0",
           and the "sub r, 0, a => neg r, a" case. */
        switch (opc) {
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
            if (arg_is_const(op->args[1])
                && arg_info(op->args[1])->val == 0) {
                tcg_opt_gen_movi(s, op, op->args[0], 0);
                continue;
            }
            break;
        CASE_OP_32_64_VEC(sub):
            {
                TCGOpcode neg_op;
                bool have_neg;

                if (arg_is_const(op->args[2])) {
                    /* Proceed with possible constant folding. */
                    break;
                }
                if (opc == INDEX_op_sub_i32) {
                    neg_op = INDEX_op_neg_i32;
                    have_neg = TCG_TARGET_HAS_neg_i32;
                } else if (opc == INDEX_op_sub_i64) {
                    neg_op = INDEX_op_neg_i64;
                    have_neg = TCG_TARGET_HAS_neg_i64;
                } else if (TCG_TARGET_HAS_neg_vec) {
                    TCGType type = TCGOP_VECL(op) + TCG_TYPE_V64;
                    unsigned vece = TCGOP_VECE(op);
                    neg_op = INDEX_op_neg_vec;
                    have_neg = tcg_can_emit_vec_op(neg_op, type, vece) > 0;
                } else {
                    break;
                }
                if (!have_neg) {
                    break;
                }
                if (arg_is_const(op->args[1])
                    && arg_info(op->args[1])->val == 0) {
                    op->opc = neg_op;
                    reset_temp(op->args[0]);
                    op->args[1] = op->args[2];
                    continue;
                }
            }
            break;
        CASE_OP_32_64_VEC(xor):
        CASE_OP_32_64(nand):
            if (!arg_is_const(op->args[1])
                && arg_is_const(op->args[2])
                && arg_info(op->args[2])->val == -1) {
                i = 1;
                goto try_not;
            }
            break;
        CASE_OP_32_64(nor):
            if (!arg_is_const(op->args[1])
                && arg_is_const(op->args[2])
                && arg_info(op->args[2])->val == 0) {
                i = 1;
                goto try_not;
            }
            break;
        CASE_OP_32_64_VEC(andc):
            if (!arg_is_const(op->args[2])
                && arg_is_const(op->args[1])
                && arg_info(op->args[1])->val == -1) {
                i = 2;
                goto try_not;
            }
            break;
        CASE_OP_32_64_VEC(orc):
        CASE_OP_32_64(eqv):
            if (!arg_is_const(op->args[2])
                && arg_is_const(op->args[1])
                && arg_info(op->args[1])->val == 0) {
                i = 2;
                goto try_not;
            }
            break;
        try_not:
            {
                TCGOpcode not_op;
                bool have_not;

                if (def->flags & TCG_OPF_VECTOR) {
                    not_op = INDEX_op_not_vec;
                    have_not = TCG_TARGET_HAS_not_vec;
                } else if (def->flags & TCG_OPF_64BIT) {
                    not_op = INDEX_op_not_i64;
                    have_not = TCG_TARGET_HAS_not_i64;
                } else {
                    not_op = INDEX_op_not_i32;
                    have_not = TCG_TARGET_HAS_not_i32;
                }
                if (!have_not) {
                    break;
                }
                op->opc = not_op;
                reset_temp(op->args[0]);
                op->args[1] = op->args[i];
                continue;
            }

        default:
            break;
        }

        /* Simplify expression for "op r, a, const => mov r, a" cases. */
        switch (opc) {
        CASE_OP_32_64_VEC(add):
        CASE_OP_32_64_VEC(sub):
        CASE_OP_32_64_VEC(or):
        CASE_OP_32_64_VEC(xor):
        CASE_OP_32_64_VEC(andc):
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
            if (!arg_is_const(op->args[1])
                && arg_is_const(op->args[2])
                && arg_info(op->args[2])->val == 0) {
                tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
                continue;
            }
            break;
        CASE_OP_32_64_VEC(and):
        CASE_OP_32_64_VEC(orc):
        CASE_OP_32_64(eqv):
            if (!arg_is_const(op->args[1])
                && arg_is_const(op->args[2])
                && arg_info(op->args[2])->val == -1) {
                tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
                continue;
            }
            break;
        default:
            break;
        }
        /* Simplify using known-zero bits.  Currently only ops with a single
           output argument are supported. */
        mask = -1;
        affected = -1;
        switch (opc) {
        CASE_OP_32_64(ext8s):
            if ((arg_info(op->args[1])->mask & 0x80) != 0) {
                break;
            }
            /* fall through */
        CASE_OP_32_64(ext8u):
            mask = 0xff;
            goto and_const;
        CASE_OP_32_64(ext16s):
            if ((arg_info(op->args[1])->mask & 0x8000) != 0) {
                break;
            }
            /* fall through */
        CASE_OP_32_64(ext16u):
            mask = 0xffff;
            goto and_const;
        case INDEX_op_ext32s_i64:
            if ((arg_info(op->args[1])->mask & 0x80000000) != 0) {
                break;
            }
            /* fall through */
        case INDEX_op_ext32u_i64:
            mask = 0xffffffffU;
            goto and_const;

        CASE_OP_32_64(and):
            mask = arg_info(op->args[2])->mask;
            if (arg_is_const(op->args[2])) {
        and_const:
                affected = arg_info(op->args[1])->mask & ~mask;
            }
            mask = arg_info(op->args[1])->mask & mask;
            break;

        case INDEX_op_ext_i32_i64:
            if ((arg_info(op->args[1])->mask & 0x80000000) != 0) {
                break;
            }
            /* fall through */
        case INDEX_op_extu_i32_i64:
            /* We do not compute affected as it is a size changing op. */
            mask = (uint32_t)arg_info(op->args[1])->mask;
            break;

        CASE_OP_32_64(andc):
            /* Known-zeros do not imply known-ones.  Therefore unless
               op->args[2] is constant, we can't infer anything from it. */
            if (arg_is_const(op->args[2])) {
                mask = ~arg_info(op->args[2])->mask;
                goto and_const;
            }
            /* But we certainly know nothing outside args[1] may be set. */
            mask = arg_info(op->args[1])->mask;
            break;
        case INDEX_op_sar_i32:
            if (arg_is_const(op->args[2])) {
                tmp = arg_info(op->args[2])->val & 31;
                mask = (int32_t)arg_info(op->args[1])->mask >> tmp;
            }
            break;
        case INDEX_op_sar_i64:
            if (arg_is_const(op->args[2])) {
                tmp = arg_info(op->args[2])->val & 63;
                mask = (int64_t)arg_info(op->args[1])->mask >> tmp;
            }
            break;

        case INDEX_op_shr_i32:
            if (arg_is_const(op->args[2])) {
                tmp = arg_info(op->args[2])->val & 31;
                mask = (uint32_t)arg_info(op->args[1])->mask >> tmp;
            }
            break;
        case INDEX_op_shr_i64:
            if (arg_is_const(op->args[2])) {
                tmp = arg_info(op->args[2])->val & 63;
                mask = (uint64_t)arg_info(op->args[1])->mask >> tmp;
            }
            break;

        case INDEX_op_extrl_i64_i32:
            mask = (uint32_t)arg_info(op->args[1])->mask;
            break;
        case INDEX_op_extrh_i64_i32:
            mask = (uint64_t)arg_info(op->args[1])->mask >> 32;
            break;

        CASE_OP_32_64(shl):
            if (arg_is_const(op->args[2])) {
                tmp = arg_info(op->args[2])->val & (TCG_TARGET_REG_BITS - 1);
                mask = arg_info(op->args[1])->mask << tmp;
            }
            break;

        CASE_OP_32_64(neg):
            /* Set to 1 all bits to the left of the rightmost. */
            mask = -(arg_info(op->args[1])->mask
                     & -arg_info(op->args[1])->mask);
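            /* Worked example: if args[1] can only have bit 2 set (mask 0x4,
               i.e. the value is 0 or 4), then -(0x4 & -0x4) = -4 =
               ...fffffffc: negation may set any bit from bit 2 upward while
               bits 0 and 1 stay known-zero. */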
            break;

        CASE_OP_32_64(deposit):
            mask = deposit64(arg_info(op->args[1])->mask,
                             op->args[3], op->args[4],
                             arg_info(op->args[2])->mask);
            break;

        CASE_OP_32_64(extract):
            mask = extract64(arg_info(op->args[1])->mask,
                             op->args[2], op->args[3]);
            if (op->args[2] == 0) {
                affected = arg_info(op->args[1])->mask & ~mask;
            }
            break;
        CASE_OP_32_64(sextract):
            mask = sextract64(arg_info(op->args[1])->mask,
                              op->args[2], op->args[3]);
            if (op->args[2] == 0 && (tcg_target_long)mask >= 0) {
                affected = arg_info(op->args[1])->mask & ~mask;
            }
            break;

        CASE_OP_32_64(or):
        CASE_OP_32_64(xor):
            mask = arg_info(op->args[1])->mask | arg_info(op->args[2])->mask;
            break;

        case INDEX_op_clz_i32:
        case INDEX_op_ctz_i32:
            mask = arg_info(op->args[2])->mask | 31;
            break;

        case INDEX_op_clz_i64:
        case INDEX_op_ctz_i64:
            mask = arg_info(op->args[2])->mask | 63;
            break;

        case INDEX_op_ctpop_i32:
            mask = 32 | 31;
            break;
        case INDEX_op_ctpop_i64:
            mask = 64 | 63;
            break;

        CASE_OP_32_64(setcond):
        case INDEX_op_setcond2_i32:
            mask = 1;
            break;

        CASE_OP_32_64(movcond):
            mask = arg_info(op->args[3])->mask | arg_info(op->args[4])->mask;
            break;

        CASE_OP_32_64(ld8u):
            mask = 0xff;
            break;
        CASE_OP_32_64(ld16u):
            mask = 0xffff;
            break;
        case INDEX_op_ld32u_i64:
            mask = 0xffffffffu;
            break;

        CASE_OP_32_64(qemu_ld):
            {
                TCGMemOpIdx oi = op->args[nb_oargs + nb_iargs];
                TCGMemOp mop = get_memop(oi);
                if (!(mop & MO_SIGN)) {
                    mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
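                    /* Example: a MO_UB (8-bit unsigned) load gives
                       (2ULL << 7) - 1 = 0xff; a MO_LEUW (16-bit unsigned)
                       load gives (2ULL << 15) - 1 = 0xffff. */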
                }
            }
            break;

        default:
            break;
        }

        /* 32-bit ops generate 32-bit results.  For the result-is-zero test
           below, we can ignore high bits, but for further optimizations we
           need to record that the high bits contain garbage. */
        partmask = mask;
        if (!(def->flags & TCG_OPF_64BIT)) {
            mask |= ~(tcg_target_ulong)0xffffffffu;
            partmask &= 0xffffffffu;
            affected &= 0xffffffffu;
        }

        if (partmask == 0) {
            tcg_debug_assert(nb_oargs == 1);
            tcg_opt_gen_movi(s, op, op->args[0], 0);
            continue;
        }
        if (affected == 0) {
            tcg_debug_assert(nb_oargs == 1);
            tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
            continue;
        }
        /* Simplify expression for "op r, a, 0 => movi r, 0" cases. */
        switch (opc) {
        CASE_OP_32_64_VEC(and):
        CASE_OP_32_64_VEC(mul):
        CASE_OP_32_64(muluh):
        CASE_OP_32_64(mulsh):
            if (arg_is_const(op->args[2])
                && arg_info(op->args[2])->val == 0) {
                tcg_opt_gen_movi(s, op, op->args[0], 0);
                continue;
            }
            break;
        default:
            break;
        }

        /* Simplify expression for "op r, a, a => mov r, a" cases. */
        switch (opc) {
        CASE_OP_32_64_VEC(or):
        CASE_OP_32_64_VEC(and):
            if (args_are_copies(op->args[1], op->args[2])) {
                tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
                continue;
            }
            break;
        default:
            break;
        }

        /* Simplify expression for "op r, a, a => movi r, 0" cases. */
        switch (opc) {
        CASE_OP_32_64_VEC(andc):
        CASE_OP_32_64_VEC(sub):
        CASE_OP_32_64_VEC(xor):
            if (args_are_copies(op->args[1], op->args[2])) {
                tcg_opt_gen_movi(s, op, op->args[0], 0);
                continue;
            }
            break;
        default:
            break;
        }
        /* Propagate constants through copy operations and do constant
           folding.  Constants will be substituted into arguments by the
           register allocator where needed and possible.  Also detect
           copies. */
        switch (opc) {
        CASE_OP_32_64_VEC(mov):
            tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
            break;
        CASE_OP_32_64(movi):
        case INDEX_op_dupi_vec:
            tcg_opt_gen_movi(s, op, op->args[0], op->args[1]);
            break;

        case INDEX_op_dup_vec:
            if (arg_is_const(op->args[1])) {
                tmp = arg_info(op->args[1])->val;
                tmp = dup_const(TCGOP_VECE(op), tmp);
                tcg_opt_gen_movi(s, op, op->args[0], tmp);
                break;
            }
            goto do_default;

        CASE_OP_32_64(not):
        CASE_OP_32_64(neg):
        CASE_OP_32_64(ext8s):
        CASE_OP_32_64(ext8u):
        CASE_OP_32_64(ext16s):
        CASE_OP_32_64(ext16u):
        CASE_OP_32_64(ctpop):
        CASE_OP_32_64(bswap16):
        CASE_OP_32_64(bswap32):
        case INDEX_op_bswap64_i64:
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext32u_i64:
        case INDEX_op_ext_i32_i64:
        case INDEX_op_extu_i32_i64:
        case INDEX_op_extrl_i64_i32:
        case INDEX_op_extrh_i64_i32:
            if (arg_is_const(op->args[1])) {
                tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0);
                tcg_opt_gen_movi(s, op, op->args[0], tmp);
                break;
            }
            goto do_default;

        CASE_OP_32_64(add):
        CASE_OP_32_64(sub):
        CASE_OP_32_64(mul):
        CASE_OP_32_64(or):
        CASE_OP_32_64(and):
        CASE_OP_32_64(xor):
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
        CASE_OP_32_64(andc):
        CASE_OP_32_64(orc):
        CASE_OP_32_64(eqv):
        CASE_OP_32_64(nand):
        CASE_OP_32_64(nor):
        CASE_OP_32_64(muluh):
        CASE_OP_32_64(mulsh):
        CASE_OP_32_64(div):
        CASE_OP_32_64(divu):
        CASE_OP_32_64(rem):
        CASE_OP_32_64(remu):
            if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
                tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
                                          arg_info(op->args[2])->val);
                tcg_opt_gen_movi(s, op, op->args[0], tmp);
                break;
            }
            goto do_default;

        CASE_OP_32_64(clz):
        CASE_OP_32_64(ctz):
            if (arg_is_const(op->args[1])) {
                TCGArg v = arg_info(op->args[1])->val;
                if (v != 0) {
                    tmp = do_constant_folding(opc, v, 0);
                    tcg_opt_gen_movi(s, op, op->args[0], tmp);
                } else {
                    tcg_opt_gen_mov(s, op, op->args[0], op->args[2]);
                }
                break;
            }
            goto do_default;
        CASE_OP_32_64(deposit):
            if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
                tmp = deposit64(arg_info(op->args[1])->val,
                                op->args[3], op->args[4],
                                arg_info(op->args[2])->val);
                tcg_opt_gen_movi(s, op, op->args[0], tmp);
                break;
            }
            goto do_default;

        CASE_OP_32_64(extract):
            if (arg_is_const(op->args[1])) {
                tmp = extract64(arg_info(op->args[1])->val,
                                op->args[2], op->args[3]);
                tcg_opt_gen_movi(s, op, op->args[0], tmp);
                break;
            }
            goto do_default;

        CASE_OP_32_64(sextract):
            if (arg_is_const(op->args[1])) {
                tmp = sextract64(arg_info(op->args[1])->val,
                                 op->args[2], op->args[3]);
                tcg_opt_gen_movi(s, op, op->args[0], tmp);
                break;
            }
            goto do_default;
        CASE_OP_32_64(extract2):
            if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
                TCGArg v1 = arg_info(op->args[1])->val;
                TCGArg v2 = arg_info(op->args[2])->val;

                if (opc == INDEX_op_extract2_i64) {
                    tmp = (v1 >> op->args[3]) | (v2 << (64 - op->args[3]));
                } else {
                    tmp = (int32_t)(((uint32_t)v1 >> op->args[3]) |
                                    ((uint32_t)v2 << (32 - op->args[3])));
                }
                tcg_opt_gen_movi(s, op, op->args[0], tmp);
                break;
            }
            goto do_default;

        CASE_OP_32_64(setcond):
            tmp = do_constant_folding_cond(opc, op->args[1],
                                           op->args[2], op->args[3]);
            if (tmp != 2) {
                tcg_opt_gen_movi(s, op, op->args[0], tmp);
                break;
            }
            goto do_default;

        CASE_OP_32_64(brcond):
            tmp = do_constant_folding_cond(opc, op->args[0],
                                           op->args[1], op->args[2]);
            if (tmp != 2) {
                if (tmp) {
                    bitmap_zero(temps_used.l, nb_temps);
                    op->opc = INDEX_op_br;
                    op->args[0] = op->args[3];
                } else {
                    tcg_op_remove(s, op);
                }
                break;
            }
            goto do_default;
        CASE_OP_32_64(movcond):
            tmp = do_constant_folding_cond(opc, op->args[1],
                                           op->args[2], op->args[5]);
            if (tmp != 2) {
                tcg_opt_gen_mov(s, op, op->args[0], op->args[4-tmp]);
                break;
            }
            if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
                tcg_target_ulong tv = arg_info(op->args[3])->val;
                tcg_target_ulong fv = arg_info(op->args[4])->val;
                TCGCond cond = op->args[5];

                if (fv == 1 && tv == 0) {
                    cond = tcg_invert_cond(cond);
                } else if (!(tv == 1 && fv == 0)) {
                    goto do_default;
                }
                op->args[3] = cond;
                op->opc = opc = (opc == INDEX_op_movcond_i32
                                 ? INDEX_op_setcond_i32
                                 : INDEX_op_setcond_i64);
                nb_iargs = 2;
            }
            goto do_default;
        case INDEX_op_add2_i32:
        case INDEX_op_sub2_i32:
            if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])
                && arg_is_const(op->args[4]) && arg_is_const(op->args[5])) {
                uint32_t al = arg_info(op->args[2])->val;
                uint32_t ah = arg_info(op->args[3])->val;
                uint32_t bl = arg_info(op->args[4])->val;
                uint32_t bh = arg_info(op->args[5])->val;
                uint64_t a = ((uint64_t)ah << 32) | al;
                uint64_t b = ((uint64_t)bh << 32) | bl;
                TCGArg rl, rh;
                TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_movi_i32);

                if (opc == INDEX_op_add2_i32) {
                    a += b;
                } else {
                    a -= b;
                }

                rl = op->args[0];
                rh = op->args[1];
                tcg_opt_gen_movi(s, op, rl, (int32_t)a);
                tcg_opt_gen_movi(s, op2, rh, (int32_t)(a >> 32));
                break;
            }
            goto do_default;
        case INDEX_op_mulu2_i32:
            if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
                uint32_t a = arg_info(op->args[2])->val;
                uint32_t b = arg_info(op->args[3])->val;
                uint64_t r = (uint64_t)a * b;
                TCGArg rl, rh;
                TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_movi_i32);

                rl = op->args[0];
                rh = op->args[1];
                tcg_opt_gen_movi(s, op, rl, (int32_t)r);
                tcg_opt_gen_movi(s, op2, rh, (int32_t)(r >> 32));
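                /* Example: "mulu2_i32 rl, rh, $0x10000, $0x10000" computes
                   r = 0x100000000 and is replaced by "movi_i32 rl, $0x0"
                   plus "movi_i32 rh, $0x1". */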
                break;
            }
            goto do_default;

        case INDEX_op_brcond2_i32:
            tmp = do_constant_folding_cond2(&op->args[0], &op->args[2],
                                            op->args[4]);
            if (tmp != 2) {
                if (tmp) {
            do_brcond_true:
                    bitmap_zero(temps_used.l, nb_temps);
                    op->opc = INDEX_op_br;
                    op->args[0] = op->args[5];
                } else {
            do_brcond_false:
                    tcg_op_remove(s, op);
                }
            } else if ((op->args[4] == TCG_COND_LT
                        || op->args[4] == TCG_COND_GE)
                       && arg_is_const(op->args[2])
                       && arg_info(op->args[2])->val == 0
                       && arg_is_const(op->args[3])
                       && arg_info(op->args[3])->val == 0) {
                /* Simplify LT/GE comparisons vs zero to a single compare
                   vs the high word of the input. */
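                /* Example: for a signed 64-bit pair ah:al, (ah:al < 0) iff
                   (ah < 0), so "brcond2 al, ah, $0, $0, lt, L" becomes
                   "brcond_i32 ah, $0, lt, L". */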
            do_brcond_high:
                bitmap_zero(temps_used.l, nb_temps);
                op->opc = INDEX_op_brcond_i32;
                op->args[0] = op->args[1];
                op->args[1] = op->args[3];
                op->args[2] = op->args[4];
                op->args[3] = op->args[5];
            } else if (op->args[4] == TCG_COND_EQ) {
                /* Simplify EQ comparisons where one of the pairs
                   can be simplified. */
                tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
                                               op->args[0], op->args[2],
                                               TCG_COND_EQ);
                if (tmp == 0) {
                    goto do_brcond_false;
                } else if (tmp == 1) {
                    goto do_brcond_high;
                }
                tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
                                               op->args[1], op->args[3],
                                               TCG_COND_EQ);
                if (tmp == 0) {
                    goto do_brcond_false;
                } else if (tmp != 1) {
                    goto do_default;
                }
            do_brcond_low:
                bitmap_zero(temps_used.l, nb_temps);
                op->opc = INDEX_op_brcond_i32;
                op->args[1] = op->args[2];
                op->args[2] = op->args[4];
                op->args[3] = op->args[5];
            } else if (op->args[4] == TCG_COND_NE) {
                /* Simplify NE comparisons where one of the pairs
                   can be simplified. */
                tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
                                               op->args[0], op->args[2],
                                               TCG_COND_NE);
                if (tmp == 0) {
                    goto do_brcond_high;
                } else if (tmp == 1) {
                    goto do_brcond_true;
                }
                tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
                                               op->args[1], op->args[3],
                                               TCG_COND_NE);
                if (tmp == 0) {
                    goto do_brcond_low;
                } else if (tmp == 1) {
                    goto do_brcond_true;
                }
                goto do_default;
            } else {
                goto do_default;
            }
            break;
        case INDEX_op_setcond2_i32:
            tmp = do_constant_folding_cond2(&op->args[1], &op->args[3],
                                            op->args[5]);
            if (tmp != 2) {
            do_setcond_const:
                tcg_opt_gen_movi(s, op, op->args[0], tmp);
            } else if ((op->args[5] == TCG_COND_LT
                        || op->args[5] == TCG_COND_GE)
                       && arg_is_const(op->args[3])
                       && arg_info(op->args[3])->val == 0
                       && arg_is_const(op->args[4])
                       && arg_info(op->args[4])->val == 0) {
                /* Simplify LT/GE comparisons vs zero to a single compare
                   vs the high word of the input. */
            do_setcond_high:
                reset_temp(op->args[0]);
                arg_info(op->args[0])->mask = 1;
                op->opc = INDEX_op_setcond_i32;
                op->args[1] = op->args[2];
                op->args[2] = op->args[4];
                op->args[3] = op->args[5];
            } else if (op->args[5] == TCG_COND_EQ) {
                /* Simplify EQ comparisons where one of the pairs
                   can be simplified. */
                tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
                                               op->args[1], op->args[3],
                                               TCG_COND_EQ);
                if (tmp == 0) {
                    goto do_setcond_const;
                } else if (tmp == 1) {
                    goto do_setcond_high;
                }
                tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
                                               op->args[2], op->args[4],
                                               TCG_COND_EQ);
                if (tmp == 0) {
                    goto do_setcond_high;
                } else if (tmp != 1) {
                    goto do_default;
                }
            do_setcond_low:
                reset_temp(op->args[0]);
                arg_info(op->args[0])->mask = 1;
                op->opc = INDEX_op_setcond_i32;
                op->args[2] = op->args[3];
                op->args[3] = op->args[5];
            } else if (op->args[5] == TCG_COND_NE) {
                /* Simplify NE comparisons where one of the pairs
                   can be simplified. */
                tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
                                               op->args[1], op->args[3],
                                               TCG_COND_NE);
                if (tmp == 0) {
                    goto do_setcond_high;
                } else if (tmp == 1) {
                    goto do_setcond_const;
                }
                tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
                                               op->args[2], op->args[4],
                                               TCG_COND_NE);
                if (tmp == 0) {
                    goto do_setcond_low;
                } else if (tmp == 1) {
                    goto do_setcond_const;
                }
                goto do_default;
            } else {
                goto do_default;
            }
            break;
        case INDEX_op_call:
            if (!(op->args[nb_oargs + nb_iargs + 1]
                  & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
                for (i = 0; i < nb_globals; i++) {
                    if (test_bit(i, temps_used.l)) {
                        reset_ts(&s->temps[i]);
                    }
                }
            }
            goto do_reset_output;

        default:
        do_default:
            /* Default case: we know nothing about the operation (or were
               unable to compute the operation result), so no propagation
               is done.  We trash everything if the operation is the end
               of a basic block, otherwise we only trash the output args.
               "mask" is the non-zero bits mask for the first output arg. */
            if (def->flags & TCG_OPF_BB_END) {
                bitmap_zero(temps_used.l, nb_temps);
            } else {
        do_reset_output:
                for (i = 0; i < nb_oargs; i++) {
                    reset_temp(op->args[i]);
                    /* Save the corresponding known-zero bits mask for the
                       first output argument (only one supported so far). */
                    if (i == 0) {
                        arg_info(op->args[i])->mask = mask;
                    }
                }
            }
            break;
        }
        /* Eliminate duplicate and redundant fence instructions. */
        if (prev_mb) {
            switch (opc) {
            case INDEX_op_mb:
                /* Merge two barriers of the same type into one,
                 * or a weaker barrier into a stronger one,
                 * or two weaker barriers into a stronger one.
                 *   mb X; mb Y => mb X|Y
                 *   mb; strl => mb; st
                 *   ldaq; mb => ld; mb
                 *   ldaq; strl => ld; mb; st
                 * Other combinations are also merged into a strong
                 * barrier.  This is stricter than specified but for
                 * the purposes of TCG is better than not optimizing.
                 */
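                /* Example: "mb TCG_MO_LD_LD|TCG_BAR_SC" followed by
                   "mb TCG_MO_ST_ST|TCG_BAR_SC" merges into one barrier whose
                   argument is the OR of the two masks. */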
                prev_mb->args[0] |= op->args[0];
                tcg_op_remove(s, op);
                break;

            default:
                /* Opcodes that end the block stop the optimization. */
                if ((def->flags & TCG_OPF_BB_END) == 0) {
                    break;
                }
                /* fall through */
            case INDEX_op_qemu_ld_i32:
            case INDEX_op_qemu_ld_i64:
            case INDEX_op_qemu_st_i32:
            case INDEX_op_qemu_st_i64:
            case INDEX_op_call:
                /* Opcodes that touch guest memory stop the optimization. */
                prev_mb = NULL;
                break;
            }
        } else if (opc == INDEX_op_mb) {
            prev_mb = op;
        }
    }
}