/*
 * Optimizations for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2010 Samsung Electronics.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "config.h"

#include <stdlib.h>
#include <stdio.h>

#include "qemu-common.h"
#include "tcg-op.h"
#define CASE_OP_32_64(x)                        \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)
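/* Illustration (added; not part of the macro's definition): after
   preprocessing, CASE_OP_32_64(add) expands to the two labels

       case INDEX_op_add_i32: case INDEX_op_add_i64:

   so a single case body below can match both widths of an operation. */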
typedef enum {
    TCG_TEMP_UNDEF = 0,
    TCG_TEMP_CONST,
    TCG_TEMP_COPY,
} tcg_temp_state;

struct tcg_temp_info {
    tcg_temp_state state;
    uint16_t prev_copy;        /* doubly linked circular list of copies */
    uint16_t next_copy;
    tcg_target_ulong val;      /* value when state == TCG_TEMP_CONST */
    tcg_target_ulong mask;     /* 1 bits may be nonzero; 0 bits known zero */
};

static struct tcg_temp_info temps[TCG_MAX_TEMPS];
/* Reset TEMP's state to TCG_TEMP_UNDEF.  If TEMP only had one copy, also
   remove the copy flag from that remaining temp. */
static void reset_temp(TCGArg temp)
{
    if (temps[temp].state == TCG_TEMP_COPY) {
        if (temps[temp].prev_copy == temps[temp].next_copy) {
            temps[temps[temp].next_copy].state = TCG_TEMP_UNDEF;
        } else {
            temps[temps[temp].next_copy].prev_copy = temps[temp].prev_copy;
            temps[temps[temp].prev_copy].next_copy = temps[temp].next_copy;
        }
    }
    temps[temp].state = TCG_TEMP_UNDEF;
    temps[temp].mask = -1;
}
/* Reset all temporaries, given that there are NB_TEMPS of them. */
static void reset_all_temps(int nb_temps)
{
    int i;
    for (i = 0; i < nb_temps; i++) {
        temps[i].state = TCG_TEMP_UNDEF;
        temps[i].mask = -1;
    }
}
static int op_bits(TCGOpcode op)
{
    const TCGOpDef *def = &tcg_op_defs[op];
    return def->flags & TCG_OPF_64BIT ? 64 : 32;
}

static TCGOpcode op_to_mov(TCGOpcode op)
{
    switch (op_bits(op)) {
    case 32:
        return INDEX_op_mov_i32;
    case 64:
        return INDEX_op_mov_i64;
    default:
        fprintf(stderr, "op_to_mov: unexpected return value of "
                "function op_bits.\n");
        tcg_abort();
    }
}
static TCGOpcode op_to_movi(TCGOpcode op)
{
    switch (op_bits(op)) {
    case 32:
        return INDEX_op_movi_i32;
    case 64:
        return INDEX_op_movi_i64;
    default:
        fprintf(stderr, "op_to_movi: unexpected return value of "
                "function op_bits.\n");
        tcg_abort();
    }
}
static TCGArg find_better_copy(TCGContext *s, TCGArg temp)
{
    TCGArg i;

    /* If this is already a global, we can't do better. */
    if (temp < s->nb_globals) {
        return temp;
    }

    /* Search for a global first. */
    for (i = temps[temp].next_copy ; i != temp ; i = temps[i].next_copy) {
        if (i < s->nb_globals) {
            return i;
        }
    }

    /* If it is a temp, search for a temp local. */
    if (!s->temps[temp].temp_local) {
        for (i = temps[temp].next_copy ; i != temp ; i = temps[i].next_copy) {
            if (s->temps[i].temp_local) {
                return i;
            }
        }
    }

    /* Failed to find a better representation; return the same temp. */
    return temp;
}
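/* Illustrative sketch with hypothetical temps: if t5 belongs to a copy ring
   { t5, local L2, global G0 }, find_better_copy(s, t5) returns G0; for the
   ring { t5, L2 } it returns L2; if the ring holds only plain temps it
   returns t5 unchanged.  Globals and temp locals survive longer (across
   basic blocks and branches respectively), so they make better
   representatives. */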
static bool temps_are_copies(TCGArg arg1, TCGArg arg2)
{
    TCGArg i;

    if (arg1 == arg2) {
        return true;
    }

    if (temps[arg1].state != TCG_TEMP_COPY
        || temps[arg2].state != TCG_TEMP_COPY) {
        return false;
    }

    for (i = temps[arg1].next_copy ; i != arg1 ; i = temps[i].next_copy) {
        if (i == arg2) {
            return true;
        }
    }

    return false;
}
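/* Illustrative trace with a hypothetical ring 3 -> 7 -> 9 -> 3:
   temps_are_copies(3, 9) follows next_copy from 3, visits 7 then 9, and
   returns true; temps_are_copies(3, 4) walks the whole ring back to 3
   without meeting 4 and returns false. */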
static void tcg_opt_gen_mov(TCGContext *s, int op_index, TCGArg *gen_args,
                            TCGOpcode old_op, TCGArg dst, TCGArg src)
{
    TCGOpcode new_op = op_to_mov(old_op);
    tcg_target_ulong mask;

    s->gen_opc_buf[op_index] = new_op;

    reset_temp(dst);
    mask = temps[src].mask;
    if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) {
        /* High bits of the destination are now garbage.  */
        mask |= ~0xffffffffull;
    }
    temps[dst].mask = mask;

    assert(temps[src].state != TCG_TEMP_CONST);

    if (s->temps[src].type == s->temps[dst].type) {
        if (temps[src].state != TCG_TEMP_COPY) {
            temps[src].state = TCG_TEMP_COPY;
            temps[src].next_copy = src;
            temps[src].prev_copy = src;
        }
        temps[dst].state = TCG_TEMP_COPY;
        temps[dst].next_copy = temps[src].next_copy;
        temps[dst].prev_copy = src;
        temps[temps[dst].next_copy].prev_copy = dst;
        temps[src].next_copy = dst;
    }

    gen_args[0] = dst;
    gen_args[1] = src;
}
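/* Sketch of the ring update above (hypothetical indices): if src = 2 starts
   as the singleton ring 2 -> 2, linking dst = 5 produces 2 -> 5 -> 2:
   next_copy[2] = 5, prev_copy[5] = 2, next_copy[5] = 2, prev_copy[2] = 5.
   The destination is always inserted immediately after the source. */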
static void tcg_opt_gen_movi(TCGContext *s, int op_index, TCGArg *gen_args,
                             TCGOpcode old_op, TCGArg dst, TCGArg val)
{
    TCGOpcode new_op = op_to_movi(old_op);
    tcg_target_ulong mask;

    s->gen_opc_buf[op_index] = new_op;

    reset_temp(dst);
    temps[dst].state = TCG_TEMP_CONST;
    temps[dst].val = val;
    mask = val;
    if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_movi_i32) {
        /* High bits of the destination are now garbage.  */
        mask |= ~0xffffffffull;
    }
    temps[dst].mask = mask;

    gen_args[0] = dst;
    gen_args[1] = val;
}
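/* Example of the mask bookkeeping (illustrative values): after
   "movi_i64 dst, 0xff00", temps[dst].mask == 0xff00, so the known-zero
   logic below can, e.g., prove "and r, dst, 0xff" is always zero.  For a
   movi_i32 on a 64-bit host, the high 32 bits are marked unknown instead. */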
static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y)
{
    uint64_t l64, h64;

    switch (op) {
    CASE_OP_32_64(add):
        return x + y;
    CASE_OP_32_64(sub):
        return x - y;
    CASE_OP_32_64(mul):
        return x * y;
    CASE_OP_32_64(and):
        return x & y;
    CASE_OP_32_64(or):
        return x | y;
    CASE_OP_32_64(xor):
        return x ^ y;

    case INDEX_op_shl_i32:
        return (uint32_t)x << (y & 31);

    case INDEX_op_shl_i64:
        return (uint64_t)x << (y & 63);

    case INDEX_op_shr_i32:
        return (uint32_t)x >> (y & 31);

    case INDEX_op_trunc_shr_i32:
    case INDEX_op_shr_i64:
        return (uint64_t)x >> (y & 63);

    case INDEX_op_sar_i32:
        return (int32_t)x >> (y & 31);

    case INDEX_op_sar_i64:
        return (int64_t)x >> (y & 63);

    case INDEX_op_rotr_i32:
        return ror32(x, y & 31);

    case INDEX_op_rotr_i64:
        return ror64(x, y & 63);

    case INDEX_op_rotl_i32:
        return rol32(x, y & 31);

    case INDEX_op_rotl_i64:
        return rol64(x, y & 63);

    CASE_OP_32_64(not):
        return ~x;
    CASE_OP_32_64(neg):
        return -x;
    CASE_OP_32_64(andc):
        return x & ~y;
    CASE_OP_32_64(orc):
        return x | ~y;
    CASE_OP_32_64(eqv):
        return ~(x ^ y);
    CASE_OP_32_64(nand):
        return ~(x & y);
    CASE_OP_32_64(nor):
        return ~(x | y);

    CASE_OP_32_64(ext8s):
        return (int8_t)x;

    CASE_OP_32_64(ext16s):
        return (int16_t)x;

    CASE_OP_32_64(ext8u):
        return (uint8_t)x;

    CASE_OP_32_64(ext16u):
        return (uint16_t)x;

    case INDEX_op_ext32s_i64:
        return (int32_t)x;

    case INDEX_op_ext32u_i64:
        return (uint32_t)x;

    case INDEX_op_muluh_i32:
        return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
    case INDEX_op_mulsh_i32:
        return ((int64_t)(int32_t)x * (int32_t)y) >> 32;

    case INDEX_op_muluh_i64:
        mulu64(&l64, &h64, x, y);
        return h64;
    case INDEX_op_mulsh_i64:
        muls64(&l64, &h64, x, y);
        return h64;

    case INDEX_op_div_i32:
        /* Avoid crashing on divide by zero, otherwise undefined. */
        return (int32_t)x / ((int32_t)y ? : 1);
    case INDEX_op_divu_i32:
        return (uint32_t)x / ((uint32_t)y ? : 1);
    case INDEX_op_div_i64:
        return (int64_t)x / ((int64_t)y ? : 1);
    case INDEX_op_divu_i64:
        return (uint64_t)x / ((uint64_t)y ? : 1);

    case INDEX_op_rem_i32:
        return (int32_t)x % ((int32_t)y ? : 1);
    case INDEX_op_remu_i32:
        return (uint32_t)x % ((uint32_t)y ? : 1);
    case INDEX_op_rem_i64:
        return (int64_t)x % ((int64_t)y ? : 1);
    case INDEX_op_remu_i64:
        return (uint64_t)x % ((uint64_t)y ? : 1);

    default:
        fprintf(stderr,
                "Unrecognized operation %d in do_constant_folding.\n", op);
        tcg_abort();
    }
}
static TCGArg do_constant_folding(TCGOpcode op, TCGArg x, TCGArg y)
{
    TCGArg res = do_constant_folding_2(op, x, y);
    if (op_bits(op) == 32) {
        res &= 0xffffffff;
    }
    return res;
}
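/* Worked example (illustrative): do_constant_folding(INDEX_op_sar_i32,
   0x80000000, 4) evaluates (int32_t)0x80000000 >> 4 in
   do_constant_folding_2; the final masking with 0xffffffff yields
   0xf8000000 even when TCGArg is 64 bits wide. */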
static bool do_constant_folding_cond_32(uint32_t x, uint32_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int32_t)x < (int32_t)y;
    case TCG_COND_GE:
        return (int32_t)x >= (int32_t)y;
    case TCG_COND_LE:
        return (int32_t)x <= (int32_t)y;
    case TCG_COND_GT:
        return (int32_t)x > (int32_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    default:
        tcg_abort();
    }
}
static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int64_t)x < (int64_t)y;
    case TCG_COND_GE:
        return (int64_t)x >= (int64_t)y;
    case TCG_COND_LE:
        return (int64_t)x <= (int64_t)y;
    case TCG_COND_GT:
        return (int64_t)x > (int64_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    default:
        tcg_abort();
    }
}
static bool do_constant_folding_cond_eq(TCGCond c)
{
    /* The two operands are known equal; only the condition matters.  */
    switch (c) {
    case TCG_COND_GT:
    case TCG_COND_LTU:
    case TCG_COND_LT:
    case TCG_COND_GTU:
    case TCG_COND_NE:
        return 0;
    case TCG_COND_GE:
    case TCG_COND_GEU:
    case TCG_COND_LE:
    case TCG_COND_LEU:
    case TCG_COND_EQ:
        return 1;
    default:
        tcg_abort();
    }
}
/* Return 2 if the condition can't be simplified, and the result
   of the condition (0 or 1) if it can. */
static TCGArg do_constant_folding_cond(TCGOpcode op, TCGArg x,
                                       TCGArg y, TCGCond c)
{
    if (temps[x].state == TCG_TEMP_CONST && temps[y].state == TCG_TEMP_CONST) {
        switch (op_bits(op)) {
        case 32:
            return do_constant_folding_cond_32(temps[x].val, temps[y].val, c);
        case 64:
            return do_constant_folding_cond_64(temps[x].val, temps[y].val, c);
        default:
            tcg_abort();
        }
    } else if (temps_are_copies(x, y)) {
        return do_constant_folding_cond_eq(c);
    } else if (temps[y].state == TCG_TEMP_CONST && temps[y].val == 0) {
        switch (c) {
        case TCG_COND_LTU:
            return 0;
        case TCG_COND_GEU:
            return 1;
        default:
            return 2;
        }
    }
    return 2;
}
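/* Illustrative outcomes: with x = const 3, y = const 5 and TCG_COND_LT on a
   32-bit op this returns 1; with x and y in the same copy ring it reduces
   to do_constant_folding_cond_eq(c); with unknown x against const 0 only
   LTU (never true, 0) and GEU (always true, 1) fold, and everything else
   returns 2, i.e. "cannot simplify". */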
/* Return 2 if the condition can't be simplified, and the result
   of the condition (0 or 1) if it can. */
static TCGArg do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
{
    TCGArg al = p1[0], ah = p1[1];
    TCGArg bl = p2[0], bh = p2[1];

    if (temps[bl].state == TCG_TEMP_CONST
        && temps[bh].state == TCG_TEMP_CONST) {
        uint64_t b = ((uint64_t)temps[bh].val << 32) | (uint32_t)temps[bl].val;

        if (temps[al].state == TCG_TEMP_CONST
            && temps[ah].state == TCG_TEMP_CONST) {
            uint64_t a;
            a = ((uint64_t)temps[ah].val << 32) | (uint32_t)temps[al].val;
            return do_constant_folding_cond_64(a, b, c);
        }
        if (b == 0) {
            switch (c) {
            case TCG_COND_LTU:
                return 0;
            case TCG_COND_GEU:
                return 1;
            default:
                break;
            }
        }
    }
    if (temps_are_copies(al, bl) && temps_are_copies(ah, bh)) {
        return do_constant_folding_cond_eq(c);
    }
    return 2;
}
static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
{
    TCGArg a1 = *p1, a2 = *p2;
    int sum = 0;
    sum += temps[a1].state == TCG_TEMP_CONST;
    sum -= temps[a2].state == TCG_TEMP_CONST;

    /* Prefer the constant in second argument, and then the form
       op a, a, b, which is better handled on non-RISC hosts. */
    if (sum > 0 || (sum == 0 && dest == a2)) {
        *p1 = a2;
        *p2 = a1;
        return true;
    }
    return false;
}
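/* Example rewrites (illustrative): "add t0, $5, t1" becomes
   "add t0, t1, $5" (sum > 0 moves the constant to the second slot), and
   "add t0, t1, t0" becomes "add t0, t0, t1" (sum == 0 and dest == a2),
   the two-address form that non-RISC hosts such as x86 handle directly.
   Callers that swap comparison operands must also swap the condition. */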
static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
{
    int sum = 0;
    sum += temps[p1[0]].state == TCG_TEMP_CONST;
    sum += temps[p1[1]].state == TCG_TEMP_CONST;
    sum -= temps[p2[0]].state == TCG_TEMP_CONST;
    sum -= temps[p2[1]].state == TCG_TEMP_CONST;
    if (sum > 0) {
        TCGArg t;
        t = p1[0], p1[0] = p2[0], p2[0] = t;
        t = p1[1], p1[1] = p2[1], p2[1] = t;
        return true;
    }
    return false;
}
/* Propagate constants and copies, fold constant expressions. */
static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
                                    TCGArg *args, TCGOpDef *tcg_op_defs)
{
    int nb_ops, op_index, nb_temps, nb_globals;
    TCGArg *gen_args;

    /* Array TEMPS has an element for each temp.
       If this temp holds a constant then its value is kept in the element's
       val field.  If this temp is a copy of other ones then the other
       copies are available through the doubly linked circular list. */

    nb_temps = s->nb_temps;
    nb_globals = s->nb_globals;
    reset_all_temps(nb_temps);

    nb_ops = tcg_opc_ptr - s->gen_opc_buf;
    gen_args = args;
    for (op_index = 0; op_index < nb_ops; op_index++) {
        TCGOpcode op = s->gen_opc_buf[op_index];
        const TCGOpDef *def = &tcg_op_defs[op];
        tcg_target_ulong mask, partmask, affected;
        int nb_oargs, nb_iargs, nb_args, i;
        TCGArg tmp;

        if (op == INDEX_op_call) {
            *gen_args++ = tmp = *args++;
            nb_oargs = tmp >> 16;
            nb_iargs = tmp & 0xffff;
            nb_args = nb_oargs + nb_iargs + def->nb_cargs;
        } else {
            nb_oargs = def->nb_oargs;
            nb_iargs = def->nb_iargs;
            nb_args = def->nb_args;
        }
        /* Do copy propagation */
        for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
            if (temps[args[i]].state == TCG_TEMP_COPY) {
                args[i] = find_better_copy(s, args[i]);
            }
        }
        /* For commutative operations make constant second argument */
        switch (op) {
        CASE_OP_32_64(add):
        CASE_OP_32_64(mul):
        CASE_OP_32_64(and):
        CASE_OP_32_64(or):
        CASE_OP_32_64(xor):
        CASE_OP_32_64(eqv):
        CASE_OP_32_64(nand):
        CASE_OP_32_64(nor):
        CASE_OP_32_64(muluh):
        CASE_OP_32_64(mulsh):
            swap_commutative(args[0], &args[1], &args[2]);
            break;
        CASE_OP_32_64(brcond):
            if (swap_commutative(-1, &args[0], &args[1])) {
                args[2] = tcg_swap_cond(args[2]);
            }
            break;
        CASE_OP_32_64(setcond):
            if (swap_commutative(args[0], &args[1], &args[2])) {
                args[3] = tcg_swap_cond(args[3]);
            }
            break;
        CASE_OP_32_64(movcond):
            if (swap_commutative(-1, &args[1], &args[2])) {
                args[5] = tcg_swap_cond(args[5]);
            }
            /* For movcond, we canonicalize the "false" input reg to match
               the destination reg so that the tcg backend can implement
               a "move if true" operation.  */
            if (swap_commutative(args[0], &args[4], &args[3])) {
                args[5] = tcg_invert_cond(args[5]);
            }
            break;
        case INDEX_op_add2_i32:
            swap_commutative(args[0], &args[2], &args[4]);
            swap_commutative(args[1], &args[3], &args[5]);
            break;
        CASE_OP_32_64(mulu2):
        CASE_OP_32_64(muls2):
            swap_commutative(args[0], &args[2], &args[3]);
            break;
        case INDEX_op_brcond2_i32:
            if (swap_commutative2(&args[0], &args[2])) {
                args[4] = tcg_swap_cond(args[4]);
            }
            break;
        case INDEX_op_setcond2_i32:
            if (swap_commutative2(&args[1], &args[3])) {
                args[5] = tcg_swap_cond(args[5]);
            }
            break;
        default:
            break;
        }
        /* Simplify expressions for "shift/rot r, 0, a => movi r, 0",
           and "sub r, 0, a => neg r, a" case.  */
        switch (op) {
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
            if (temps[args[1]].state == TCG_TEMP_CONST
                && temps[args[1]].val == 0) {
                tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], 0);
                args += 3;
                gen_args += 2;
                continue;
            }
            break;
        CASE_OP_32_64(sub):
            {
                TCGOpcode neg_op;
                bool have_neg;

                if (temps[args[2]].state == TCG_TEMP_CONST) {
                    /* Proceed with possible constant folding. */
                    break;
                }
                if (op == INDEX_op_sub_i32) {
                    neg_op = INDEX_op_neg_i32;
                    have_neg = TCG_TARGET_HAS_neg_i32;
                } else {
                    neg_op = INDEX_op_neg_i64;
                    have_neg = TCG_TARGET_HAS_neg_i64;
                }
                if (!have_neg) {
                    break;
                }
                if (temps[args[1]].state == TCG_TEMP_CONST
                    && temps[args[1]].val == 0) {
                    s->gen_opc_buf[op_index] = neg_op;
                    reset_temp(args[0]);
                    gen_args[0] = args[0];
                    gen_args[1] = args[2];
                    args += 3;
                    gen_args += 2;
                    continue;
                }
            }
            break;
        CASE_OP_32_64(xor):
        CASE_OP_32_64(nand):
            if (temps[args[1]].state != TCG_TEMP_CONST
                && temps[args[2]].state == TCG_TEMP_CONST
                && temps[args[2]].val == -1) {
                i = 1;
                goto try_not;
            }
            break;
        CASE_OP_32_64(nor):
            if (temps[args[1]].state != TCG_TEMP_CONST
                && temps[args[2]].state == TCG_TEMP_CONST
                && temps[args[2]].val == 0) {
                i = 1;
                goto try_not;
            }
            break;
        CASE_OP_32_64(andc):
            if (temps[args[2]].state != TCG_TEMP_CONST
                && temps[args[1]].state == TCG_TEMP_CONST
                && temps[args[1]].val == -1) {
                i = 2;
                goto try_not;
            }
            break;
        CASE_OP_32_64(orc):
        CASE_OP_32_64(eqv):
            if (temps[args[2]].state != TCG_TEMP_CONST
                && temps[args[1]].state == TCG_TEMP_CONST
                && temps[args[1]].val == 0) {
                i = 2;
                goto try_not;
            }
            break;
        try_not:
            {
                TCGOpcode not_op;
                bool have_not;

                if (def->flags & TCG_OPF_64BIT) {
                    not_op = INDEX_op_not_i64;
                    have_not = TCG_TARGET_HAS_not_i64;
                } else {
                    not_op = INDEX_op_not_i32;
                    have_not = TCG_TARGET_HAS_not_i32;
                }
                if (!have_not) {
                    break;
                }
                s->gen_opc_buf[op_index] = not_op;
                reset_temp(args[0]);
                gen_args[0] = args[0];
                gen_args[1] = args[i];
                args += 3;
                gen_args += 2;
                continue;
            }
        default:
            break;
        }
        /* Simplify expression for "op r, a, const => mov r, a" cases */
        switch (op) {
        CASE_OP_32_64(add):
        CASE_OP_32_64(sub):
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
        CASE_OP_32_64(or):
        CASE_OP_32_64(xor):
        CASE_OP_32_64(andc):
            if (temps[args[1]].state != TCG_TEMP_CONST
                && temps[args[2]].state == TCG_TEMP_CONST
                && temps[args[2]].val == 0) {
                goto do_mov3;
            }
            break;
        CASE_OP_32_64(and):
        CASE_OP_32_64(orc):
        CASE_OP_32_64(eqv):
            if (temps[args[1]].state != TCG_TEMP_CONST
                && temps[args[2]].state == TCG_TEMP_CONST
                && temps[args[2]].val == -1) {
                goto do_mov3;
            }
            break;
        do_mov3:
            if (temps_are_copies(args[0], args[1])) {
                s->gen_opc_buf[op_index] = INDEX_op_nop;
            } else {
                tcg_opt_gen_mov(s, op_index, gen_args, op, args[0], args[1]);
                gen_args += 2;
            }
            args += 3;
            continue;
        default:
            break;
        }
        /* Simplify using known-zero bits.  Currently only ops with a single
           output argument are supported. */
        mask = -1;
        affected = -1;
        switch (op) {
        CASE_OP_32_64(ext8s):
            if ((temps[args[1]].mask & 0x80) != 0) {
                break;
            }
        CASE_OP_32_64(ext8u):
            mask = 0xff;
            goto and_const;
        CASE_OP_32_64(ext16s):
            if ((temps[args[1]].mask & 0x8000) != 0) {
                break;
            }
        CASE_OP_32_64(ext16u):
            mask = 0xffff;
            goto and_const;
        case INDEX_op_ext32s_i64:
            if ((temps[args[1]].mask & 0x80000000) != 0) {
                break;
            }
        case INDEX_op_ext32u_i64:
            mask = 0xffffffffU;
            goto and_const;

        CASE_OP_32_64(and):
            mask = temps[args[2]].mask;
            if (temps[args[2]].state == TCG_TEMP_CONST) {
        and_const:
                affected = temps[args[1]].mask & ~mask;
            }
            mask = temps[args[1]].mask & mask;
            break;

        CASE_OP_32_64(andc):
            /* Known-zeros does not imply known-ones.  Therefore unless
               args[2] is constant, we can't infer anything from it.  */
            if (temps[args[2]].state == TCG_TEMP_CONST) {
                mask = ~temps[args[2]].mask;
                goto and_const;
            }
            /* But we certainly know nothing outside args[1] may be set. */
            mask = temps[args[1]].mask;
            break;
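        /* Worked example (illustrative): with temps[args[1]].mask == 0xff0f
           and args[2] a constant 0x00ff, AND gives affected = 0xff0f &
           ~0x00ff = 0xff00 (bits the op can actually clear) and a result
           mask of 0xff0f & 0x00ff = 0x000f.  If affected had been 0, the
           AND could not change args[1] and is turned into a mov by the
           affected == 0 test further down. */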
        case INDEX_op_sar_i32:
            if (temps[args[2]].state == TCG_TEMP_CONST) {
                tmp = temps[args[2]].val & 31;
                mask = (int32_t)temps[args[1]].mask >> tmp;
            }
            break;
        case INDEX_op_sar_i64:
            if (temps[args[2]].state == TCG_TEMP_CONST) {
                tmp = temps[args[2]].val & 63;
                mask = (int64_t)temps[args[1]].mask >> tmp;
            }
            break;

        case INDEX_op_shr_i32:
            if (temps[args[2]].state == TCG_TEMP_CONST) {
                tmp = temps[args[2]].val & 31;
                mask = (uint32_t)temps[args[1]].mask >> tmp;
            }
            break;
        case INDEX_op_shr_i64:
            if (temps[args[2]].state == TCG_TEMP_CONST) {
                tmp = temps[args[2]].val & 63;
                mask = (uint64_t)temps[args[1]].mask >> tmp;
            }
            break;

        case INDEX_op_trunc_shr_i32:
            mask = (uint64_t)temps[args[1]].mask >> args[2];
            break;

        CASE_OP_32_64(shl):
            if (temps[args[2]].state == TCG_TEMP_CONST) {
                tmp = temps[args[2]].val & (TCG_TARGET_REG_BITS - 1);
                mask = temps[args[1]].mask << tmp;
            }
            break;
        CASE_OP_32_64(neg):
            /* Set to 1 all bits to the left of the rightmost.  */
            mask = -(temps[args[1]].mask & -temps[args[1]].mask);
            break;
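        /* Worked example (illustrative): for temps[args[1]].mask == 0x06
           (only bits 1-2 possibly set), mask & -mask isolates the lowest
           candidate bit, 0x02, and negating that gives ...11111110:
           negation can carry into any bit at or above the lowest
           possibly-set input bit, but never below it. */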
        CASE_OP_32_64(deposit):
            mask = deposit64(temps[args[1]].mask, args[3], args[4],
                             temps[args[2]].mask);
            break;

        CASE_OP_32_64(or):
        CASE_OP_32_64(xor):
            /* Set bit to 1 if not all corresponding bits known zero.  */
            mask = temps[args[1]].mask | temps[args[2]].mask;
            break;

        CASE_OP_32_64(setcond):
        case INDEX_op_setcond2_i32:
            mask = 1;
            break;

        CASE_OP_32_64(movcond):
            mask = temps[args[3]].mask | temps[args[4]].mask;
            break;

        CASE_OP_32_64(ld8u):
            mask = 0xff;
            break;
        CASE_OP_32_64(ld16u):
            mask = 0xffff;
            break;
        case INDEX_op_ld32u_i64:
            mask = 0xffffffffu;
            break;

        CASE_OP_32_64(qemu_ld):
            {
                TCGMemOp mop = args[nb_oargs + nb_iargs];
                if (!(mop & MO_SIGN)) {
                    mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
                }
            }
            break;

        default:
            break;
        }
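        /* Worked example (illustrative): for an unsigned 16-bit load,
           mop & MO_SIZE == 1, so mask = (2ULL << ((8 << 1) - 1)) - 1 =
           (2ULL << 15) - 1 = 0xffff.  Writing it as 2ULL << (bits - 1)
           keeps the shift count at most 63 and so avoids the undefined
           "1ULL << 64" that a 64-bit load would otherwise produce. */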
        /* 32-bit ops generate 32-bit results.  For the result-is-zero test
           below, we can ignore high bits, but for further optimizations we
           need to record that the high bits contain garbage.  */
        partmask = mask;
        if (!(def->flags & TCG_OPF_64BIT)) {
            mask |= ~(tcg_target_ulong)0xffffffffu;
            partmask &= 0xffffffffu;
            affected &= 0xffffffffu;
        }

        if (partmask == 0) {
            assert(nb_oargs == 1);
            tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], 0);
            args += nb_args;
            gen_args += 2;
            continue;
        }
        if (affected == 0) {
            assert(nb_oargs == 1);
            if (temps_are_copies(args[0], args[1])) {
                s->gen_opc_buf[op_index] = INDEX_op_nop;
            } else if (temps[args[1]].state != TCG_TEMP_CONST) {
                tcg_opt_gen_mov(s, op_index, gen_args, op, args[0], args[1]);
                gen_args += 2;
            } else {
                tcg_opt_gen_movi(s, op_index, gen_args, op,
                                 args[0], temps[args[1]].val);
                gen_args += 2;
            }
            args += nb_args;
            continue;
        }
        /* Simplify expression for "op r, a, 0 => movi r, 0" cases */
        switch (op) {
        CASE_OP_32_64(and):
        CASE_OP_32_64(mul):
        CASE_OP_32_64(muluh):
        CASE_OP_32_64(mulsh):
            if ((temps[args[2]].state == TCG_TEMP_CONST
                 && temps[args[2]].val == 0)) {
                tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], 0);
                args += 3;
                gen_args += 2;
                continue;
            }
            break;
        default:
            break;
        }
        /* Simplify expression for "op r, a, a => mov r, a" cases */
        switch (op) {
        CASE_OP_32_64(or):
        CASE_OP_32_64(and):
            if (temps_are_copies(args[1], args[2])) {
                if (temps_are_copies(args[0], args[1])) {
                    s->gen_opc_buf[op_index] = INDEX_op_nop;
                } else {
                    tcg_opt_gen_mov(s, op_index, gen_args, op,
                                    args[0], args[1]);
                    gen_args += 2;
                }
                args += 3;
                continue;
            }
            break;
        default:
            break;
        }
        /* Simplify expression for "op r, a, a => movi r, 0" cases */
        switch (op) {
        CASE_OP_32_64(andc):
        CASE_OP_32_64(sub):
        CASE_OP_32_64(xor):
            if (temps_are_copies(args[1], args[2])) {
                tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], 0);
                gen_args += 2;
                args += 3;
                continue;
            }
            break;
        default:
            break;
        }
        /* Propagate constants through copy operations and do constant
           folding.  Constants will be substituted into arguments by the
           register allocator where needed and possible.  Also detect
           copies. */
        switch (op) {
        CASE_OP_32_64(mov):
            if (temps_are_copies(args[0], args[1])) {
                args += 2;
                s->gen_opc_buf[op_index] = INDEX_op_nop;
                break;
            }
            if (temps[args[1]].state != TCG_TEMP_CONST) {
                tcg_opt_gen_mov(s, op_index, gen_args, op, args[0], args[1]);
                gen_args += 2;
                args += 2;
                break;
            }
            /* Source argument is constant.  Rewrite the operation and
               let movi case handle it. */
            args[1] = temps[args[1]].val;
            /* fallthrough */
        CASE_OP_32_64(movi):
            tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], args[1]);
            gen_args += 2;
            args += 2;
            break;
        CASE_OP_32_64(not):
        CASE_OP_32_64(neg):
        CASE_OP_32_64(ext8s):
        CASE_OP_32_64(ext8u):
        CASE_OP_32_64(ext16s):
        CASE_OP_32_64(ext16u):
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext32u_i64:
            if (temps[args[1]].state == TCG_TEMP_CONST) {
                tmp = do_constant_folding(op, temps[args[1]].val, 0);
                tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], tmp);
                gen_args += 2;
                args += 2;
                break;
            }
            goto do_default;

        case INDEX_op_trunc_shr_i32:
            if (temps[args[1]].state == TCG_TEMP_CONST) {
                tmp = do_constant_folding(op, temps[args[1]].val, args[2]);
                tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], tmp);
                gen_args += 2;
                args += 3;
                break;
            }
            goto do_default;
        CASE_OP_32_64(add):
        CASE_OP_32_64(sub):
        CASE_OP_32_64(mul):
        CASE_OP_32_64(or):
        CASE_OP_32_64(and):
        CASE_OP_32_64(xor):
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
        CASE_OP_32_64(andc):
        CASE_OP_32_64(orc):
        CASE_OP_32_64(eqv):
        CASE_OP_32_64(nand):
        CASE_OP_32_64(nor):
        CASE_OP_32_64(muluh):
        CASE_OP_32_64(mulsh):
        CASE_OP_32_64(div):
        CASE_OP_32_64(divu):
        CASE_OP_32_64(rem):
        CASE_OP_32_64(remu):
            if (temps[args[1]].state == TCG_TEMP_CONST
                && temps[args[2]].state == TCG_TEMP_CONST) {
                tmp = do_constant_folding(op, temps[args[1]].val,
                                          temps[args[2]].val);
                tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], tmp);
                gen_args += 2;
                args += 3;
                break;
            }
            goto do_default;
        CASE_OP_32_64(deposit):
            if (temps[args[1]].state == TCG_TEMP_CONST
                && temps[args[2]].state == TCG_TEMP_CONST) {
                tmp = deposit64(temps[args[1]].val, args[3], args[4],
                                temps[args[2]].val);
                tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], tmp);
                gen_args += 2;
                args += 5;
                break;
            }
            goto do_default;
        CASE_OP_32_64(setcond):
            tmp = do_constant_folding_cond(op, args[1], args[2], args[3]);
            if (tmp != 2) {
                tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], tmp);
                gen_args += 2;
                args += 4;
                break;
            }
            goto do_default;
        CASE_OP_32_64(brcond):
            tmp = do_constant_folding_cond(op, args[0], args[1], args[2]);
            if (tmp != 2) {
                if (tmp) {
                    reset_all_temps(nb_temps);
                    s->gen_opc_buf[op_index] = INDEX_op_br;
                    gen_args[0] = args[3];
                    gen_args += 1;
                } else {
                    s->gen_opc_buf[op_index] = INDEX_op_nop;
                }
                args += 4;
                break;
            }
            goto do_default;
        CASE_OP_32_64(movcond):
            tmp = do_constant_folding_cond(op, args[1], args[2], args[5]);
            if (tmp != 2) {
                if (temps_are_copies(args[0], args[4-tmp])) {
                    s->gen_opc_buf[op_index] = INDEX_op_nop;
                } else if (temps[args[4-tmp]].state == TCG_TEMP_CONST) {
                    tcg_opt_gen_movi(s, op_index, gen_args, op,
                                     args[0], temps[args[4-tmp]].val);
                    gen_args += 2;
                } else {
                    tcg_opt_gen_mov(s, op_index, gen_args, op,
                                    args[0], args[4-tmp]);
                    gen_args += 2;
                }
                args += 6;
                break;
            }
            goto do_default;
        case INDEX_op_add2_i32:
        case INDEX_op_sub2_i32:
            if (temps[args[2]].state == TCG_TEMP_CONST
                && temps[args[3]].state == TCG_TEMP_CONST
                && temps[args[4]].state == TCG_TEMP_CONST
                && temps[args[5]].state == TCG_TEMP_CONST) {
                uint32_t al = temps[args[2]].val;
                uint32_t ah = temps[args[3]].val;
                uint32_t bl = temps[args[4]].val;
                uint32_t bh = temps[args[5]].val;
                uint64_t a = ((uint64_t)ah << 32) | al;
                uint64_t b = ((uint64_t)bh << 32) | bl;
                TCGArg rl, rh;

                if (op == INDEX_op_add2_i32) {
                    a += b;
                } else {
                    a -= b;
                }

                /* We emit the extra nop when we emit the add2/sub2.  */
                assert(s->gen_opc_buf[op_index + 1] == INDEX_op_nop);

                rl = args[0];
                rh = args[1];
                tcg_opt_gen_movi(s, op_index, &gen_args[0],
                                 op, rl, (uint32_t)a);
                tcg_opt_gen_movi(s, ++op_index, &gen_args[2],
                                 op, rh, (uint32_t)(a >> 32));
                gen_args += 4;
                args += 6;
                break;
            }
            goto do_default;
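        /* Worked example (illustrative): add2 with constant inputs
           al = 0xffffffff, ah = 0, bl = 1, bh = 0 forms
           a = 0x00000000ffffffff and b = 1, computes a + b =
           0x0000000100000000, and emits "movi rl, 0" and "movi rh, 1";
           the carry out of the low word lands in the high result with no
           add2 left to execute at run time. */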
        case INDEX_op_mulu2_i32:
            if (temps[args[2]].state == TCG_TEMP_CONST
                && temps[args[3]].state == TCG_TEMP_CONST) {
                uint32_t a = temps[args[2]].val;
                uint32_t b = temps[args[3]].val;
                uint64_t r = (uint64_t)a * b;
                TCGArg rl, rh;

                /* We emit the extra nop when we emit the mulu2.  */
                assert(s->gen_opc_buf[op_index + 1] == INDEX_op_nop);

                rl = args[0];
                rh = args[1];
                tcg_opt_gen_movi(s, op_index, &gen_args[0],
                                 op, rl, (uint32_t)r);
                tcg_opt_gen_movi(s, ++op_index, &gen_args[2],
                                 op, rh, (uint32_t)(r >> 32));
                gen_args += 4;
                args += 4;
                break;
            }
            goto do_default;
        case INDEX_op_brcond2_i32:
            tmp = do_constant_folding_cond2(&args[0], &args[2], args[4]);
            if (tmp != 2) {
                if (tmp) {
        do_brcond_true:
                    reset_all_temps(nb_temps);
                    s->gen_opc_buf[op_index] = INDEX_op_br;
                    gen_args[0] = args[5];
                    gen_args += 1;
                } else {
        do_brcond_false:
                    s->gen_opc_buf[op_index] = INDEX_op_nop;
                }
            } else if ((args[4] == TCG_COND_LT || args[4] == TCG_COND_GE)
                       && temps[args[2]].state == TCG_TEMP_CONST
                       && temps[args[3]].state == TCG_TEMP_CONST
                       && temps[args[2]].val == 0
                       && temps[args[3]].val == 0) {
                /* Simplify LT/GE comparisons vs zero to a single compare
                   vs the high word of the input.  */
        do_brcond_high:
                reset_all_temps(nb_temps);
                s->gen_opc_buf[op_index] = INDEX_op_brcond_i32;
                gen_args[0] = args[1];
                gen_args[1] = args[3];
                gen_args[2] = args[4];
                gen_args[3] = args[5];
                gen_args += 4;
            } else if (args[4] == TCG_COND_EQ) {
                /* Simplify EQ comparisons where one of the pairs
                   can be simplified.  */
                tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
                                               args[0], args[2], TCG_COND_EQ);
                if (tmp == 0) {
                    goto do_brcond_false;
                } else if (tmp == 1) {
                    goto do_brcond_high;
                }
                tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
                                               args[1], args[3], TCG_COND_EQ);
                if (tmp == 0) {
                    goto do_brcond_false;
                } else if (tmp != 1) {
                    goto do_default;
                }
        do_brcond_low:
                reset_all_temps(nb_temps);
                s->gen_opc_buf[op_index] = INDEX_op_brcond_i32;
                gen_args[0] = args[0];
                gen_args[1] = args[2];
                gen_args[2] = args[4];
                gen_args[3] = args[5];
                gen_args += 4;
            } else if (args[4] == TCG_COND_NE) {
                /* Simplify NE comparisons where one of the pairs
                   can be simplified.  */
                tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
                                               args[0], args[2], TCG_COND_NE);
                if (tmp == 0) {
                    goto do_brcond_high;
                } else if (tmp == 1) {
                    goto do_brcond_true;
                }
                tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
                                               args[1], args[3], TCG_COND_NE);
                if (tmp == 0) {
                    goto do_brcond_low;
                } else if (tmp == 1) {
                    goto do_brcond_true;
                }
                goto do_default;
            } else {
                goto do_default;
            }
            args += 6;
            break;
        case INDEX_op_setcond2_i32:
            tmp = do_constant_folding_cond2(&args[1], &args[3], args[5]);
            if (tmp != 2) {
        do_setcond_const:
                tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], tmp);
                gen_args += 2;
            } else if ((args[5] == TCG_COND_LT || args[5] == TCG_COND_GE)
                       && temps[args[3]].state == TCG_TEMP_CONST
                       && temps[args[4]].state == TCG_TEMP_CONST
                       && temps[args[3]].val == 0
                       && temps[args[4]].val == 0) {
                /* Simplify LT/GE comparisons vs zero to a single compare
                   vs the high word of the input.  */
        do_setcond_high:
                s->gen_opc_buf[op_index] = INDEX_op_setcond_i32;
                reset_temp(args[0]);
                temps[args[0]].mask = 1;
                gen_args[0] = args[0];
                gen_args[1] = args[2];
                gen_args[2] = args[4];
                gen_args[3] = args[5];
                gen_args += 4;
            } else if (args[5] == TCG_COND_EQ) {
                /* Simplify EQ comparisons where one of the pairs
                   can be simplified.  */
                tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
                                               args[1], args[3], TCG_COND_EQ);
                if (tmp == 0) {
                    goto do_setcond_const;
                } else if (tmp == 1) {
                    goto do_setcond_high;
                }
                tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
                                               args[2], args[4], TCG_COND_EQ);
                if (tmp == 0) {
                    goto do_setcond_high;
                } else if (tmp != 1) {
                    goto do_default;
                }
        do_setcond_low:
                reset_temp(args[0]);
                temps[args[0]].mask = 1;
                s->gen_opc_buf[op_index] = INDEX_op_setcond_i32;
                gen_args[0] = args[0];
                gen_args[1] = args[1];
                gen_args[2] = args[3];
                gen_args[3] = args[5];
                gen_args += 4;
            } else if (args[5] == TCG_COND_NE) {
                /* Simplify NE comparisons where one of the pairs
                   can be simplified.  */
                tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
                                               args[1], args[3], TCG_COND_NE);
                if (tmp == 0) {
                    goto do_setcond_high;
                } else if (tmp == 1) {
                    goto do_setcond_const;
                }
                tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
                                               args[2], args[4], TCG_COND_NE);
                if (tmp == 0) {
                    goto do_setcond_low;
                } else if (tmp == 1) {
                    goto do_setcond_const;
                }
                goto do_default;
            } else {
                goto do_default;
            }
            args += 6;
            break;
        case INDEX_op_call:
            if (!(args[nb_oargs + nb_iargs + 1]
                  & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
                for (i = 0; i < nb_globals; i++) {
                    reset_temp(i);
                }
            }
            goto do_reset_output;
        default:
        do_default:
            /* Default case: we know nothing about the operation (or were
               unable to compute its result), so no propagation is done.
               We trash everything if the operation is the end of a basic
               block, otherwise we only trash the output args.  "mask" is
               the non-zero bits mask for the first output arg.  */
            if (def->flags & TCG_OPF_BB_END) {
                reset_all_temps(nb_temps);
            } else {
        do_reset_output:
                for (i = 0; i < nb_oargs; i++) {
                    reset_temp(args[i]);
                    /* Save the corresponding known-zero bits mask for the
                       first output argument (only one supported so far). */
                    if (i == 0) {
                        temps[args[i]].mask = mask;
                    }
                }
            }
            for (i = 0; i < nb_args; i++) {
                gen_args[i] = args[i];
            }
            args += nb_args;
            gen_args += nb_args;
            break;
        }
    }

    return gen_args;
}
TCGArg *tcg_optimize(TCGContext *s, uint16_t *tcg_opc_ptr,
                     TCGArg *args, TCGOpDef *tcg_op_defs)
{
    TCGArg *res;
    res = tcg_constant_folding(s, tcg_opc_ptr, args, tcg_op_defs);
    return res;
}