/*
 * Optimizations for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2010 Samsung Electronics.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "config.h"

#include <stdlib.h>
#include <stdio.h>

#include "qemu-common.h"
#include "tcg-op.h"

#define CASE_OP_32_64(x)                        \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)
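
/* For instance, CASE_OP_32_64(add) expands to the pair of labels
 *     case INDEX_op_add_i32:
 *     case INDEX_op_add_i64:
 * so a single case body below handles both widths of an op.
 */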

/* The three states below are assumed from their uses in this file: a temp
   is undefined, holds a known constant, or is a member of a copy list.  */
typedef enum {
    TCG_TEMP_UNDEF = 0,
    TCG_TEMP_CONST,
    TCG_TEMP_COPY,
} tcg_temp_state;

struct tcg_temp_info {
    tcg_temp_state state;
    uint16_t prev_copy;
    uint16_t next_copy;
    tcg_target_ulong val;
    tcg_target_ulong mask;
};

static struct tcg_temp_info temps[TCG_MAX_TEMPS];
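
/* Sketch of the copy-list invariant assumed throughout (not spelled out in
   the code): temps known to hold the same value form one circular doubly
   linked list via next_copy/prev_copy.  After "mov t2, t1; mov t3, t1" the
   ring is t1 -> t3 -> t2 -> t1.  reset_temp() below unlinks one node.  */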

/* Reset TEMP's state to TCG_TEMP_UNDEF.  If TEMP only had one copy, remove
   the copy flag from the left temp.  */
static void reset_temp(TCGArg temp)
{
    if (temps[temp].state == TCG_TEMP_COPY) {
        if (temps[temp].prev_copy == temps[temp].next_copy) {
            temps[temps[temp].next_copy].state = TCG_TEMP_UNDEF;
        } else {
            temps[temps[temp].next_copy].prev_copy = temps[temp].prev_copy;
            temps[temps[temp].prev_copy].next_copy = temps[temp].next_copy;
        }
    }
    temps[temp].state = TCG_TEMP_UNDEF;
    temps[temp].mask = -1;
}

/* Reset all temporaries, given that there are NB_TEMPS of them. */
static void reset_all_temps(int nb_temps)
{
    int i;
    for (i = 0; i < nb_temps; i++) {
        temps[i].state = TCG_TEMP_UNDEF;
        temps[i].mask = -1;
    }
}

static int op_bits(TCGOpcode op)
{
    const TCGOpDef *def = &tcg_op_defs[op];
    return def->flags & TCG_OPF_64BIT ? 64 : 32;
}

static TCGOpcode op_to_movi(TCGOpcode op)
{
    switch (op_bits(op)) {
    case 32:
        return INDEX_op_movi_i32;
    case 64:
        return INDEX_op_movi_i64;
    default:
        fprintf(stderr, "op_to_movi: unexpected return value of "
                "function op_bits.\n");
        tcg_abort();
    }
}

static TCGArg find_better_copy(TCGContext *s, TCGArg temp)
{
    TCGArg i;

    /* If this is already a global, we can't do better. */
    if (temp < s->nb_globals) {
        return temp;
    }

    /* Search for a global first. */
    for (i = temps[temp].next_copy ; i != temp ; i = temps[i].next_copy) {
        if (i < s->nb_globals) {
            return i;
        }
    }

    /* If it is a temp, search for a temp local. */
    if (!s->temps[temp].temp_local) {
        for (i = temps[temp].next_copy ; i != temp ; i = temps[i].next_copy) {
            if (s->temps[i].temp_local) {
                return i;
            }
        }
    }

    /* Failure to find a better representation, return the same temp. */
    return temp;
}

static bool temps_are_copies(TCGArg arg1, TCGArg arg2)
{
    TCGArg i;

    if (arg1 == arg2) {
        return true;
    }

    if (temps[arg1].state != TCG_TEMP_COPY
        || temps[arg2].state != TCG_TEMP_COPY) {
        return false;
    }

    for (i = temps[arg1].next_copy ; i != arg1 ; i = temps[i].next_copy) {
        if (i == arg2) {
            return true;
        }
    }

    return false;
}

static void tcg_opt_gen_mov(TCGContext *s, TCGArg *gen_args,
                            TCGArg dst, TCGArg src)
{
    reset_temp(dst);
    temps[dst].mask = temps[src].mask;
    assert(temps[src].state != TCG_TEMP_CONST);

    if (s->temps[src].type == s->temps[dst].type) {
        if (temps[src].state != TCG_TEMP_COPY) {
            temps[src].state = TCG_TEMP_COPY;
            temps[src].next_copy = src;
            temps[src].prev_copy = src;
        }
        temps[dst].state = TCG_TEMP_COPY;
        temps[dst].next_copy = temps[src].next_copy;
        temps[dst].prev_copy = src;
        temps[temps[dst].next_copy].prev_copy = dst;
        temps[src].next_copy = dst;
    }

    gen_args[0] = dst;
    gen_args[1] = src;
}

static void tcg_opt_gen_movi(TCGArg *gen_args, TCGArg dst, TCGArg val)
{
    reset_temp(dst);
    temps[dst].state = TCG_TEMP_CONST;
    temps[dst].val = val;
    temps[dst].mask = val;
    gen_args[0] = dst;
    gen_args[1] = val;
}
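
/* Note on the mask convention used above and below: temps[t].mask has a 0
 * in every bit position known to be zero in t, so -1 means "nothing known"
 * and, for a constant, mask == val means exactly the bits of the value may
 * be set.  This is the basis of the known-zero-bits simplification in
 * tcg_constant_folding().
 */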

static TCGOpcode op_to_mov(TCGOpcode op)
{
    switch (op_bits(op)) {
    case 32:
        return INDEX_op_mov_i32;
    case 64:
        return INDEX_op_mov_i64;
    default:
        fprintf(stderr, "op_to_mov: unexpected return value of "
                "function op_bits.\n");
        tcg_abort();
    }
}

static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y)
{
    uint64_t l64, h64;

    switch (op) {
    CASE_OP_32_64(add):
        return x + y;

    CASE_OP_32_64(sub):
        return x - y;

    CASE_OP_32_64(mul):
        return x * y;

    CASE_OP_32_64(and):
        return x & y;

    CASE_OP_32_64(or):
        return x | y;

    CASE_OP_32_64(xor):
        return x ^ y;

    case INDEX_op_shl_i32:
        return (uint32_t)x << (y & 31);

    case INDEX_op_shl_i64:
        return (uint64_t)x << (y & 63);

    case INDEX_op_shr_i32:
        return (uint32_t)x >> (y & 31);

    case INDEX_op_trunc_shr_i32:
    case INDEX_op_shr_i64:
        return (uint64_t)x >> (y & 63);

    case INDEX_op_sar_i32:
        return (int32_t)x >> (y & 31);

    case INDEX_op_sar_i64:
        return (int64_t)x >> (y & 63);

    case INDEX_op_rotr_i32:
        return ror32(x, y & 31);

    case INDEX_op_rotr_i64:
        return ror64(x, y & 63);

    case INDEX_op_rotl_i32:
        return rol32(x, y & 31);

    case INDEX_op_rotl_i64:
        return rol64(x, y & 63);

    CASE_OP_32_64(not):
        return ~x;

    CASE_OP_32_64(neg):
        return -x;

    CASE_OP_32_64(andc):
        return x & ~y;

    CASE_OP_32_64(orc):
        return x | ~y;

    CASE_OP_32_64(eqv):
        return ~(x ^ y);

    CASE_OP_32_64(nand):
        return ~(x & y);

    CASE_OP_32_64(nor):
        return ~(x | y);

    CASE_OP_32_64(ext8s):
        return (int8_t)x;

    CASE_OP_32_64(ext16s):
        return (int16_t)x;

    CASE_OP_32_64(ext8u):
        return (uint8_t)x;

    CASE_OP_32_64(ext16u):
        return (uint16_t)x;

    case INDEX_op_ext32s_i64:
        return (int32_t)x;

    case INDEX_op_ext32u_i64:
        return (uint32_t)x;

    case INDEX_op_muluh_i32:
        return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
    case INDEX_op_mulsh_i32:
        return ((int64_t)(int32_t)x * (int32_t)y) >> 32;

    case INDEX_op_muluh_i64:
        mulu64(&l64, &h64, x, y);
        return h64;
    case INDEX_op_mulsh_i64:
        muls64(&l64, &h64, x, y);
        return h64;

    case INDEX_op_div_i32:
        /* Avoid crashing on divide by zero, otherwise undefined. */
        return (int32_t)x / ((int32_t)y ? : 1);
    case INDEX_op_divu_i32:
        return (uint32_t)x / ((uint32_t)y ? : 1);
    case INDEX_op_div_i64:
        return (int64_t)x / ((int64_t)y ? : 1);
    case INDEX_op_divu_i64:
        return (uint64_t)x / ((uint64_t)y ? : 1);

    case INDEX_op_rem_i32:
        return (int32_t)x % ((int32_t)y ? : 1);
    case INDEX_op_remu_i32:
        return (uint32_t)x % ((uint32_t)y ? : 1);
    case INDEX_op_rem_i64:
        return (int64_t)x % ((int64_t)y ? : 1);
    case INDEX_op_remu_i64:
        return (uint64_t)x % ((uint64_t)y ? : 1);
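
    /* The "y ? : 1" above is the GNU C conditional-with-omitted-operand
       extension: it evaluates to y when y is nonzero and to 1 otherwise,
       so a zero divisor is silently replaced by 1 rather than trapping.  */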

    default:
        fprintf(stderr,
                "Unrecognized operation %d in do_constant_folding.\n", op);
        tcg_abort();
    }
}

static TCGArg do_constant_folding(TCGOpcode op, TCGArg x, TCGArg y)
{
    TCGArg res = do_constant_folding_2(op, x, y);
    if (op_bits(op) == 32) {
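        /* Truncate 32-bit results to 32 bits, e.g. folding add_i32 with
           x = 0xffffffff and y = 1 yields 0x100000000 on a 64-bit host,
           which the masking below reduces to the correct value 0.  */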
        res &= 0xffffffff;
    }
    return res;
}

static bool do_constant_folding_cond_32(uint32_t x, uint32_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int32_t)x < (int32_t)y;
    case TCG_COND_GE:
        return (int32_t)x >= (int32_t)y;
    case TCG_COND_LE:
        return (int32_t)x <= (int32_t)y;
    case TCG_COND_GT:
        return (int32_t)x > (int32_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    default:
        tcg_abort();
    }
}

static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int64_t)x < (int64_t)y;
    case TCG_COND_GE:
        return (int64_t)x >= (int64_t)y;
    case TCG_COND_LE:
        return (int64_t)x <= (int64_t)y;
    case TCG_COND_GT:
        return (int64_t)x > (int64_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    default:
        tcg_abort();
    }
}

/* Fold a comparison of a value against itself: only the condition's
   behavior on equality matters.  */
static bool do_constant_folding_cond_eq(TCGCond c)
{
    switch (c) {
    case TCG_COND_NE:
    case TCG_COND_LT:
    case TCG_COND_GT:
    case TCG_COND_LTU:
    case TCG_COND_GTU:
        return false;
    case TCG_COND_EQ:
    case TCG_COND_GE:
    case TCG_COND_LE:
    case TCG_COND_GEU:
    case TCG_COND_LEU:
        return true;
    default:
        tcg_abort();
    }
}

/* Return 2 if the condition can't be simplified, and the result
   of the condition (0 or 1) if it can */
static TCGArg do_constant_folding_cond(TCGOpcode op, TCGArg x,
                                       TCGArg y, TCGCond c)
{
    if (temps[x].state == TCG_TEMP_CONST && temps[y].state == TCG_TEMP_CONST) {
        switch (op_bits(op)) {
        case 32:
            return do_constant_folding_cond_32(temps[x].val, temps[y].val, c);
        case 64:
            return do_constant_folding_cond_64(temps[x].val, temps[y].val, c);
        default:
            tcg_abort();
        }
    } else if (temps_are_copies(x, y)) {
        return do_constant_folding_cond_eq(c);
    } else if (temps[y].state == TCG_TEMP_CONST && temps[y].val == 0) {
        /* Unsigned comparisons against zero have a fixed outcome.  */
        switch (c) {
        case TCG_COND_LTU:
            return 0;
        case TCG_COND_GEU:
            return 1;
        default:
            return 2;
        }
    }
    return 2;
}

/* Return 2 if the condition can't be simplified, and the result
   of the condition (0 or 1) if it can */
static TCGArg do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
{
    TCGArg al = p1[0], ah = p1[1];
    TCGArg bl = p2[0], bh = p2[1];

    if (temps[bl].state == TCG_TEMP_CONST
        && temps[bh].state == TCG_TEMP_CONST) {
        uint64_t b = ((uint64_t)temps[bh].val << 32) | (uint32_t)temps[bl].val;

        if (temps[al].state == TCG_TEMP_CONST
            && temps[ah].state == TCG_TEMP_CONST) {
            uint64_t a;
            a = ((uint64_t)temps[ah].val << 32) | (uint32_t)temps[al].val;
            return do_constant_folding_cond_64(a, b, c);
        }
        if (b == 0) {
            switch (c) {
            case TCG_COND_LTU:
                return 0;
            case TCG_COND_GEU:
                return 1;
            default:
                break;
            }
        }
    }
    if (temps_are_copies(al, bl) && temps_are_copies(ah, bh)) {
        return do_constant_folding_cond_eq(c);
    }
    return 2;
}
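
/* Example of the canonicalization performed by swap_commutative() below:
 * "add_i32 t0, $5, t1" has its inputs swapped to "add_i32 t0, t1, $5", and
 * "add_i32 t0, t1, t0" becomes "add_i32 t0, t0, t1", so later passes and
 * backends only need to recognize the constant (or the destination) in the
 * second source operand.
 */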
static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
{
    TCGArg a1 = *p1, a2 = *p2;
    int sum = 0;
    sum += temps[a1].state == TCG_TEMP_CONST;
    sum -= temps[a2].state == TCG_TEMP_CONST;

    /* Prefer the constant in second argument, and then the form
       op a, a, b, which is better handled on non-RISC hosts. */
    if (sum > 0 || (sum == 0 && dest == a2)) {
        *p1 = a2;
        *p2 = a1;
        return true;
    }
    return false;
}

static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
{
    int sum = 0;
    sum += temps[p1[0]].state == TCG_TEMP_CONST;
    sum += temps[p1[1]].state == TCG_TEMP_CONST;
    sum -= temps[p2[0]].state == TCG_TEMP_CONST;
    sum -= temps[p2[1]].state == TCG_TEMP_CONST;
    if (sum > 0) {
        TCGArg t;
        t = p1[0], p1[0] = p2[0], p2[0] = t;
        t = p1[1], p1[1] = p2[1], p2[1] = t;
        return true;
    }
    return false;
}

/* Propagate constants and copies, fold constant expressions. */
static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
                                    TCGArg *args, TCGOpDef *tcg_op_defs)
{
    int nb_ops, op_index, nb_temps, nb_globals;
    TCGArg *gen_args;

    /* Array TEMPS has an element for each temp.
       If this temp holds a constant then its value is kept in the element.
       If this temp is a copy of other ones then the other copies are
       available through the doubly linked circular list. */

    nb_temps = s->nb_temps;
    nb_globals = s->nb_globals;
    reset_all_temps(nb_temps);

    nb_ops = tcg_opc_ptr - s->gen_opc_buf;
    gen_args = args;
    for (op_index = 0; op_index < nb_ops; op_index++) {
        TCGOpcode op = s->gen_opc_buf[op_index];
        const TCGOpDef *def = &tcg_op_defs[op];
        tcg_target_ulong mask, affected;
        int nb_oargs, nb_iargs, nb_args, i;
        TCGArg tmp;

        if (op == INDEX_op_call) {
            *gen_args++ = tmp = *args++;
            nb_oargs = tmp >> 16;
            nb_iargs = tmp & 0xffff;
            nb_args = nb_oargs + nb_iargs + def->nb_cargs;
        } else {
            nb_oargs = def->nb_oargs;
            nb_iargs = def->nb_iargs;
            nb_args = def->nb_args;
        }

        /* Do copy propagation */
        for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
            if (temps[args[i]].state == TCG_TEMP_COPY) {
                args[i] = find_better_copy(s, args[i]);
            }
        }

        /* For commutative operations make constant second argument */
        switch (op) {
        CASE_OP_32_64(add):
        CASE_OP_32_64(mul):
        CASE_OP_32_64(and):
        CASE_OP_32_64(or):
        CASE_OP_32_64(xor):
        CASE_OP_32_64(eqv):
        CASE_OP_32_64(nand):
        CASE_OP_32_64(nor):
        CASE_OP_32_64(muluh):
        CASE_OP_32_64(mulsh):
            swap_commutative(args[0], &args[1], &args[2]);
            break;
        CASE_OP_32_64(brcond):
            if (swap_commutative(-1, &args[0], &args[1])) {
                args[2] = tcg_swap_cond(args[2]);
            }
            break;
        CASE_OP_32_64(setcond):
            if (swap_commutative(args[0], &args[1], &args[2])) {
                args[3] = tcg_swap_cond(args[3]);
            }
            break;
        CASE_OP_32_64(movcond):
            if (swap_commutative(-1, &args[1], &args[2])) {
                args[5] = tcg_swap_cond(args[5]);
            }
            /* For movcond, we canonicalize the "false" input reg to match
               the destination reg so that the tcg backend can implement
               a "move if true" operation.  */
            if (swap_commutative(args[0], &args[4], &args[3])) {
                args[5] = tcg_invert_cond(args[5]);
            }
            break;
        CASE_OP_32_64(add2):
            swap_commutative(args[0], &args[2], &args[4]);
            swap_commutative(args[1], &args[3], &args[5]);
            break;
        CASE_OP_32_64(mulu2):
        CASE_OP_32_64(muls2):
            swap_commutative(args[0], &args[2], &args[3]);
            break;
        case INDEX_op_brcond2_i32:
            if (swap_commutative2(&args[0], &args[2])) {
                args[4] = tcg_swap_cond(args[4]);
            }
            break;
        case INDEX_op_setcond2_i32:
            if (swap_commutative2(&args[1], &args[3])) {
                args[5] = tcg_swap_cond(args[5]);
            }
            break;
        default:
            break;
        }

        /* Simplify expressions for "shift/rot r, 0, a => movi r, 0",
           and "sub r, 0, a => neg r, a" case.  */
        switch (op) {
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
            if (temps[args[1]].state == TCG_TEMP_CONST
                && temps[args[1]].val == 0) {
                s->gen_opc_buf[op_index] = op_to_movi(op);
                tcg_opt_gen_movi(gen_args, args[0], 0);
                args += 3;
                gen_args += 2;
                continue;
            }
            break;
        CASE_OP_32_64(sub):
            {
                TCGOpcode neg_op;
                bool have_neg;

                if (temps[args[2]].state == TCG_TEMP_CONST) {
                    /* Proceed with possible constant folding. */
                    break;
                }
                if (op == INDEX_op_sub_i32) {
                    neg_op = INDEX_op_neg_i32;
                    have_neg = TCG_TARGET_HAS_neg_i32;
                } else {
                    neg_op = INDEX_op_neg_i64;
                    have_neg = TCG_TARGET_HAS_neg_i64;
                }
                if (!have_neg) {
                    break;
                }
                if (temps[args[1]].state == TCG_TEMP_CONST
                    && temps[args[1]].val == 0) {
                    s->gen_opc_buf[op_index] = neg_op;
                    reset_temp(args[0]);
                    gen_args[0] = args[0];
                    gen_args[1] = args[2];
                    args += 3;
                    gen_args += 2;
                    continue;
                }
            }
            break;
        CASE_OP_32_64(xor):
        CASE_OP_32_64(nand):
            if (temps[args[1]].state != TCG_TEMP_CONST
                && temps[args[2]].state == TCG_TEMP_CONST
                && temps[args[2]].val == -1) {
                i = 1;
                goto try_not;
            }
            break;
        CASE_OP_32_64(nor):
            if (temps[args[1]].state != TCG_TEMP_CONST
                && temps[args[2]].state == TCG_TEMP_CONST
                && temps[args[2]].val == 0) {
                i = 1;
                goto try_not;
            }
            break;
        CASE_OP_32_64(andc):
            if (temps[args[2]].state != TCG_TEMP_CONST
                && temps[args[1]].state == TCG_TEMP_CONST
                && temps[args[1]].val == -1) {
                i = 2;
                goto try_not;
            }
            break;
        CASE_OP_32_64(orc):
        CASE_OP_32_64(eqv):
            if (temps[args[2]].state != TCG_TEMP_CONST
                && temps[args[1]].state == TCG_TEMP_CONST
                && temps[args[1]].val == 0) {
                i = 2;
                goto try_not;
            }
            break;
        try_not:
            {
                TCGOpcode not_op;
                bool have_not;

                if (def->flags & TCG_OPF_64BIT) {
                    not_op = INDEX_op_not_i64;
                    have_not = TCG_TARGET_HAS_not_i64;
                } else {
                    not_op = INDEX_op_not_i32;
                    have_not = TCG_TARGET_HAS_not_i32;
                }
                if (!have_not) {
                    break;
                }
                s->gen_opc_buf[op_index] = not_op;
                reset_temp(args[0]);
                gen_args[0] = args[0];
                gen_args[1] = args[i];
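                /* At this point the op has been rewritten in place; e.g.
                   "xor_i32 t0, t1, $0xffffffff" is now "not_i32 t0, t1".  */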
                args += 3;
                gen_args += 2;
                continue;
            }
        default:
            break;
        }

        /* Simplify expression for "op r, a, const => mov r, a" cases */
        switch (op) {
        CASE_OP_32_64(add):
        CASE_OP_32_64(sub):
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
        CASE_OP_32_64(or):
        CASE_OP_32_64(xor):
        CASE_OP_32_64(andc):
            if (temps[args[1]].state != TCG_TEMP_CONST
                && temps[args[2]].state == TCG_TEMP_CONST
                && temps[args[2]].val == 0) {
                goto do_mov3;
            }
            break;
        CASE_OP_32_64(and):
        CASE_OP_32_64(orc):
        CASE_OP_32_64(eqv):
            if (temps[args[1]].state != TCG_TEMP_CONST
                && temps[args[2]].state == TCG_TEMP_CONST
                && temps[args[2]].val == -1) {
                goto do_mov3;
            }
            break;
        do_mov3:
            if (temps_are_copies(args[0], args[1])) {
                s->gen_opc_buf[op_index] = INDEX_op_nop;
            } else {
                s->gen_opc_buf[op_index] = op_to_mov(op);
                tcg_opt_gen_mov(s, gen_args, args[0], args[1]);
                gen_args += 2;
            }
            args += 3;
            continue;
        default:
            break;
        }

        /* Simplify using known-zero bits.  Currently only ops with a single
           output argument are supported. */
        mask = -1;
        affected = -1;
        switch (op) {
        CASE_OP_32_64(ext8s):
            if ((temps[args[1]].mask & 0x80) != 0) {
                break;
            }
        CASE_OP_32_64(ext8u):
            mask = 0xff;
            goto and_const;
        CASE_OP_32_64(ext16s):
            if ((temps[args[1]].mask & 0x8000) != 0) {
                break;
            }
        CASE_OP_32_64(ext16u):
            mask = 0xffff;
            goto and_const;
        case INDEX_op_ext32s_i64:
            if ((temps[args[1]].mask & 0x80000000) != 0) {
                break;
            }
        case INDEX_op_ext32u_i64:
            mask = 0xffffffffU;
            goto and_const;

        CASE_OP_32_64(and):
            mask = temps[args[2]].mask;
            if (temps[args[2]].state == TCG_TEMP_CONST) {
        and_const:
                affected = temps[args[1]].mask & ~mask;
            }
            mask = temps[args[1]].mask & mask;
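            /* "affected" collects the possibly-set input bits that the mask
               clears.  If it ends up zero, the AND (or the zero/sign
               extension that jumped to and_const) cannot change args[1],
               and the affected == 0 test below turns the op into a mov.  */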
            break;

        CASE_OP_32_64(andc):
            /* Known-zeros does not imply known-ones.  Therefore unless
               args[2] is constant, we can't infer anything from it.  */
            if (temps[args[2]].state == TCG_TEMP_CONST) {
                mask = ~temps[args[2]].mask;
                goto and_const;
            }
            /* But we certainly know nothing outside args[1] may be set. */
            mask = temps[args[1]].mask;
            break;

        case INDEX_op_sar_i32:
            if (temps[args[2]].state == TCG_TEMP_CONST) {
                tmp = temps[args[2]].val & 31;
                mask = (int32_t)temps[args[1]].mask >> tmp;
            }
            break;
        case INDEX_op_sar_i64:
            if (temps[args[2]].state == TCG_TEMP_CONST) {
                tmp = temps[args[2]].val & 63;
                mask = (int64_t)temps[args[1]].mask >> tmp;
            }
            break;

        case INDEX_op_shr_i32:
            if (temps[args[2]].state == TCG_TEMP_CONST) {
                tmp = temps[args[2]].val & 31;
                mask = (uint32_t)temps[args[1]].mask >> tmp;
            }
            break;
        case INDEX_op_shr_i64:
            if (temps[args[2]].state == TCG_TEMP_CONST) {
                tmp = temps[args[2]].val & 63;
                mask = (uint64_t)temps[args[1]].mask >> tmp;
            }
            break;

        case INDEX_op_trunc_shr_i32:
            mask = (uint64_t)temps[args[1]].mask >> args[2];
            break;

        CASE_OP_32_64(shl):
            if (temps[args[2]].state == TCG_TEMP_CONST) {
                tmp = temps[args[2]].val & (TCG_TARGET_REG_BITS - 1);
                mask = temps[args[1]].mask << tmp;
            }
            break;

        CASE_OP_32_64(neg):
            /* Set to 1 all bits to the left of the rightmost.  */
            mask = -(temps[args[1]].mask & -temps[args[1]].mask);
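            /* Worked example: mask & -mask isolates the lowest bit that may
               be set.  If temps[args[1]].mask is 0x30 (the input is a
               multiple of 16 below 64), the lowest such bit is 0x10, and
               -(0x10) = 0x...fff0: the negation is still a multiple of 16,
               so its low four bits remain known zero.  */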
            break;

        CASE_OP_32_64(deposit):
            mask = deposit64(temps[args[1]].mask, args[3], args[4],
                             temps[args[2]].mask);
            break;

        CASE_OP_32_64(or):
        CASE_OP_32_64(xor):
            mask = temps[args[1]].mask | temps[args[2]].mask;
            break;

        CASE_OP_32_64(setcond):
            mask = 1;
            break;

        CASE_OP_32_64(movcond):
            mask = temps[args[3]].mask | temps[args[4]].mask;
            break;

        CASE_OP_32_64(ld8u):
        case INDEX_op_qemu_ld8u:
            mask = 0xff;
            break;
        CASE_OP_32_64(ld16u):
        case INDEX_op_qemu_ld16u:
            mask = 0xffff;
            break;
        case INDEX_op_ld32u_i64:
#if TCG_TARGET_REG_BITS == 64
        case INDEX_op_qemu_ld32u:
#endif
            mask = 0xffffffffu;
            break;

        CASE_OP_32_64(qemu_ld):
            {
                TCGMemOp mop = args[nb_oargs + nb_iargs];
                if (!(mop & MO_SIGN)) {
                    mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
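                    /* All bits of the (8 << size)-bit unsigned load result
                       may be set, e.g. a 2-byte load has MO_SIZE = 1, so
                       8 << 1 = 16 and the mask is (2ULL << 15) - 1 = 0xffff.
                       The 2ULL << (n - 1) form avoids an undefined 64-bit
                       shift by 64 when n is 64.  */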
                }
            }
            break;

        default:
            break;
        }

        /* 32-bit ops (non 64-bit ops and non load/store ops) generate 32-bit
           results */
        if (!(def->flags & (TCG_OPF_CALL_CLOBBER | TCG_OPF_64BIT))) {
            mask &= 0xffffffffu;
        }

        if (mask == 0) {
            assert(nb_oargs == 1);
            s->gen_opc_buf[op_index] = op_to_movi(op);
            tcg_opt_gen_movi(gen_args, args[0], 0);
            args += nb_args;
            gen_args += 2;
            continue;
        }
        if (affected == 0) {
            assert(nb_oargs == 1);
            if (temps_are_copies(args[0], args[1])) {
                s->gen_opc_buf[op_index] = INDEX_op_nop;
            } else if (temps[args[1]].state != TCG_TEMP_CONST) {
                s->gen_opc_buf[op_index] = op_to_mov(op);
                tcg_opt_gen_mov(s, gen_args, args[0], args[1]);
                gen_args += 2;
            } else {
                s->gen_opc_buf[op_index] = op_to_movi(op);
                tcg_opt_gen_movi(gen_args, args[0], temps[args[1]].val);
                gen_args += 2;
            }
            args += nb_args;
            continue;
        }

        /* Simplify expression for "op r, a, 0 => movi r, 0" cases */
        switch (op) {
        CASE_OP_32_64(and):
        CASE_OP_32_64(mul):
        CASE_OP_32_64(muluh):
        CASE_OP_32_64(mulsh):
            if ((temps[args[2]].state == TCG_TEMP_CONST
                && temps[args[2]].val == 0)) {
                s->gen_opc_buf[op_index] = op_to_movi(op);
                tcg_opt_gen_movi(gen_args, args[0], 0);
                args += 3;
                gen_args += 2;
                continue;
            }
            break;
        default:
            break;
        }

        /* Simplify expression for "op r, a, a => mov r, a" cases */
        switch (op) {
        CASE_OP_32_64(or):
        CASE_OP_32_64(and):
            if (temps_are_copies(args[1], args[2])) {
                if (temps_are_copies(args[0], args[1])) {
                    s->gen_opc_buf[op_index] = INDEX_op_nop;
                } else {
                    s->gen_opc_buf[op_index] = op_to_mov(op);
                    tcg_opt_gen_mov(s, gen_args, args[0], args[1]);
                    gen_args += 2;
                }
                args += 3;
                continue;
            }
            break;
        default:
            break;
        }

        /* Simplify expression for "op r, a, a => movi r, 0" cases */
        switch (op) {
        CASE_OP_32_64(andc):
        CASE_OP_32_64(sub):
        CASE_OP_32_64(xor):
            if (temps_are_copies(args[1], args[2])) {
                s->gen_opc_buf[op_index] = op_to_movi(op);
                tcg_opt_gen_movi(gen_args, args[0], 0);
                gen_args += 2;
                args += 3;
                continue;
            }
            break;
        default:
            break;
        }

        /* Propagate constants through copy operations and do constant
           folding.  Constants will be substituted to arguments by register
           allocator where needed and possible.  Also detect copies. */
        switch (op) {
        CASE_OP_32_64(mov):
            if (temps_are_copies(args[0], args[1])) {
                args += 2;
                s->gen_opc_buf[op_index] = INDEX_op_nop;
                break;
            }
            if (temps[args[1]].state != TCG_TEMP_CONST) {
                tcg_opt_gen_mov(s, gen_args, args[0], args[1]);
                gen_args += 2;
                args += 2;
                break;
            }
            /* Source argument is constant.  Rewrite the operation and
               let movi case handle it. */
            op = op_to_movi(op);
            s->gen_opc_buf[op_index] = op;
            args[1] = temps[args[1]].val;
            /* fallthrough */
        CASE_OP_32_64(movi):
            tcg_opt_gen_movi(gen_args, args[0], args[1]);
            gen_args += 2;
            args += 2;
            break;

        CASE_OP_32_64(not):
        CASE_OP_32_64(neg):
        CASE_OP_32_64(ext8s):
        CASE_OP_32_64(ext8u):
        CASE_OP_32_64(ext16s):
        CASE_OP_32_64(ext16u):
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext32u_i64:
            if (temps[args[1]].state == TCG_TEMP_CONST) {
                s->gen_opc_buf[op_index] = op_to_movi(op);
                tmp = do_constant_folding(op, temps[args[1]].val, 0);
                tcg_opt_gen_movi(gen_args, args[0], tmp);
                gen_args += 2;
                args += 2;
                break;
            }
            goto do_default;

        case INDEX_op_trunc_shr_i32:
            if (temps[args[1]].state == TCG_TEMP_CONST) {
                s->gen_opc_buf[op_index] = op_to_movi(op);
                tmp = do_constant_folding(op, temps[args[1]].val, args[2]);
                tcg_opt_gen_movi(gen_args, args[0], tmp);
                gen_args += 2;
                args += 3;
                break;
            }
            goto do_default;

        CASE_OP_32_64(add):
        CASE_OP_32_64(sub):
        CASE_OP_32_64(mul):
        CASE_OP_32_64(or):
        CASE_OP_32_64(and):
        CASE_OP_32_64(xor):
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
        CASE_OP_32_64(andc):
        CASE_OP_32_64(orc):
        CASE_OP_32_64(eqv):
        CASE_OP_32_64(nand):
        CASE_OP_32_64(nor):
        CASE_OP_32_64(muluh):
        CASE_OP_32_64(mulsh):
        CASE_OP_32_64(div):
        CASE_OP_32_64(divu):
        CASE_OP_32_64(rem):
        CASE_OP_32_64(remu):
            if (temps[args[1]].state == TCG_TEMP_CONST
                && temps[args[2]].state == TCG_TEMP_CONST) {
                s->gen_opc_buf[op_index] = op_to_movi(op);
                tmp = do_constant_folding(op, temps[args[1]].val,
                                          temps[args[2]].val);
                tcg_opt_gen_movi(gen_args, args[0], tmp);
                gen_args += 2;
                args += 3;
                break;
            }
            goto do_default;

        CASE_OP_32_64(deposit):
            if (temps[args[1]].state == TCG_TEMP_CONST
                && temps[args[2]].state == TCG_TEMP_CONST) {
                s->gen_opc_buf[op_index] = op_to_movi(op);
                tmp = deposit64(temps[args[1]].val, args[3], args[4],
                                temps[args[2]].val);
                tcg_opt_gen_movi(gen_args, args[0], tmp);
                gen_args += 2;
                args += 5;
                break;
            }
            goto do_default;

        CASE_OP_32_64(setcond):
            tmp = do_constant_folding_cond(op, args[1], args[2], args[3]);
            if (tmp != 2) {
                s->gen_opc_buf[op_index] = op_to_movi(op);
                tcg_opt_gen_movi(gen_args, args[0], tmp);
                gen_args += 2;
                args += 4;
                break;
            }
            goto do_default;

        CASE_OP_32_64(brcond):
            tmp = do_constant_folding_cond(op, args[0], args[1], args[2]);
            if (tmp != 2) {
                if (tmp) {
                    reset_all_temps(nb_temps);
                    s->gen_opc_buf[op_index] = INDEX_op_br;
                    gen_args[0] = args[3];
                    gen_args += 1;
                } else {
                    s->gen_opc_buf[op_index] = INDEX_op_nop;
                }
                args += 4;
                break;
            }
            goto do_default;

        CASE_OP_32_64(movcond):
            tmp = do_constant_folding_cond(op, args[1], args[2], args[5]);
            if (tmp != 2) {
                if (temps_are_copies(args[0], args[4-tmp])) {
                    s->gen_opc_buf[op_index] = INDEX_op_nop;
                } else if (temps[args[4-tmp]].state == TCG_TEMP_CONST) {
                    s->gen_opc_buf[op_index] = op_to_movi(op);
                    tcg_opt_gen_movi(gen_args, args[0],
                                     temps[args[4-tmp]].val);
                    gen_args += 2;
                } else {
                    s->gen_opc_buf[op_index] = op_to_mov(op);
                    tcg_opt_gen_mov(s, gen_args, args[0], args[4-tmp]);
                    gen_args += 2;
                }
                args += 6;
                break;
            }
            goto do_default;

        case INDEX_op_add2_i32:
        case INDEX_op_sub2_i32:
            if (temps[args[2]].state == TCG_TEMP_CONST
                && temps[args[3]].state == TCG_TEMP_CONST
                && temps[args[4]].state == TCG_TEMP_CONST
                && temps[args[5]].state == TCG_TEMP_CONST) {
                uint32_t al = temps[args[2]].val;
                uint32_t ah = temps[args[3]].val;
                uint32_t bl = temps[args[4]].val;
                uint32_t bh = temps[args[5]].val;
                uint64_t a = ((uint64_t)ah << 32) | al;
                uint64_t b = ((uint64_t)bh << 32) | bl;
                TCGArg rl, rh;

                if (op == INDEX_op_add2_i32) {
                    a += b;
                } else {
                    a -= b;
                }

                /* We emit the extra nop when we emit the add2/sub2.  */
                assert(s->gen_opc_buf[op_index + 1] == INDEX_op_nop);

                rl = args[0];
                rh = args[1];
                s->gen_opc_buf[op_index] = INDEX_op_movi_i32;
                s->gen_opc_buf[++op_index] = INDEX_op_movi_i32;
                tcg_opt_gen_movi(&gen_args[0], rl, (uint32_t)a);
                tcg_opt_gen_movi(&gen_args[2], rh, (uint32_t)(a >> 32));
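                /* The double-word pair is folded as one 64-bit value; e.g.
                   add2 of (al=0xffffffff, ah=0) and (bl=1, bh=0) computes
                   a = 0x100000000 and emits movi rl, 0 and movi rh, 1,
                   with the carry handled by the 64-bit addition above.  */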
                gen_args += 4;
                args += 6;
                break;
            }
            goto do_default;

        case INDEX_op_mulu2_i32:
            if (temps[args[2]].state == TCG_TEMP_CONST
                && temps[args[3]].state == TCG_TEMP_CONST) {
                uint32_t a = temps[args[2]].val;
                uint32_t b = temps[args[3]].val;
                uint64_t r = (uint64_t)a * b;
                TCGArg rl, rh;

                /* We emit the extra nop when we emit the mulu2.  */
                assert(s->gen_opc_buf[op_index + 1] == INDEX_op_nop);

                rl = args[0];
                rh = args[1];
                s->gen_opc_buf[op_index] = INDEX_op_movi_i32;
                s->gen_opc_buf[++op_index] = INDEX_op_movi_i32;
                tcg_opt_gen_movi(&gen_args[0], rl, (uint32_t)r);
                tcg_opt_gen_movi(&gen_args[2], rh, (uint32_t)(r >> 32));
                gen_args += 4;
                args += 4;
                break;
            }
            goto do_default;

        case INDEX_op_brcond2_i32:
            tmp = do_constant_folding_cond2(&args[0], &args[2], args[4]);
            if (tmp != 2) {
                if (tmp) {
                    reset_all_temps(nb_temps);
                    s->gen_opc_buf[op_index] = INDEX_op_br;
                    gen_args[0] = args[5];
                    gen_args += 1;
                } else {
                    s->gen_opc_buf[op_index] = INDEX_op_nop;
                }
            } else if ((args[4] == TCG_COND_LT || args[4] == TCG_COND_GE)
                       && temps[args[2]].state == TCG_TEMP_CONST
                       && temps[args[3]].state == TCG_TEMP_CONST
                       && temps[args[2]].val == 0
                       && temps[args[3]].val == 0) {
                /* Simplify LT/GE comparisons vs zero to a single compare
                   vs the high word of the input.  */
                reset_all_temps(nb_temps);
                s->gen_opc_buf[op_index] = INDEX_op_brcond_i32;
                gen_args[0] = args[1];
                gen_args[1] = args[3];
                gen_args[2] = args[4];
                gen_args[3] = args[5];
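                /* This is valid because a signed double-word value is
                   negative exactly when its high word is negative: LT/GE
                   against zero never depends on the low word.  */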
                gen_args += 4;
            } else {
                goto do_default;
            }
            args += 6;
            break;

        case INDEX_op_setcond2_i32:
            tmp = do_constant_folding_cond2(&args[1], &args[3], args[5]);
            if (tmp != 2) {
                s->gen_opc_buf[op_index] = INDEX_op_movi_i32;
                tcg_opt_gen_movi(gen_args, args[0], tmp);
                gen_args += 2;
            } else if ((args[5] == TCG_COND_LT || args[5] == TCG_COND_GE)
                       && temps[args[3]].state == TCG_TEMP_CONST
                       && temps[args[4]].state == TCG_TEMP_CONST
                       && temps[args[3]].val == 0
                       && temps[args[4]].val == 0) {
                /* Simplify LT/GE comparisons vs zero to a single compare
                   vs the high word of the input.  */
                s->gen_opc_buf[op_index] = INDEX_op_setcond_i32;
                reset_temp(args[0]);
                gen_args[0] = args[0];
                gen_args[1] = args[2];
                gen_args[2] = args[4];
                gen_args[3] = args[5];
                gen_args += 4;
            } else {
                goto do_default;
            }
            args += 6;
            break;

        case INDEX_op_call:
            if (!(args[nb_oargs + nb_iargs + 1]
                  & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
                for (i = 0; i < nb_globals; i++) {
                    reset_temp(i);
                }
            }
            goto do_reset_output;

        default:
        do_default:
            /* Default case: we know nothing about operation (or were unable
               to compute the operation result) so no propagation is done.
               We trash everything if the operation is the end of a basic
               block, otherwise we only trash the output args.  "mask" is
               the non-zero bits mask for the first output arg.  */
            if (def->flags & TCG_OPF_BB_END) {
                reset_all_temps(nb_temps);
            } else {
        do_reset_output:
                for (i = 0; i < nb_oargs; i++) {
                    reset_temp(args[i]);
                    /* Save the corresponding known-zero bits mask for the
                       first output argument (only one supported so far). */
                    if (i == 0) {
                        temps[args[i]].mask = mask;
                    }
                }
            }
            for (i = 0; i < nb_args; i++) {
                gen_args[i] = args[i];
            }
            args += nb_args;
            gen_args += nb_args;
            break;
        }
    }

    return gen_args;
}

TCGArg *tcg_optimize(TCGContext *s, uint16_t *tcg_opc_ptr,
                     TCGArg *args, TCGOpDef *tcg_op_defs)
{
    TCGArg *res;
    res = tcg_constant_folding(s, tcg_opc_ptr, args, tcg_op_defs);
    return res;
}