]>
Commit | Line | Data |
---|---|---|
8f2e8c07 KB |
1 | /* |
2 | * Optimizations for Tiny Code Generator for QEMU | |
3 | * | |
4 | * Copyright (c) 2010 Samsung Electronics. | |
5 | * Contributed by Kirill Batuzov <[email protected]> | |
6 | * | |
7 | * Permission is hereby granted, free of charge, to any person obtaining a copy | |
8 | * of this software and associated documentation files (the "Software"), to deal | |
9 | * in the Software without restriction, including without limitation the rights | |
10 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |
11 | * copies of the Software, and to permit persons to whom the Software is | |
12 | * furnished to do so, subject to the following conditions: | |
13 | * | |
14 | * The above copyright notice and this permission notice shall be included in | |
15 | * all copies or substantial portions of the Software. | |
16 | * | |
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
18 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
19 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
20 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
21 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |
22 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | |
23 | * THE SOFTWARE. | |
24 | */ | |
25 | ||
757e725b | 26 | #include "qemu/osdep.h" |
dcb32f1d | 27 | #include "tcg/tcg-op.h" |
90163900 | 28 | #include "tcg-internal.h" |
8f2e8c07 | 29 | |
/*
 * Expand to the case labels for both the _i32 and _i64 variants
 * of opcode X, so one switch arm handles both widths.
 */
#define CASE_OP_32_64(x)                        \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

/* As CASE_OP_32_64, but also covering the _vec variant of X. */
#define CASE_OP_32_64_VEC(x)                    \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64):    \
        glue(glue(case INDEX_op_, x), _vec)
/*
 * Per-temporary optimizer state, hung off TCGTemp.state_ptr.
 * prev_copy/next_copy form a circular doubly-linked list of temps
 * currently known to hold the same value; a singleton list (both
 * pointing at the owner) means "no known copies".
 */
typedef struct TempOptInfo {
    bool is_const;        /* true when val below is meaningful */
    TCGTemp *prev_copy;
    TCGTemp *next_copy;
    uint64_t val;         /* constant value, valid only if is_const */
    uint64_t z_mask;      /* mask bit is 0 if and only if value bit is 0 */
} TempOptInfo;
22613af4 | 46 | |
/* State threaded through one pass of the optimizer over an op stream. */
typedef struct OptContext {
    TCGContext *tcg;
    TCGOp *prev_mb;        /* last memory barrier seen, for merging */
    TCGTempSet temps_used; /* which temps have initialized TempOptInfo */

    /* In flight values from optimization. */
    uint64_t z_mask;       /* known-zero mask for the current op's output */
} OptContext;
55 | ||
/* Return the optimizer state attached to TS. */
static inline TempOptInfo *ts_info(TCGTemp *ts)
{
    return ts->state_ptr;
}

/* Return the optimizer state for the temp behind ARG. */
static inline TempOptInfo *arg_info(TCGArg arg)
{
    return ts_info(arg_temp(arg));
}

/* True if TS is known to hold a constant value. */
static inline bool ts_is_const(TCGTemp *ts)
{
    return ts_info(ts)->is_const;
}

/* True if ARG's temp is known to hold a constant value. */
static inline bool arg_is_const(TCGArg arg)
{
    return ts_is_const(arg_temp(arg));
}

/* True if TS has at least one other temp on its copy list. */
static inline bool ts_is_copy(TCGTemp *ts)
{
    return ts_info(ts)->next_copy != ts;
}
80 | ||
/* Reset TEMP's state, possibly removing the temp for the list of copies.  */
static void reset_ts(TCGTemp *ts)
{
    TempOptInfo *ti = ts_info(ts);
    TempOptInfo *pi = ts_info(ti->prev_copy);
    TempOptInfo *ni = ts_info(ti->next_copy);

    /* Unlink TS from its circular copy list ... */
    ni->prev_copy = ti->prev_copy;
    pi->next_copy = ti->next_copy;
    /* ... and make it a singleton again, with nothing known about it. */
    ti->next_copy = ts;
    ti->prev_copy = ts;
    ti->is_const = false;
    ti->z_mask = -1;
}

/* As reset_ts, but taking a TCGArg. */
static void reset_temp(TCGArg arg)
{
    reset_ts(arg_temp(arg));
}
100 | ||
/* Initialize and activate a temporary.  */
static void init_ts_info(OptContext *ctx, TCGTemp *ts)
{
    size_t idx = temp_idx(ts);
    TempOptInfo *ti;

    /* Already initialized during this pass: nothing to do. */
    if (test_bit(idx, ctx->temps_used.l)) {
        return;
    }
    set_bit(idx, ctx->temps_used.l);

    /* Allocate the per-temp state lazily; it persists across passes. */
    ti = ts->state_ptr;
    if (ti == NULL) {
        ti = tcg_malloc(sizeof(TempOptInfo));
        ts->state_ptr = ti;
    }

    /* Start as a singleton copy list. */
    ti->next_copy = ts;
    ti->prev_copy = ts;
    if (ts->kind == TEMP_CONST) {
        ti->is_const = true;
        ti->val = ts->val;
        ti->z_mask = ts->val;
        if (TCG_TARGET_REG_BITS > 32 && ts->type == TCG_TYPE_I32) {
            /* High bits of a 32-bit quantity are garbage.  */
            ti->z_mask |= ~0xffffffffull;
        }
    } else {
        /* Note: val is deliberately left uninitialized here. */
        ti->is_const = false;
        ti->z_mask = -1;
    }
}
133 | ||
/*
 * Among all known copies of TS, pick the most "stable" representative:
 * a read-only temp if one exists, else a global, else a local, else TS
 * itself.  This maximizes the chance the value survives across ops.
 */
static TCGTemp *find_better_copy(TCGContext *s, TCGTemp *ts)
{
    TCGTemp *i, *g, *l;

    /* If this is already readonly, we can't do better. */
    if (temp_readonly(ts)) {
        return ts;
    }

    g = l = NULL;
    for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) {
        if (temp_readonly(i)) {
            return i;
        } else if (i->kind > ts->kind) {
            /* Remember the best global and local candidates seen. */
            if (i->kind == TEMP_GLOBAL) {
                g = i;
            } else if (i->kind == TEMP_LOCAL) {
                l = i;
            }
        }
    }

    /* If we didn't find a better representation, return the same temp. */
    return g ? g : l ? l : ts;
}
159 | ||
6349039d | 160 | static bool ts_are_copies(TCGTemp *ts1, TCGTemp *ts2) |
e590d4e6 | 161 | { |
6349039d | 162 | TCGTemp *i; |
e590d4e6 | 163 | |
6349039d | 164 | if (ts1 == ts2) { |
e590d4e6 AJ |
165 | return true; |
166 | } | |
167 | ||
6349039d | 168 | if (!ts_is_copy(ts1) || !ts_is_copy(ts2)) { |
e590d4e6 AJ |
169 | return false; |
170 | } | |
171 | ||
6349039d RH |
172 | for (i = ts_info(ts1)->next_copy; i != ts1; i = ts_info(i)->next_copy) { |
173 | if (i == ts2) { | |
e590d4e6 AJ |
174 | return true; |
175 | } | |
176 | } | |
177 | ||
178 | return false; | |
179 | } | |
180 | ||
6349039d RH |
181 | static bool args_are_copies(TCGArg arg1, TCGArg arg2) |
182 | { | |
183 | return ts_are_copies(arg_temp(arg1), arg_temp(arg2)); | |
184 | } | |
185 | ||
/*
 * Rewrite OP in place into a mov from SRC to DST of the appropriate
 * width, updating copy-list and known-bits tracking.  If DST is already
 * a copy of SRC the op is removed entirely.  Always returns true:
 * processing of this op is complete.
 */
static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
{
    TCGTemp *dst_ts = arg_temp(dst);
    TCGTemp *src_ts = arg_temp(src);
    const TCGOpDef *def;
    TempOptInfo *di;
    TempOptInfo *si;
    uint64_t z_mask;
    TCGOpcode new_op;

    if (ts_are_copies(dst_ts, src_ts)) {
        /* The mov would be a no-op; drop it. */
        tcg_op_remove(ctx->tcg, op);
        return true;
    }

    reset_ts(dst_ts);
    di = ts_info(dst_ts);
    si = ts_info(src_ts);
    /* Select the mov opcode matching the original op's width/class. */
    def = &tcg_op_defs[op->opc];
    if (def->flags & TCG_OPF_VECTOR) {
        new_op = INDEX_op_mov_vec;
    } else if (def->flags & TCG_OPF_64BIT) {
        new_op = INDEX_op_mov_i64;
    } else {
        new_op = INDEX_op_mov_i32;
    }
    op->opc = new_op;
    /* TCGOP_VECL and TCGOP_VECE remain unchanged.  */
    op->args[0] = dst;
    op->args[1] = src;

    z_mask = si->z_mask;
    if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) {
        /* High bits of the destination are now garbage.  */
        z_mask |= ~0xffffffffull;
    }
    di->z_mask = z_mask;

    if (src_ts->type == dst_ts->type) {
        /* Splice DST into SRC's circular copy list. */
        TempOptInfo *ni = ts_info(si->next_copy);

        di->next_copy = si->next_copy;
        di->prev_copy = src_ts;
        ni->prev_copy = dst_ts;
        si->next_copy = dst_ts;
        di->is_const = si->is_const;
        di->val = si->val;
    }
    return true;
}
236 | ||
/*
 * Replace OP with a mov of the constant VAL into DST, using a constant
 * temp of the op's width/type.  Always returns true.
 */
static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
                             TCGArg dst, uint64_t val)
{
    const TCGOpDef *def = &tcg_op_defs[op->opc];
    TCGType type;
    TCGTemp *tv;

    if (def->flags & TCG_OPF_VECTOR) {
        type = TCGOP_VECL(op) + TCG_TYPE_V64;
    } else if (def->flags & TCG_OPF_64BIT) {
        type = TCG_TYPE_I64;
    } else {
        type = TCG_TYPE_I32;
    }

    /* Convert movi to mov with constant temp. */
    tv = tcg_constant_internal(type, val);
    init_ts_info(ctx, tv);
    return tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
}
257 | ||
/*
 * Evaluate opcode OP on constant operands X and Y at full 64-bit width.
 * For unary ops Y is ignored (or carries flags, e.g. TCG_BSWAP_OS for
 * bswap).  The caller (do_constant_folding) truncates 32-bit results.
 * Aborts on an opcode this table does not cover.
 */
static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
{
    uint64_t l64, h64;

    switch (op) {
    CASE_OP_32_64(add):
        return x + y;

    CASE_OP_32_64(sub):
        return x - y;

    CASE_OP_32_64(mul):
        return x * y;

    CASE_OP_32_64(and):
        return x & y;

    CASE_OP_32_64(or):
        return x | y;

    CASE_OP_32_64(xor):
        return x ^ y;

    /* Shift/rotate counts are masked to the operand width. */
    case INDEX_op_shl_i32:
        return (uint32_t)x << (y & 31);

    case INDEX_op_shl_i64:
        return (uint64_t)x << (y & 63);

    case INDEX_op_shr_i32:
        return (uint32_t)x >> (y & 31);

    case INDEX_op_shr_i64:
        return (uint64_t)x >> (y & 63);

    case INDEX_op_sar_i32:
        return (int32_t)x >> (y & 31);

    case INDEX_op_sar_i64:
        return (int64_t)x >> (y & 63);

    case INDEX_op_rotr_i32:
        return ror32(x, y & 31);

    case INDEX_op_rotr_i64:
        return ror64(x, y & 63);

    case INDEX_op_rotl_i32:
        return rol32(x, y & 31);

    case INDEX_op_rotl_i64:
        return rol64(x, y & 63);

    CASE_OP_32_64(not):
        return ~x;

    CASE_OP_32_64(neg):
        return -x;

    CASE_OP_32_64(andc):
        return x & ~y;

    CASE_OP_32_64(orc):
        return x | ~y;

    CASE_OP_32_64(eqv):
        return ~(x ^ y);

    CASE_OP_32_64(nand):
        return ~(x & y);

    CASE_OP_32_64(nor):
        return ~(x | y);

    /* clz/ctz: Y supplies the result for a zero input. */
    case INDEX_op_clz_i32:
        return (uint32_t)x ? clz32(x) : y;

    case INDEX_op_clz_i64:
        return x ? clz64(x) : y;

    case INDEX_op_ctz_i32:
        return (uint32_t)x ? ctz32(x) : y;

    case INDEX_op_ctz_i64:
        return x ? ctz64(x) : y;

    case INDEX_op_ctpop_i32:
        return ctpop32(x);

    case INDEX_op_ctpop_i64:
        return ctpop64(x);

    CASE_OP_32_64(ext8s):
        return (int8_t)x;

    CASE_OP_32_64(ext16s):
        return (int16_t)x;

    CASE_OP_32_64(ext8u):
        return (uint8_t)x;

    CASE_OP_32_64(ext16u):
        return (uint16_t)x;

    /* bswap: Y carries the TCG_BSWAP_* flags (sign-extend on OS). */
    CASE_OP_32_64(bswap16):
        x = bswap16(x);
        return y & TCG_BSWAP_OS ? (int16_t)x : x;

    CASE_OP_32_64(bswap32):
        x = bswap32(x);
        return y & TCG_BSWAP_OS ? (int32_t)x : x;

    case INDEX_op_bswap64_i64:
        return bswap64(x);

    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        return (int32_t)x;

    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_ext32u_i64:
        return (uint32_t)x;

    case INDEX_op_extrh_i64_i32:
        return (uint64_t)x >> 32;

    case INDEX_op_muluh_i32:
        return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
    case INDEX_op_mulsh_i32:
        return ((int64_t)(int32_t)x * (int32_t)y) >> 32;

    case INDEX_op_muluh_i64:
        mulu64(&l64, &h64, x, y);
        return h64;
    case INDEX_op_mulsh_i64:
        muls64(&l64, &h64, x, y);
        return h64;

    case INDEX_op_div_i32:
        /* Avoid crashing on divide by zero, otherwise undefined. */
        return (int32_t)x / ((int32_t)y ? : 1);
    case INDEX_op_divu_i32:
        return (uint32_t)x / ((uint32_t)y ? : 1);
    case INDEX_op_div_i64:
        return (int64_t)x / ((int64_t)y ? : 1);
    case INDEX_op_divu_i64:
        return (uint64_t)x / ((uint64_t)y ? : 1);

    case INDEX_op_rem_i32:
        return (int32_t)x % ((int32_t)y ? : 1);
    case INDEX_op_remu_i32:
        return (uint32_t)x % ((uint32_t)y ? : 1);
    case INDEX_op_rem_i64:
        return (int64_t)x % ((int64_t)y ? : 1);
    case INDEX_op_remu_i64:
        return (uint64_t)x % ((uint64_t)y ? : 1);

    default:
        fprintf(stderr,
                "Unrecognized operation %d in do_constant_folding.\n", op);
        tcg_abort();
    }
}
422 | ||
/*
 * Constant-fold OP on X and Y, sign-extending a 32-bit result into
 * the canonical 64-bit representation used for i32 values.
 */
static uint64_t do_constant_folding(TCGOpcode op, uint64_t x, uint64_t y)
{
    const TCGOpDef *def = &tcg_op_defs[op];
    uint64_t res = do_constant_folding_2(op, x, y);
    if (!(def->flags & TCG_OPF_64BIT)) {
        res = (int32_t)res;
    }
    return res;
}
432 | ||
9519da7e RH |
433 | static bool do_constant_folding_cond_32(uint32_t x, uint32_t y, TCGCond c) |
434 | { | |
435 | switch (c) { | |
436 | case TCG_COND_EQ: | |
437 | return x == y; | |
438 | case TCG_COND_NE: | |
439 | return x != y; | |
440 | case TCG_COND_LT: | |
441 | return (int32_t)x < (int32_t)y; | |
442 | case TCG_COND_GE: | |
443 | return (int32_t)x >= (int32_t)y; | |
444 | case TCG_COND_LE: | |
445 | return (int32_t)x <= (int32_t)y; | |
446 | case TCG_COND_GT: | |
447 | return (int32_t)x > (int32_t)y; | |
448 | case TCG_COND_LTU: | |
449 | return x < y; | |
450 | case TCG_COND_GEU: | |
451 | return x >= y; | |
452 | case TCG_COND_LEU: | |
453 | return x <= y; | |
454 | case TCG_COND_GTU: | |
455 | return x > y; | |
456 | default: | |
457 | tcg_abort(); | |
458 | } | |
459 | } | |
460 | ||
461 | static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c) | |
462 | { | |
463 | switch (c) { | |
464 | case TCG_COND_EQ: | |
465 | return x == y; | |
466 | case TCG_COND_NE: | |
467 | return x != y; | |
468 | case TCG_COND_LT: | |
469 | return (int64_t)x < (int64_t)y; | |
470 | case TCG_COND_GE: | |
471 | return (int64_t)x >= (int64_t)y; | |
472 | case TCG_COND_LE: | |
473 | return (int64_t)x <= (int64_t)y; | |
474 | case TCG_COND_GT: | |
475 | return (int64_t)x > (int64_t)y; | |
476 | case TCG_COND_LTU: | |
477 | return x < y; | |
478 | case TCG_COND_GEU: | |
479 | return x >= y; | |
480 | case TCG_COND_LEU: | |
481 | return x <= y; | |
482 | case TCG_COND_GTU: | |
483 | return x > y; | |
484 | default: | |
485 | tcg_abort(); | |
486 | } | |
487 | } | |
488 | ||
489 | static bool do_constant_folding_cond_eq(TCGCond c) | |
490 | { | |
491 | switch (c) { | |
492 | case TCG_COND_GT: | |
493 | case TCG_COND_LTU: | |
494 | case TCG_COND_LT: | |
495 | case TCG_COND_GTU: | |
496 | case TCG_COND_NE: | |
497 | return 0; | |
498 | case TCG_COND_GE: | |
499 | case TCG_COND_GEU: | |
500 | case TCG_COND_LE: | |
501 | case TCG_COND_LEU: | |
502 | case TCG_COND_EQ: | |
503 | return 1; | |
504 | default: | |
505 | tcg_abort(); | |
506 | } | |
507 | } | |
508 | ||
8d57bf1e RH |
509 | /* |
510 | * Return -1 if the condition can't be simplified, | |
511 | * and the result of the condition (0 or 1) if it can. | |
512 | */ | |
513 | static int do_constant_folding_cond(TCGOpcode op, TCGArg x, | |
514 | TCGArg y, TCGCond c) | |
f8dd19e5 | 515 | { |
54795544 RH |
516 | uint64_t xv = arg_info(x)->val; |
517 | uint64_t yv = arg_info(y)->val; | |
518 | ||
6349039d | 519 | if (arg_is_const(x) && arg_is_const(y)) { |
170ba88f RH |
520 | const TCGOpDef *def = &tcg_op_defs[op]; |
521 | tcg_debug_assert(!(def->flags & TCG_OPF_VECTOR)); | |
522 | if (def->flags & TCG_OPF_64BIT) { | |
6349039d | 523 | return do_constant_folding_cond_64(xv, yv, c); |
170ba88f RH |
524 | } else { |
525 | return do_constant_folding_cond_32(xv, yv, c); | |
b336ceb6 | 526 | } |
6349039d | 527 | } else if (args_are_copies(x, y)) { |
9519da7e | 528 | return do_constant_folding_cond_eq(c); |
6349039d | 529 | } else if (arg_is_const(y) && yv == 0) { |
b336ceb6 | 530 | switch (c) { |
f8dd19e5 | 531 | case TCG_COND_LTU: |
b336ceb6 | 532 | return 0; |
f8dd19e5 | 533 | case TCG_COND_GEU: |
b336ceb6 AJ |
534 | return 1; |
535 | default: | |
8d57bf1e | 536 | return -1; |
f8dd19e5 | 537 | } |
f8dd19e5 | 538 | } |
8d57bf1e | 539 | return -1; |
f8dd19e5 AJ |
540 | } |
541 | ||
8d57bf1e RH |
/*
 * Return -1 if the condition can't be simplified,
 * and the result of the condition (0 or 1) if it can.
 * P1 and P2 each point at a (low, high) pair of 32-bit args forming
 * a 64-bit operand for a double-word comparison.
 */
static int do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
{
    TCGArg al = p1[0], ah = p1[1];
    TCGArg bl = p2[0], bh = p2[1];

    if (arg_is_const(bl) && arg_is_const(bh)) {
        tcg_target_ulong blv = arg_info(bl)->val;
        tcg_target_ulong bhv = arg_info(bh)->val;
        uint64_t b = deposit64(blv, 32, 32, bhv);

        if (arg_is_const(al) && arg_is_const(ah)) {
            tcg_target_ulong alv = arg_info(al)->val;
            tcg_target_ulong ahv = arg_info(ah)->val;
            uint64_t a = deposit64(alv, 32, 32, ahv);
            /* Both operands constant: fold completely. */
            return do_constant_folding_cond_64(a, b, c);
        }
        if (b == 0) {
            /* Comparisons vs zero that have a fixed answer for unsigned. */
            switch (c) {
            case TCG_COND_LTU:
                return 0;
            case TCG_COND_GEU:
                return 1;
            default:
                break;
            }
        }
    }
    if (args_are_copies(al, bl) && args_are_copies(ah, bh)) {
        return do_constant_folding_cond_eq(c);
    }
    return -1;
}
578 | ||
24c9ae4e RH |
579 | static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2) |
580 | { | |
581 | TCGArg a1 = *p1, a2 = *p2; | |
582 | int sum = 0; | |
6349039d RH |
583 | sum += arg_is_const(a1); |
584 | sum -= arg_is_const(a2); | |
24c9ae4e RH |
585 | |
586 | /* Prefer the constant in second argument, and then the form | |
587 | op a, a, b, which is better handled on non-RISC hosts. */ | |
588 | if (sum > 0 || (sum == 0 && dest == a2)) { | |
589 | *p1 = a2; | |
590 | *p2 = a1; | |
591 | return true; | |
592 | } | |
593 | return false; | |
594 | } | |
595 | ||
0bfcb865 RH |
596 | static bool swap_commutative2(TCGArg *p1, TCGArg *p2) |
597 | { | |
598 | int sum = 0; | |
6349039d RH |
599 | sum += arg_is_const(p1[0]); |
600 | sum += arg_is_const(p1[1]); | |
601 | sum -= arg_is_const(p2[0]); | |
602 | sum -= arg_is_const(p2[1]); | |
0bfcb865 RH |
603 | if (sum > 0) { |
604 | TCGArg t; | |
605 | t = p1[0], p1[0] = p2[0], p2[0] = t; | |
606 | t = p1[1], p1[1] = p2[1], p2[1] = t; | |
607 | return true; | |
608 | } | |
609 | return false; | |
610 | } | |
611 | ||
e2577ea2 RH |
612 | static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args) |
613 | { | |
614 | for (int i = 0; i < nb_args; i++) { | |
615 | TCGTemp *ts = arg_temp(op->args[i]); | |
616 | if (ts) { | |
617 | init_ts_info(ctx, ts); | |
618 | } | |
619 | } | |
620 | } | |
621 | ||
/*
 * Replace each input operand of OP (operands nb_oargs ..
 * nb_oargs+nb_iargs-1) that has known copies with its preferred
 * representative, as chosen by find_better_copy.
 */
static void copy_propagate(OptContext *ctx, TCGOp *op,
                           int nb_oargs, int nb_iargs)
{
    TCGContext *s = ctx->tcg;

    for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        if (ts && ts_is_copy(ts)) {
            op->args[i] = temp_arg(find_better_copy(s, ts));
        }
    }
}
634 | ||
/*
 * Common tail for ops that were not fully folded away: invalidate
 * state for the outputs and record the accumulated z_mask.
 */
static void finish_folding(OptContext *ctx, TCGOp *op)
{
    const TCGOpDef *def = &tcg_op_defs[op->opc];
    int i, nb_oargs;

    /*
     * For an opcode that ends a BB, reset all temp data.
     * We do no cross-BB optimization.
     */
    if (def->flags & TCG_OPF_BB_END) {
        memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
        ctx->prev_mb = NULL;
        return;
    }

    nb_oargs = def->nb_oargs;
    for (i = 0; i < nb_oargs; i++) {
        reset_temp(op->args[i]);
        /*
         * Save the corresponding known-zero bits mask for the
         * first output argument (only one supported so far).
         */
        if (i == 0) {
            arg_info(op->args[i])->z_mask = ctx->z_mask;
        }
    }
}
662 | ||
/*
 * The fold_* functions return true when processing is complete,
 * usually by folding the operation to a constant or to a copy,
 * and calling tcg_opt_gen_{mov,movi}.  They may do other things,
 * like collect information about the value produced, for use in
 * optimizing a subsequent operation.
 *
 * These first fold_* functions are all helpers, used by other
 * folders for more specific operations.
 */

/* Fold a unary op whose single input is constant. */
static bool fold_const1(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1])) {
        uint64_t t;

        t = arg_info(op->args[1])->val;
        t = do_constant_folding(op->opc, t, 0);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }
    return false;
}
685 | ||
/* Fold a binary op whose two inputs are both constant. */
static bool fold_const2(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t t1 = arg_info(op->args[1])->val;
        uint64_t t2 = arg_info(op->args[2])->val;

        t1 = do_constant_folding(op->opc, t1, t2);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
    }
    return false;
}
697 | ||
/*
 * These outermost fold_<op> functions are sorted alphabetically.
 */

static bool fold_add(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}
706 | ||
/*
 * Fold a double-word add/sub (args: rl, rh, al, ah, bl, bh) when all
 * four inputs are constant, emitting two 32-bit movi's (one via a
 * freshly inserted mov op for the high half).
 */
static bool fold_addsub2_i32(OptContext *ctx, TCGOp *op, bool add)
{
    if (arg_is_const(op->args[2]) && arg_is_const(op->args[3]) &&
        arg_is_const(op->args[4]) && arg_is_const(op->args[5])) {
        uint32_t al = arg_info(op->args[2])->val;
        uint32_t ah = arg_info(op->args[3])->val;
        uint32_t bl = arg_info(op->args[4])->val;
        uint32_t bh = arg_info(op->args[5])->val;
        uint64_t a = ((uint64_t)ah << 32) | al;
        uint64_t b = ((uint64_t)bh << 32) | bl;
        TCGArg rl, rh;
        TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_mov_i32);

        if (add) {
            a += b;
        } else {
            a -= b;
        }

        /* Cache the output args: tcg_opt_gen_movi rewrites op in place. */
        rl = op->args[0];
        rh = op->args[1];
        tcg_opt_gen_movi(ctx, op, rl, (int32_t)a);
        tcg_opt_gen_movi(ctx, op2, rh, (int32_t)(a >> 32));
        return true;
    }
    return false;
}
734 | ||
/* Double-word addition: delegate to the common add/sub helper. */
static bool fold_add2_i32(OptContext *ctx, TCGOp *op)
{
    return fold_addsub2_i32(ctx, op, true);
}
739 | ||
static bool fold_and(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}

static bool fold_andc(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}
749 | ||
/*
 * Fold a conditional branch: remove it when the condition is known
 * false, turn it into an unconditional br when known true.
 */
static bool fold_brcond(OptContext *ctx, TCGOp *op)
{
    TCGCond cond = op->args[2];
    int i = do_constant_folding_cond(op->opc, op->args[0], op->args[1], cond);

    if (i == 0) {
        /* Never taken: delete the branch. */
        tcg_op_remove(ctx->tcg, op);
        return true;
    }
    if (i > 0) {
        /* Always taken: reduce to an unconditional branch. */
        op->opc = INDEX_op_br;
        op->args[0] = op->args[3];
    }
    return false;
}
765 | ||
/*
 * Fold a double-word conditional branch (args: al, ah, bl, bh, cond,
 * label).  Tries full constant folding first, then reductions to a
 * single-word brcond on either the high pair or the low pair.
 */
static bool fold_brcond2(OptContext *ctx, TCGOp *op)
{
    TCGCond cond = op->args[4];
    int i = do_constant_folding_cond2(&op->args[0], &op->args[2], cond);
    TCGArg label = op->args[5];
    int inv = 0;

    if (i >= 0) {
        goto do_brcond_const;
    }

    switch (cond) {
    case TCG_COND_LT:
    case TCG_COND_GE:
        /*
         * Simplify LT/GE comparisons vs zero to a single compare
         * vs the high word of the input.
         */
        if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == 0 &&
            arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0) {
            goto do_brcond_high;
        }
        break;

    case TCG_COND_NE:
        inv = 1;
        QEMU_FALLTHROUGH;
    case TCG_COND_EQ:
        /*
         * Simplify EQ/NE comparisons where one of the pairs
         * can be simplified.
         */
        i = do_constant_folding_cond(INDEX_op_brcond_i32, op->args[0],
                                     op->args[2], cond);
        switch (i ^ inv) {
        case 0:
            /* Low pair decides the whole comparison. */
            goto do_brcond_const;
        case 1:
            /* Low pair satisfied; only the high pair matters. */
            goto do_brcond_high;
        }

        i = do_constant_folding_cond(INDEX_op_brcond_i32, op->args[1],
                                     op->args[3], cond);
        switch (i ^ inv) {
        case 0:
            goto do_brcond_const;
        case 1:
            /* High pair satisfied; compare only the low words. */
            op->opc = INDEX_op_brcond_i32;
            op->args[1] = op->args[2];
            op->args[2] = cond;
            op->args[3] = label;
            break;
        }
        break;

    default:
        break;

    do_brcond_high:
        /* Reduce to a 32-bit brcond on the high words. */
        op->opc = INDEX_op_brcond_i32;
        op->args[0] = op->args[1];
        op->args[1] = op->args[3];
        op->args[2] = cond;
        op->args[3] = label;
        break;

    do_brcond_const:
        /* Condition fully known: drop or make unconditional. */
        if (i == 0) {
            tcg_op_remove(ctx->tcg, op);
            return true;
        }
        op->opc = INDEX_op_br;
        op->args[0] = label;
        break;
    }
    return false;
}
843 | ||
/* Fold a byteswap of a constant; args[2] carries the bswap flags. */
static bool fold_bswap(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1])) {
        uint64_t t = arg_info(op->args[1])->val;

        t = do_constant_folding(op->opc, t, op->args[2]);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }
    return false;
}
854 | ||
/*
 * Handle a helper call: propagate copies into the arguments, then
 * conservatively invalidate whatever the call may clobber.
 */
static bool fold_call(OptContext *ctx, TCGOp *op)
{
    TCGContext *s = ctx->tcg;
    int nb_oargs = TCGOP_CALLO(op);
    int nb_iargs = TCGOP_CALLI(op);
    int flags, i;

    init_arguments(ctx, op, nb_oargs + nb_iargs);
    copy_propagate(ctx, op, nb_oargs, nb_iargs);

    /* If the function reads or writes globals, reset temp data.  */
    flags = tcg_call_flags(op);
    if (!(flags & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
        int nb_globals = s->nb_globals;

        for (i = 0; i < nb_globals; i++) {
            /* Only globals with initialized state need resetting. */
            if (test_bit(i, ctx->temps_used.l)) {
                reset_ts(&ctx->tcg->temps[i]);
            }
        }
    }

    /* Reset temp data for outputs.  */
    for (i = 0; i < nb_oargs; i++) {
        reset_temp(op->args[i]);
    }

    /* Stop optimizing MB across calls.  */
    ctx->prev_mb = NULL;
    return true;
}
886 | ||
/*
 * Fold clz/ctz (args: ret, src, zero-case value).  A constant nonzero
 * source folds to a constant; a constant zero source folds to a copy
 * of the zero-case operand.
 */
static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1])) {
        uint64_t t = arg_info(op->args[1])->val;

        if (t != 0) {
            t = do_constant_folding(op->opc, t, 0);
            return tcg_opt_gen_movi(ctx, op, op->args[0], t);
        }
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
    }
    return false;
}
900 | ||
static bool fold_ctpop(OptContext *ctx, TCGOp *op)
{
    return fold_const1(ctx, op);
}
905 | ||
/* Fold deposit (args: ret, base, value, pos, len) of two constants. */
static bool fold_deposit(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t t1 = arg_info(op->args[1])->val;
        uint64_t t2 = arg_info(op->args[2])->val;

        t1 = deposit64(t1, op->args[3], op->args[4], t2);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
    }
    return false;
}
917 | ||
static bool fold_divide(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}

static bool fold_eqv(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}
927 | ||
/* Fold extract (args: ret, src, pos, len) of a constant source. */
static bool fold_extract(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1])) {
        uint64_t t;

        t = arg_info(op->args[1])->val;
        t = extract64(t, op->args[2], op->args[3]);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }
    return false;
}
939 | ||
/*
 * Fold extract2 (args: ret, lo, hi, shr) when both inputs are
 * constant: the result is the lo part shifted right combined with
 * the hi part shifted left, at the op's width.
 */
static bool fold_extract2(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t v1 = arg_info(op->args[1])->val;
        uint64_t v2 = arg_info(op->args[2])->val;
        int shr = op->args[3];

        if (op->opc == INDEX_op_extract2_i64) {
            v1 >>= shr;
            v2 <<= 64 - shr;
        } else {
            /* 32-bit variant; the i32 canonical form sign-extends. */
            v1 = (uint32_t)v1 >> shr;
            v2 = (int32_t)v2 << (32 - shr);
        }
        return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
    }
    return false;
}
958 | ||
static bool fold_exts(OptContext *ctx, TCGOp *op)
{
    return fold_const1(ctx, op);
}

static bool fold_extu(OptContext *ctx, TCGOp *op)
{
    return fold_const1(ctx, op);
}
968 | ||
3eefdf2b RH |
969 | static bool fold_mb(OptContext *ctx, TCGOp *op) |
970 | { | |
971 | /* Eliminate duplicate and redundant fence instructions. */ | |
972 | if (ctx->prev_mb) { | |
973 | /* | |
974 | * Merge two barriers of the same type into one, | |
975 | * or a weaker barrier into a stronger one, | |
976 | * or two weaker barriers into a stronger one. | |
977 | * mb X; mb Y => mb X|Y | |
978 | * mb; strl => mb; st | |
979 | * ldaq; mb => ld; mb | |
980 | * ldaq; strl => ld; mb; st | |
981 | * Other combinations are also merged into a strong | |
982 | * barrier. This is stricter than specified but for | |
983 | * the purposes of TCG is better than not optimizing. | |
984 | */ | |
985 | ctx->prev_mb->args[0] |= op->args[0]; | |
986 | tcg_op_remove(ctx->tcg, op); | |
987 | } else { | |
988 | ctx->prev_mb = op; | |
989 | } | |
990 | return true; | |
991 | } | |
992 | ||
0c310a30 RH |
993 | static bool fold_movcond(OptContext *ctx, TCGOp *op) |
994 | { | |
995 | TCGOpcode opc = op->opc; | |
996 | TCGCond cond = op->args[5]; | |
997 | int i = do_constant_folding_cond(opc, op->args[1], op->args[2], cond); | |
998 | ||
999 | if (i >= 0) { | |
1000 | return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]); | |
1001 | } | |
1002 | ||
1003 | if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) { | |
1004 | uint64_t tv = arg_info(op->args[3])->val; | |
1005 | uint64_t fv = arg_info(op->args[4])->val; | |
1006 | ||
1007 | opc = (opc == INDEX_op_movcond_i32 | |
1008 | ? INDEX_op_setcond_i32 : INDEX_op_setcond_i64); | |
1009 | ||
1010 | if (tv == 1 && fv == 0) { | |
1011 | op->opc = opc; | |
1012 | op->args[3] = cond; | |
1013 | } else if (fv == 1 && tv == 0) { | |
1014 | op->opc = opc; | |
1015 | op->args[3] = tcg_invert_cond(cond); | |
1016 | } | |
1017 | } | |
1018 | return false; | |
1019 | } | |
1020 | ||
2f9f08ba RH |
1021 | static bool fold_mul(OptContext *ctx, TCGOp *op) |
1022 | { | |
1023 | return fold_const2(ctx, op); | |
1024 | } | |
1025 | ||
1026 | static bool fold_mul_highpart(OptContext *ctx, TCGOp *op) | |
1027 | { | |
1028 | return fold_const2(ctx, op); | |
1029 | } | |
1030 | ||
6b8ac0d1 RH |
1031 | static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op) |
1032 | { | |
1033 | if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) { | |
1034 | uint32_t a = arg_info(op->args[2])->val; | |
1035 | uint32_t b = arg_info(op->args[3])->val; | |
1036 | uint64_t r = (uint64_t)a * b; | |
1037 | TCGArg rl, rh; | |
1038 | TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_mov_i32); | |
1039 | ||
1040 | rl = op->args[0]; | |
1041 | rh = op->args[1]; | |
1042 | tcg_opt_gen_movi(ctx, op, rl, (int32_t)r); | |
1043 | tcg_opt_gen_movi(ctx, op2, rh, (int32_t)(r >> 32)); | |
1044 | return true; | |
1045 | } | |
1046 | return false; | |
1047 | } | |
1048 | ||
2f9f08ba RH |
1049 | static bool fold_nand(OptContext *ctx, TCGOp *op) |
1050 | { | |
1051 | return fold_const2(ctx, op); | |
1052 | } | |
1053 | ||
1054 | static bool fold_neg(OptContext *ctx, TCGOp *op) | |
1055 | { | |
1056 | return fold_const1(ctx, op); | |
1057 | } | |
1058 | ||
1059 | static bool fold_nor(OptContext *ctx, TCGOp *op) | |
1060 | { | |
1061 | return fold_const2(ctx, op); | |
1062 | } | |
1063 | ||
1064 | static bool fold_not(OptContext *ctx, TCGOp *op) | |
1065 | { | |
1066 | return fold_const1(ctx, op); | |
1067 | } | |
1068 | ||
1069 | static bool fold_or(OptContext *ctx, TCGOp *op) | |
1070 | { | |
1071 | return fold_const2(ctx, op); | |
1072 | } | |
1073 | ||
1074 | static bool fold_orc(OptContext *ctx, TCGOp *op) | |
1075 | { | |
1076 | return fold_const2(ctx, op); | |
1077 | } | |
1078 | ||
3eefdf2b RH |
1079 | static bool fold_qemu_ld(OptContext *ctx, TCGOp *op) |
1080 | { | |
1081 | /* Opcodes that touch guest memory stop the mb optimization. */ | |
1082 | ctx->prev_mb = NULL; | |
1083 | return false; | |
1084 | } | |
1085 | ||
1086 | static bool fold_qemu_st(OptContext *ctx, TCGOp *op) | |
1087 | { | |
1088 | /* Opcodes that touch guest memory stop the mb optimization. */ | |
1089 | ctx->prev_mb = NULL; | |
1090 | return false; | |
1091 | } | |
1092 | ||
2f9f08ba RH |
1093 | static bool fold_remainder(OptContext *ctx, TCGOp *op) |
1094 | { | |
1095 | return fold_const2(ctx, op); | |
1096 | } | |
1097 | ||
c63ff55c RH |
1098 | static bool fold_setcond(OptContext *ctx, TCGOp *op) |
1099 | { | |
1100 | TCGCond cond = op->args[3]; | |
1101 | int i = do_constant_folding_cond(op->opc, op->args[1], op->args[2], cond); | |
1102 | ||
1103 | if (i >= 0) { | |
1104 | return tcg_opt_gen_movi(ctx, op, op->args[0], i); | |
1105 | } | |
1106 | return false; | |
1107 | } | |
1108 | ||
bc47b1aa RH |
1109 | static bool fold_setcond2(OptContext *ctx, TCGOp *op) |
1110 | { | |
1111 | TCGCond cond = op->args[5]; | |
1112 | int i = do_constant_folding_cond2(&op->args[1], &op->args[3], cond); | |
1113 | int inv = 0; | |
1114 | ||
1115 | if (i >= 0) { | |
1116 | goto do_setcond_const; | |
1117 | } | |
1118 | ||
1119 | switch (cond) { | |
1120 | case TCG_COND_LT: | |
1121 | case TCG_COND_GE: | |
1122 | /* | |
1123 | * Simplify LT/GE comparisons vs zero to a single compare | |
1124 | * vs the high word of the input. | |
1125 | */ | |
1126 | if (arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0 && | |
1127 | arg_is_const(op->args[4]) && arg_info(op->args[4])->val == 0) { | |
1128 | goto do_setcond_high; | |
1129 | } | |
1130 | break; | |
1131 | ||
1132 | case TCG_COND_NE: | |
1133 | inv = 1; | |
1134 | QEMU_FALLTHROUGH; | |
1135 | case TCG_COND_EQ: | |
1136 | /* | |
1137 | * Simplify EQ/NE comparisons where one of the pairs | |
1138 | * can be simplified. | |
1139 | */ | |
1140 | i = do_constant_folding_cond(INDEX_op_setcond_i32, op->args[1], | |
1141 | op->args[3], cond); | |
1142 | switch (i ^ inv) { | |
1143 | case 0: | |
1144 | goto do_setcond_const; | |
1145 | case 1: | |
1146 | goto do_setcond_high; | |
1147 | } | |
1148 | ||
1149 | i = do_constant_folding_cond(INDEX_op_setcond_i32, op->args[2], | |
1150 | op->args[4], cond); | |
1151 | switch (i ^ inv) { | |
1152 | case 0: | |
1153 | goto do_setcond_const; | |
1154 | case 1: | |
1155 | op->args[2] = op->args[3]; | |
1156 | op->args[3] = cond; | |
1157 | op->opc = INDEX_op_setcond_i32; | |
1158 | break; | |
1159 | } | |
1160 | break; | |
1161 | ||
1162 | default: | |
1163 | break; | |
1164 | ||
1165 | do_setcond_high: | |
1166 | op->args[1] = op->args[2]; | |
1167 | op->args[2] = op->args[4]; | |
1168 | op->args[3] = cond; | |
1169 | op->opc = INDEX_op_setcond_i32; | |
1170 | break; | |
1171 | } | |
1172 | return false; | |
1173 | ||
1174 | do_setcond_const: | |
1175 | return tcg_opt_gen_movi(ctx, op, op->args[0], i); | |
1176 | } | |
1177 | ||
b6617c88 RH |
1178 | static bool fold_sextract(OptContext *ctx, TCGOp *op) |
1179 | { | |
1180 | if (arg_is_const(op->args[1])) { | |
1181 | uint64_t t; | |
1182 | ||
1183 | t = arg_info(op->args[1])->val; | |
1184 | t = sextract64(t, op->args[2], op->args[3]); | |
1185 | return tcg_opt_gen_movi(ctx, op, op->args[0], t); | |
1186 | } | |
1187 | return false; | |
1188 | } | |
1189 | ||
2f9f08ba RH |
1190 | static bool fold_shift(OptContext *ctx, TCGOp *op) |
1191 | { | |
1192 | return fold_const2(ctx, op); | |
1193 | } | |
1194 | ||
1195 | static bool fold_sub(OptContext *ctx, TCGOp *op) | |
1196 | { | |
1197 | return fold_const2(ctx, op); | |
1198 | } | |
1199 | ||
e3f7dc21 RH |
1200 | static bool fold_sub2_i32(OptContext *ctx, TCGOp *op) |
1201 | { | |
1202 | return fold_addsub2_i32(ctx, op, false); | |
1203 | } | |
1204 | ||
2f9f08ba RH |
1205 | static bool fold_xor(OptContext *ctx, TCGOp *op) |
1206 | { | |
1207 | return fold_const2(ctx, op); | |
1208 | } | |
1209 | ||
22613af4 | 1210 | /* Propagate constants and copies, fold constant expressions. */ |
36e60ef6 | 1211 | void tcg_optimize(TCGContext *s) |
8f2e8c07 | 1212 | { |
5cf32be7 | 1213 | int nb_temps, i; |
d0ed5151 | 1214 | TCGOp *op, *op_next; |
dc84988a | 1215 | OptContext ctx = { .tcg = s }; |
5d8f5363 | 1216 | |
22613af4 KB |
1217 | /* Array VALS has an element for each temp. |
1218 | If this temp holds a constant then its value is kept in VALS' element. | |
e590d4e6 AJ |
1219 | If this temp is a copy of other ones then the other copies are |
1220 | available through the doubly linked circular list. */ | |
8f2e8c07 KB |
1221 | |
1222 | nb_temps = s->nb_temps; | |
8f17a975 RH |
1223 | for (i = 0; i < nb_temps; ++i) { |
1224 | s->temps[i].state_ptr = NULL; | |
1225 | } | |
8f2e8c07 | 1226 | |
15fa08f8 | 1227 | QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) { |
b1fde411 | 1228 | uint64_t z_mask, partmask, affected, tmp; |
c45cb8bb | 1229 | TCGOpcode opc = op->opc; |
5cf32be7 | 1230 | const TCGOpDef *def; |
404a148d | 1231 | bool done = false; |
c45cb8bb | 1232 | |
5cf32be7 | 1233 | /* Calls are special. */ |
c45cb8bb | 1234 | if (opc == INDEX_op_call) { |
5cf32be7 RH |
1235 | fold_call(&ctx, op); |
1236 | continue; | |
cf066674 | 1237 | } |
5cf32be7 RH |
1238 | |
1239 | def = &tcg_op_defs[opc]; | |
ec5d4cbe RH |
1240 | init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs); |
1241 | copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs); | |
22613af4 | 1242 | |
53108fb5 | 1243 | /* For commutative operations make constant second argument */ |
c45cb8bb | 1244 | switch (opc) { |
170ba88f RH |
1245 | CASE_OP_32_64_VEC(add): |
1246 | CASE_OP_32_64_VEC(mul): | |
1247 | CASE_OP_32_64_VEC(and): | |
1248 | CASE_OP_32_64_VEC(or): | |
1249 | CASE_OP_32_64_VEC(xor): | |
cb25c80a RH |
1250 | CASE_OP_32_64(eqv): |
1251 | CASE_OP_32_64(nand): | |
1252 | CASE_OP_32_64(nor): | |
03271524 RH |
1253 | CASE_OP_32_64(muluh): |
1254 | CASE_OP_32_64(mulsh): | |
acd93701 | 1255 | swap_commutative(op->args[0], &op->args[1], &op->args[2]); |
53108fb5 | 1256 | break; |
65a7cce1 | 1257 | CASE_OP_32_64(brcond): |
acd93701 RH |
1258 | if (swap_commutative(-1, &op->args[0], &op->args[1])) { |
1259 | op->args[2] = tcg_swap_cond(op->args[2]); | |
65a7cce1 AJ |
1260 | } |
1261 | break; | |
1262 | CASE_OP_32_64(setcond): | |
acd93701 RH |
1263 | if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) { |
1264 | op->args[3] = tcg_swap_cond(op->args[3]); | |
65a7cce1 AJ |
1265 | } |
1266 | break; | |
fa01a208 | 1267 | CASE_OP_32_64(movcond): |
acd93701 RH |
1268 | if (swap_commutative(-1, &op->args[1], &op->args[2])) { |
1269 | op->args[5] = tcg_swap_cond(op->args[5]); | |
5d8f5363 RH |
1270 | } |
1271 | /* For movcond, we canonicalize the "false" input reg to match | |
1272 | the destination reg so that the tcg backend can implement | |
1273 | a "move if true" operation. */ | |
acd93701 RH |
1274 | if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) { |
1275 | op->args[5] = tcg_invert_cond(op->args[5]); | |
fa01a208 | 1276 | } |
1e484e61 | 1277 | break; |
d7156f7c | 1278 | CASE_OP_32_64(add2): |
acd93701 RH |
1279 | swap_commutative(op->args[0], &op->args[2], &op->args[4]); |
1280 | swap_commutative(op->args[1], &op->args[3], &op->args[5]); | |
1e484e61 | 1281 | break; |
d7156f7c | 1282 | CASE_OP_32_64(mulu2): |
4d3203fd | 1283 | CASE_OP_32_64(muls2): |
acd93701 | 1284 | swap_commutative(op->args[0], &op->args[2], &op->args[3]); |
1414968a | 1285 | break; |
0bfcb865 | 1286 | case INDEX_op_brcond2_i32: |
acd93701 RH |
1287 | if (swap_commutative2(&op->args[0], &op->args[2])) { |
1288 | op->args[4] = tcg_swap_cond(op->args[4]); | |
0bfcb865 RH |
1289 | } |
1290 | break; | |
1291 | case INDEX_op_setcond2_i32: | |
acd93701 RH |
1292 | if (swap_commutative2(&op->args[1], &op->args[3])) { |
1293 | op->args[5] = tcg_swap_cond(op->args[5]); | |
0bfcb865 RH |
1294 | } |
1295 | break; | |
53108fb5 KB |
1296 | default: |
1297 | break; | |
1298 | } | |
1299 | ||
2d497542 RH |
1300 | /* Simplify expressions for "shift/rot r, 0, a => movi r, 0", |
1301 | and "sub r, 0, a => neg r, a" case. */ | |
c45cb8bb | 1302 | switch (opc) { |
01ee5282 AJ |
1303 | CASE_OP_32_64(shl): |
1304 | CASE_OP_32_64(shr): | |
1305 | CASE_OP_32_64(sar): | |
1306 | CASE_OP_32_64(rotl): | |
1307 | CASE_OP_32_64(rotr): | |
6349039d RH |
1308 | if (arg_is_const(op->args[1]) |
1309 | && arg_info(op->args[1])->val == 0) { | |
dc84988a | 1310 | tcg_opt_gen_movi(&ctx, op, op->args[0], 0); |
01ee5282 AJ |
1311 | continue; |
1312 | } | |
1313 | break; | |
170ba88f | 1314 | CASE_OP_32_64_VEC(sub): |
2d497542 RH |
1315 | { |
1316 | TCGOpcode neg_op; | |
1317 | bool have_neg; | |
1318 | ||
6349039d | 1319 | if (arg_is_const(op->args[2])) { |
2d497542 RH |
1320 | /* Proceed with possible constant folding. */ |
1321 | break; | |
1322 | } | |
c45cb8bb | 1323 | if (opc == INDEX_op_sub_i32) { |
2d497542 RH |
1324 | neg_op = INDEX_op_neg_i32; |
1325 | have_neg = TCG_TARGET_HAS_neg_i32; | |
170ba88f | 1326 | } else if (opc == INDEX_op_sub_i64) { |
2d497542 RH |
1327 | neg_op = INDEX_op_neg_i64; |
1328 | have_neg = TCG_TARGET_HAS_neg_i64; | |
ac383dde RH |
1329 | } else if (TCG_TARGET_HAS_neg_vec) { |
1330 | TCGType type = TCGOP_VECL(op) + TCG_TYPE_V64; | |
1331 | unsigned vece = TCGOP_VECE(op); | |
170ba88f | 1332 | neg_op = INDEX_op_neg_vec; |
ac383dde RH |
1333 | have_neg = tcg_can_emit_vec_op(neg_op, type, vece) > 0; |
1334 | } else { | |
1335 | break; | |
2d497542 RH |
1336 | } |
1337 | if (!have_neg) { | |
1338 | break; | |
1339 | } | |
6349039d RH |
1340 | if (arg_is_const(op->args[1]) |
1341 | && arg_info(op->args[1])->val == 0) { | |
c45cb8bb | 1342 | op->opc = neg_op; |
acd93701 RH |
1343 | reset_temp(op->args[0]); |
1344 | op->args[1] = op->args[2]; | |
2d497542 RH |
1345 | continue; |
1346 | } | |
1347 | } | |
1348 | break; | |
170ba88f | 1349 | CASE_OP_32_64_VEC(xor): |
e201b564 | 1350 | CASE_OP_32_64(nand): |
6349039d RH |
1351 | if (!arg_is_const(op->args[1]) |
1352 | && arg_is_const(op->args[2]) | |
1353 | && arg_info(op->args[2])->val == -1) { | |
e201b564 RH |
1354 | i = 1; |
1355 | goto try_not; | |
1356 | } | |
1357 | break; | |
1358 | CASE_OP_32_64(nor): | |
6349039d RH |
1359 | if (!arg_is_const(op->args[1]) |
1360 | && arg_is_const(op->args[2]) | |
1361 | && arg_info(op->args[2])->val == 0) { | |
e201b564 RH |
1362 | i = 1; |
1363 | goto try_not; | |
1364 | } | |
1365 | break; | |
170ba88f | 1366 | CASE_OP_32_64_VEC(andc): |
6349039d RH |
1367 | if (!arg_is_const(op->args[2]) |
1368 | && arg_is_const(op->args[1]) | |
1369 | && arg_info(op->args[1])->val == -1) { | |
e201b564 RH |
1370 | i = 2; |
1371 | goto try_not; | |
1372 | } | |
1373 | break; | |
170ba88f | 1374 | CASE_OP_32_64_VEC(orc): |
e201b564 | 1375 | CASE_OP_32_64(eqv): |
6349039d RH |
1376 | if (!arg_is_const(op->args[2]) |
1377 | && arg_is_const(op->args[1]) | |
1378 | && arg_info(op->args[1])->val == 0) { | |
e201b564 RH |
1379 | i = 2; |
1380 | goto try_not; | |
1381 | } | |
1382 | break; | |
1383 | try_not: | |
1384 | { | |
1385 | TCGOpcode not_op; | |
1386 | bool have_not; | |
1387 | ||
170ba88f RH |
1388 | if (def->flags & TCG_OPF_VECTOR) { |
1389 | not_op = INDEX_op_not_vec; | |
1390 | have_not = TCG_TARGET_HAS_not_vec; | |
1391 | } else if (def->flags & TCG_OPF_64BIT) { | |
e201b564 RH |
1392 | not_op = INDEX_op_not_i64; |
1393 | have_not = TCG_TARGET_HAS_not_i64; | |
1394 | } else { | |
1395 | not_op = INDEX_op_not_i32; | |
1396 | have_not = TCG_TARGET_HAS_not_i32; | |
1397 | } | |
1398 | if (!have_not) { | |
1399 | break; | |
1400 | } | |
c45cb8bb | 1401 | op->opc = not_op; |
acd93701 RH |
1402 | reset_temp(op->args[0]); |
1403 | op->args[1] = op->args[i]; | |
e201b564 RH |
1404 | continue; |
1405 | } | |
01ee5282 AJ |
1406 | default: |
1407 | break; | |
1408 | } | |
1409 | ||
464a1441 | 1410 | /* Simplify expression for "op r, a, const => mov r, a" cases */ |
c45cb8bb | 1411 | switch (opc) { |
170ba88f RH |
1412 | CASE_OP_32_64_VEC(add): |
1413 | CASE_OP_32_64_VEC(sub): | |
1414 | CASE_OP_32_64_VEC(or): | |
1415 | CASE_OP_32_64_VEC(xor): | |
1416 | CASE_OP_32_64_VEC(andc): | |
55c0975c KB |
1417 | CASE_OP_32_64(shl): |
1418 | CASE_OP_32_64(shr): | |
1419 | CASE_OP_32_64(sar): | |
25c4d9cc RH |
1420 | CASE_OP_32_64(rotl): |
1421 | CASE_OP_32_64(rotr): | |
6349039d RH |
1422 | if (!arg_is_const(op->args[1]) |
1423 | && arg_is_const(op->args[2]) | |
1424 | && arg_info(op->args[2])->val == 0) { | |
dc84988a | 1425 | tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]); |
97a79eb7 | 1426 | continue; |
53108fb5 KB |
1427 | } |
1428 | break; | |
170ba88f RH |
1429 | CASE_OP_32_64_VEC(and): |
1430 | CASE_OP_32_64_VEC(orc): | |
464a1441 | 1431 | CASE_OP_32_64(eqv): |
6349039d RH |
1432 | if (!arg_is_const(op->args[1]) |
1433 | && arg_is_const(op->args[2]) | |
1434 | && arg_info(op->args[2])->val == -1) { | |
dc84988a | 1435 | tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]); |
97a79eb7 | 1436 | continue; |
464a1441 RH |
1437 | } |
1438 | break; | |
56e49438 AJ |
1439 | default: |
1440 | break; | |
1441 | } | |
1442 | ||
3031244b AJ |
1443 | /* Simplify using known-zero bits. Currently only ops with a single |
1444 | output argument is supported. */ | |
b1fde411 | 1445 | z_mask = -1; |
633f6502 | 1446 | affected = -1; |
c45cb8bb | 1447 | switch (opc) { |
3a9d8b17 | 1448 | CASE_OP_32_64(ext8s): |
b1fde411 | 1449 | if ((arg_info(op->args[1])->z_mask & 0x80) != 0) { |
3a9d8b17 PB |
1450 | break; |
1451 | } | |
d84568b7 | 1452 | QEMU_FALLTHROUGH; |
3a9d8b17 | 1453 | CASE_OP_32_64(ext8u): |
b1fde411 | 1454 | z_mask = 0xff; |
3a9d8b17 PB |
1455 | goto and_const; |
1456 | CASE_OP_32_64(ext16s): | |
b1fde411 | 1457 | if ((arg_info(op->args[1])->z_mask & 0x8000) != 0) { |
3a9d8b17 PB |
1458 | break; |
1459 | } | |
d84568b7 | 1460 | QEMU_FALLTHROUGH; |
3a9d8b17 | 1461 | CASE_OP_32_64(ext16u): |
b1fde411 | 1462 | z_mask = 0xffff; |
3a9d8b17 PB |
1463 | goto and_const; |
1464 | case INDEX_op_ext32s_i64: | |
b1fde411 | 1465 | if ((arg_info(op->args[1])->z_mask & 0x80000000) != 0) { |
3a9d8b17 PB |
1466 | break; |
1467 | } | |
d84568b7 | 1468 | QEMU_FALLTHROUGH; |
3a9d8b17 | 1469 | case INDEX_op_ext32u_i64: |
b1fde411 | 1470 | z_mask = 0xffffffffU; |
3a9d8b17 PB |
1471 | goto and_const; |
1472 | ||
1473 | CASE_OP_32_64(and): | |
b1fde411 | 1474 | z_mask = arg_info(op->args[2])->z_mask; |
6349039d | 1475 | if (arg_is_const(op->args[2])) { |
3a9d8b17 | 1476 | and_const: |
b1fde411 | 1477 | affected = arg_info(op->args[1])->z_mask & ~z_mask; |
3a9d8b17 | 1478 | } |
b1fde411 | 1479 | z_mask = arg_info(op->args[1])->z_mask & z_mask; |
3a9d8b17 PB |
1480 | break; |
1481 | ||
8bcb5c8f | 1482 | case INDEX_op_ext_i32_i64: |
b1fde411 | 1483 | if ((arg_info(op->args[1])->z_mask & 0x80000000) != 0) { |
8bcb5c8f AJ |
1484 | break; |
1485 | } | |
d84568b7 | 1486 | QEMU_FALLTHROUGH; |
8bcb5c8f AJ |
1487 | case INDEX_op_extu_i32_i64: |
1488 | /* We do not compute affected as it is a size changing op. */ | |
b1fde411 | 1489 | z_mask = (uint32_t)arg_info(op->args[1])->z_mask; |
8bcb5c8f AJ |
1490 | break; |
1491 | ||
23ec69ed RH |
1492 | CASE_OP_32_64(andc): |
1493 | /* Known-zeros does not imply known-ones. Therefore unless | |
acd93701 | 1494 | op->args[2] is constant, we can't infer anything from it. */ |
6349039d | 1495 | if (arg_is_const(op->args[2])) { |
b1fde411 | 1496 | z_mask = ~arg_info(op->args[2])->z_mask; |
23ec69ed RH |
1497 | goto and_const; |
1498 | } | |
6349039d | 1499 | /* But we certainly know nothing outside args[1] may be set. */ |
b1fde411 | 1500 | z_mask = arg_info(op->args[1])->z_mask; |
23ec69ed RH |
1501 | break; |
1502 | ||
e46b225a | 1503 | case INDEX_op_sar_i32: |
6349039d RH |
1504 | if (arg_is_const(op->args[2])) { |
1505 | tmp = arg_info(op->args[2])->val & 31; | |
b1fde411 | 1506 | z_mask = (int32_t)arg_info(op->args[1])->z_mask >> tmp; |
e46b225a AJ |
1507 | } |
1508 | break; | |
1509 | case INDEX_op_sar_i64: | |
6349039d RH |
1510 | if (arg_is_const(op->args[2])) { |
1511 | tmp = arg_info(op->args[2])->val & 63; | |
b1fde411 | 1512 | z_mask = (int64_t)arg_info(op->args[1])->z_mask >> tmp; |
3a9d8b17 PB |
1513 | } |
1514 | break; | |
1515 | ||
e46b225a | 1516 | case INDEX_op_shr_i32: |
6349039d RH |
1517 | if (arg_is_const(op->args[2])) { |
1518 | tmp = arg_info(op->args[2])->val & 31; | |
b1fde411 | 1519 | z_mask = (uint32_t)arg_info(op->args[1])->z_mask >> tmp; |
e46b225a AJ |
1520 | } |
1521 | break; | |
1522 | case INDEX_op_shr_i64: | |
6349039d RH |
1523 | if (arg_is_const(op->args[2])) { |
1524 | tmp = arg_info(op->args[2])->val & 63; | |
b1fde411 | 1525 | z_mask = (uint64_t)arg_info(op->args[1])->z_mask >> tmp; |
3a9d8b17 PB |
1526 | } |
1527 | break; | |
1528 | ||
609ad705 | 1529 | case INDEX_op_extrl_i64_i32: |
b1fde411 | 1530 | z_mask = (uint32_t)arg_info(op->args[1])->z_mask; |
609ad705 RH |
1531 | break; |
1532 | case INDEX_op_extrh_i64_i32: | |
b1fde411 | 1533 | z_mask = (uint64_t)arg_info(op->args[1])->z_mask >> 32; |
4bb7a41e RH |
1534 | break; |
1535 | ||
3a9d8b17 | 1536 | CASE_OP_32_64(shl): |
6349039d RH |
1537 | if (arg_is_const(op->args[2])) { |
1538 | tmp = arg_info(op->args[2])->val & (TCG_TARGET_REG_BITS - 1); | |
b1fde411 | 1539 | z_mask = arg_info(op->args[1])->z_mask << tmp; |
3a9d8b17 PB |
1540 | } |
1541 | break; | |
1542 | ||
1543 | CASE_OP_32_64(neg): | |
1544 | /* Set to 1 all bits to the left of the rightmost. */ | |
b1fde411 RH |
1545 | z_mask = -(arg_info(op->args[1])->z_mask |
1546 | & -arg_info(op->args[1])->z_mask); | |
3a9d8b17 PB |
1547 | break; |
1548 | ||
1549 | CASE_OP_32_64(deposit): | |
b1fde411 RH |
1550 | z_mask = deposit64(arg_info(op->args[1])->z_mask, |
1551 | op->args[3], op->args[4], | |
1552 | arg_info(op->args[2])->z_mask); | |
3a9d8b17 PB |
1553 | break; |
1554 | ||
7ec8bab3 | 1555 | CASE_OP_32_64(extract): |
b1fde411 RH |
1556 | z_mask = extract64(arg_info(op->args[1])->z_mask, |
1557 | op->args[2], op->args[3]); | |
acd93701 | 1558 | if (op->args[2] == 0) { |
b1fde411 | 1559 | affected = arg_info(op->args[1])->z_mask & ~z_mask; |
7ec8bab3 RH |
1560 | } |
1561 | break; | |
1562 | CASE_OP_32_64(sextract): | |
b1fde411 RH |
1563 | z_mask = sextract64(arg_info(op->args[1])->z_mask, |
1564 | op->args[2], op->args[3]); | |
1565 | if (op->args[2] == 0 && (tcg_target_long)z_mask >= 0) { | |
1566 | affected = arg_info(op->args[1])->z_mask & ~z_mask; | |
7ec8bab3 RH |
1567 | } |
1568 | break; | |
1569 | ||
3a9d8b17 PB |
1570 | CASE_OP_32_64(or): |
1571 | CASE_OP_32_64(xor): | |
b1fde411 RH |
1572 | z_mask = arg_info(op->args[1])->z_mask |
1573 | | arg_info(op->args[2])->z_mask; | |
3a9d8b17 PB |
1574 | break; |
1575 | ||
0e28d006 RH |
1576 | case INDEX_op_clz_i32: |
1577 | case INDEX_op_ctz_i32: | |
b1fde411 | 1578 | z_mask = arg_info(op->args[2])->z_mask | 31; |
0e28d006 RH |
1579 | break; |
1580 | ||
1581 | case INDEX_op_clz_i64: | |
1582 | case INDEX_op_ctz_i64: | |
b1fde411 | 1583 | z_mask = arg_info(op->args[2])->z_mask | 63; |
0e28d006 RH |
1584 | break; |
1585 | ||
a768e4e9 | 1586 | case INDEX_op_ctpop_i32: |
b1fde411 | 1587 | z_mask = 32 | 31; |
a768e4e9 RH |
1588 | break; |
1589 | case INDEX_op_ctpop_i64: | |
b1fde411 | 1590 | z_mask = 64 | 63; |
a768e4e9 RH |
1591 | break; |
1592 | ||
3a9d8b17 | 1593 | CASE_OP_32_64(setcond): |
a763551a | 1594 | case INDEX_op_setcond2_i32: |
b1fde411 | 1595 | z_mask = 1; |
3a9d8b17 PB |
1596 | break; |
1597 | ||
1598 | CASE_OP_32_64(movcond): | |
b1fde411 RH |
1599 | z_mask = arg_info(op->args[3])->z_mask |
1600 | | arg_info(op->args[4])->z_mask; | |
3a9d8b17 PB |
1601 | break; |
1602 | ||
c8d70272 | 1603 | CASE_OP_32_64(ld8u): |
b1fde411 | 1604 | z_mask = 0xff; |
c8d70272 AJ |
1605 | break; |
1606 | CASE_OP_32_64(ld16u): | |
b1fde411 | 1607 | z_mask = 0xffff; |
c8d70272 AJ |
1608 | break; |
1609 | case INDEX_op_ld32u_i64: | |
b1fde411 | 1610 | z_mask = 0xffffffffu; |
c8d70272 AJ |
1611 | break; |
1612 | ||
1613 | CASE_OP_32_64(qemu_ld): | |
1614 | { | |
ec5d4cbe | 1615 | MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs]; |
14776ab5 | 1616 | MemOp mop = get_memop(oi); |
c8d70272 | 1617 | if (!(mop & MO_SIGN)) { |
b1fde411 | 1618 | z_mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1; |
c8d70272 AJ |
1619 | } |
1620 | } | |
1621 | break; | |
1622 | ||
0b76ff8f | 1623 | CASE_OP_32_64(bswap16): |
b1fde411 RH |
1624 | z_mask = arg_info(op->args[1])->z_mask; |
1625 | if (z_mask <= 0xffff) { | |
0b76ff8f RH |
1626 | op->args[2] |= TCG_BSWAP_IZ; |
1627 | } | |
b1fde411 | 1628 | z_mask = bswap16(z_mask); |
0b76ff8f RH |
1629 | switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) { |
1630 | case TCG_BSWAP_OZ: | |
1631 | break; | |
1632 | case TCG_BSWAP_OS: | |
b1fde411 | 1633 | z_mask = (int16_t)z_mask; |
0b76ff8f RH |
1634 | break; |
1635 | default: /* undefined high bits */ | |
b1fde411 | 1636 | z_mask |= MAKE_64BIT_MASK(16, 48); |
0b76ff8f RH |
1637 | break; |
1638 | } | |
1639 | break; | |
1640 | ||
1641 | case INDEX_op_bswap32_i64: | |
b1fde411 RH |
1642 | z_mask = arg_info(op->args[1])->z_mask; |
1643 | if (z_mask <= 0xffffffffu) { | |
0b76ff8f RH |
1644 | op->args[2] |= TCG_BSWAP_IZ; |
1645 | } | |
b1fde411 | 1646 | z_mask = bswap32(z_mask); |
0b76ff8f RH |
1647 | switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) { |
1648 | case TCG_BSWAP_OZ: | |
1649 | break; | |
1650 | case TCG_BSWAP_OS: | |
b1fde411 | 1651 | z_mask = (int32_t)z_mask; |
0b76ff8f RH |
1652 | break; |
1653 | default: /* undefined high bits */ | |
b1fde411 | 1654 | z_mask |= MAKE_64BIT_MASK(32, 32); |
0b76ff8f RH |
1655 | break; |
1656 | } | |
1657 | break; | |
1658 | ||
3a9d8b17 PB |
1659 | default: |
1660 | break; | |
1661 | } | |
1662 | ||
bc8d688f RH |
1663 | /* 32-bit ops generate 32-bit results. For the result is zero test |
1664 | below, we can ignore high bits, but for further optimizations we | |
1665 | need to record that the high bits contain garbage. */ | |
b1fde411 | 1666 | partmask = z_mask; |
bc8d688f | 1667 | if (!(def->flags & TCG_OPF_64BIT)) { |
b1fde411 | 1668 | z_mask |= ~(tcg_target_ulong)0xffffffffu; |
24666baf RH |
1669 | partmask &= 0xffffffffu; |
1670 | affected &= 0xffffffffu; | |
f096dc96 | 1671 | } |
137f1f44 | 1672 | ctx.z_mask = z_mask; |
f096dc96 | 1673 | |
24666baf | 1674 | if (partmask == 0) { |
dc84988a | 1675 | tcg_opt_gen_movi(&ctx, op, op->args[0], 0); |
633f6502 PB |
1676 | continue; |
1677 | } | |
1678 | if (affected == 0) { | |
dc84988a | 1679 | tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]); |
633f6502 PB |
1680 | continue; |
1681 | } | |
1682 | ||
56e49438 | 1683 | /* Simplify expression for "op r, a, 0 => movi r, 0" cases */ |
c45cb8bb | 1684 | switch (opc) { |
170ba88f RH |
1685 | CASE_OP_32_64_VEC(and): |
1686 | CASE_OP_32_64_VEC(mul): | |
03271524 RH |
1687 | CASE_OP_32_64(muluh): |
1688 | CASE_OP_32_64(mulsh): | |
6349039d RH |
1689 | if (arg_is_const(op->args[2]) |
1690 | && arg_info(op->args[2])->val == 0) { | |
dc84988a | 1691 | tcg_opt_gen_movi(&ctx, op, op->args[0], 0); |
53108fb5 KB |
1692 | continue; |
1693 | } | |
1694 | break; | |
56e49438 AJ |
1695 | default: |
1696 | break; | |
1697 | } | |
1698 | ||
1699 | /* Simplify expression for "op r, a, a => mov r, a" cases */ | |
c45cb8bb | 1700 | switch (opc) { |
170ba88f RH |
1701 | CASE_OP_32_64_VEC(or): |
1702 | CASE_OP_32_64_VEC(and): | |
6349039d | 1703 | if (args_are_copies(op->args[1], op->args[2])) { |
dc84988a | 1704 | tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]); |
9a81090b KB |
1705 | continue; |
1706 | } | |
1707 | break; | |
fe0de7aa BS |
1708 | default: |
1709 | break; | |
53108fb5 KB |
1710 | } |
1711 | ||
3c94193e | 1712 | /* Simplify expression for "op r, a, a => movi r, 0" cases */ |
c45cb8bb | 1713 | switch (opc) { |
170ba88f RH |
1714 | CASE_OP_32_64_VEC(andc): |
1715 | CASE_OP_32_64_VEC(sub): | |
1716 | CASE_OP_32_64_VEC(xor): | |
6349039d | 1717 | if (args_are_copies(op->args[1], op->args[2])) { |
dc84988a | 1718 | tcg_opt_gen_movi(&ctx, op, op->args[0], 0); |
3c94193e AJ |
1719 | continue; |
1720 | } | |
1721 | break; | |
1722 | default: | |
1723 | break; | |
1724 | } | |
1725 | ||
22613af4 KB |
1726 | /* Propagate constants through copy operations and do constant |
1727 | folding. Constants will be substituted to arguments by register | |
1728 | allocator where needed and possible. Also detect copies. */ | |
c45cb8bb | 1729 | switch (opc) { |
170ba88f | 1730 | CASE_OP_32_64_VEC(mov): |
404a148d RH |
1731 | done = tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]); |
1732 | break; | |
6e14e91b | 1733 | |
170ba88f RH |
1734 | case INDEX_op_dup_vec: |
1735 | if (arg_is_const(op->args[1])) { | |
1736 | tmp = arg_info(op->args[1])->val; | |
1737 | tmp = dup_const(TCGOP_VECE(op), tmp); | |
dc84988a | 1738 | tcg_opt_gen_movi(&ctx, op, op->args[0], tmp); |
b10f3833 | 1739 | continue; |
170ba88f | 1740 | } |
b10f3833 | 1741 | break; |
170ba88f | 1742 | |
1dc4fe70 RH |
1743 | case INDEX_op_dup2_vec: |
1744 | assert(TCG_TARGET_REG_BITS == 32); | |
1745 | if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { | |
dc84988a | 1746 | tcg_opt_gen_movi(&ctx, op, op->args[0], |
0b4286dd RH |
1747 | deposit64(arg_info(op->args[1])->val, 32, 32, |
1748 | arg_info(op->args[2])->val)); | |
b10f3833 | 1749 | continue; |
1dc4fe70 RH |
1750 | } else if (args_are_copies(op->args[1], op->args[2])) { |
1751 | op->opc = INDEX_op_dup_vec; | |
1752 | TCGOP_VECE(op) = MO_32; | |
1dc4fe70 | 1753 | } |
b10f3833 | 1754 | break; |
1dc4fe70 | 1755 | |
2f9f08ba RH |
1756 | default: |
1757 | break; | |
1758 | ||
1759 | /* ---------------------------------------------------------- */ | |
1760 | /* Sorted alphabetically by opcode as much as possible. */ | |
1761 | ||
1762 | CASE_OP_32_64_VEC(add): | |
1763 | done = fold_add(&ctx, op); | |
1764 | break; | |
e3f7dc21 RH |
1765 | case INDEX_op_add2_i32: |
1766 | done = fold_add2_i32(&ctx, op); | |
1767 | break; | |
2f9f08ba RH |
1768 | CASE_OP_32_64_VEC(and): |
1769 | done = fold_and(&ctx, op); | |
1770 | break; | |
1771 | CASE_OP_32_64_VEC(andc): | |
1772 | done = fold_andc(&ctx, op); | |
1773 | break; | |
079b0804 RH |
1774 | CASE_OP_32_64(brcond): |
1775 | done = fold_brcond(&ctx, op); | |
1776 | break; | |
764d2aba RH |
1777 | case INDEX_op_brcond2_i32: |
1778 | done = fold_brcond2(&ctx, op); | |
1779 | break; | |
09bacdc2 RH |
1780 | CASE_OP_32_64(bswap16): |
1781 | CASE_OP_32_64(bswap32): | |
1782 | case INDEX_op_bswap64_i64: | |
1783 | done = fold_bswap(&ctx, op); | |
1784 | break; | |
30dd0bfe RH |
1785 | CASE_OP_32_64(clz): |
1786 | CASE_OP_32_64(ctz): | |
1787 | done = fold_count_zeros(&ctx, op); | |
1788 | break; | |
2f9f08ba RH |
1789 | CASE_OP_32_64(ctpop): |
1790 | done = fold_ctpop(&ctx, op); | |
1791 | break; | |
1b1907b8 RH |
1792 | CASE_OP_32_64(deposit): |
1793 | done = fold_deposit(&ctx, op); | |
1794 | break; | |
2f9f08ba RH |
1795 | CASE_OP_32_64(div): |
1796 | CASE_OP_32_64(divu): | |
1797 | done = fold_divide(&ctx, op); | |
1798 | break; | |
1799 | CASE_OP_32_64(eqv): | |
1800 | done = fold_eqv(&ctx, op); | |
1801 | break; | |
b6617c88 RH |
1802 | CASE_OP_32_64(extract): |
1803 | done = fold_extract(&ctx, op); | |
1804 | break; | |
dcd08996 RH |
1805 | CASE_OP_32_64(extract2): |
1806 | done = fold_extract2(&ctx, op); | |
1807 | break; | |
2f9f08ba RH |
1808 | CASE_OP_32_64(ext8s): |
1809 | CASE_OP_32_64(ext16s): | |
1810 | case INDEX_op_ext32s_i64: | |
1811 | case INDEX_op_ext_i32_i64: | |
1812 | done = fold_exts(&ctx, op); | |
1813 | break; | |
1814 | CASE_OP_32_64(ext8u): | |
1815 | CASE_OP_32_64(ext16u): | |
1816 | case INDEX_op_ext32u_i64: | |
1817 | case INDEX_op_extu_i32_i64: | |
1818 | case INDEX_op_extrl_i64_i32: | |
1819 | case INDEX_op_extrh_i64_i32: | |
1820 | done = fold_extu(&ctx, op); | |
1821 | break; | |
3eefdf2b RH |
1822 | case INDEX_op_mb: |
1823 | done = fold_mb(&ctx, op); | |
0c310a30 RH |
1824 | break; |
1825 | CASE_OP_32_64(movcond): | |
1826 | done = fold_movcond(&ctx, op); | |
3eefdf2b | 1827 | break; |
2f9f08ba RH |
1828 | CASE_OP_32_64(mul): |
1829 | done = fold_mul(&ctx, op); | |
1830 | break; | |
1831 | CASE_OP_32_64(mulsh): | |
1832 | CASE_OP_32_64(muluh): | |
1833 | done = fold_mul_highpart(&ctx, op); | |
1834 | break; | |
6b8ac0d1 RH |
1835 | case INDEX_op_mulu2_i32: |
1836 | done = fold_mulu2_i32(&ctx, op); | |
1837 | break; | |
2f9f08ba RH |
1838 | CASE_OP_32_64(nand): |
1839 | done = fold_nand(&ctx, op); | |
1840 | break; | |
1841 | CASE_OP_32_64(neg): | |
1842 | done = fold_neg(&ctx, op); | |
1843 | break; | |
1844 | CASE_OP_32_64(nor): | |
1845 | done = fold_nor(&ctx, op); | |
1846 | break; | |
1847 | CASE_OP_32_64_VEC(not): | |
1848 | done = fold_not(&ctx, op); | |
1849 | break; | |
1850 | CASE_OP_32_64_VEC(or): | |
1851 | done = fold_or(&ctx, op); | |
1852 | break; | |
1853 | CASE_OP_32_64_VEC(orc): | |
1854 | done = fold_orc(&ctx, op); | |
1855 | break; | |
3eefdf2b RH |
1856 | case INDEX_op_qemu_ld_i32: |
1857 | case INDEX_op_qemu_ld_i64: | |
1858 | done = fold_qemu_ld(&ctx, op); | |
1859 | break; | |
1860 | case INDEX_op_qemu_st_i32: | |
1861 | case INDEX_op_qemu_st8_i32: | |
1862 | case INDEX_op_qemu_st_i64: | |
1863 | done = fold_qemu_st(&ctx, op); | |
1864 | break; | |
2f9f08ba RH |
1865 | CASE_OP_32_64(rem): |
1866 | CASE_OP_32_64(remu): | |
1867 | done = fold_remainder(&ctx, op); | |
1868 | break; | |
1869 | CASE_OP_32_64(rotl): | |
1870 | CASE_OP_32_64(rotr): | |
1871 | CASE_OP_32_64(sar): | |
1872 | CASE_OP_32_64(shl): | |
1873 | CASE_OP_32_64(shr): | |
1874 | done = fold_shift(&ctx, op); | |
1875 | break; | |
c63ff55c RH |
1876 | CASE_OP_32_64(setcond): |
1877 | done = fold_setcond(&ctx, op); | |
1878 | break; | |
bc47b1aa RH |
1879 | case INDEX_op_setcond2_i32: |
1880 | done = fold_setcond2(&ctx, op); | |
1881 | break; | |
b6617c88 RH |
1882 | CASE_OP_32_64(sextract): |
1883 | done = fold_sextract(&ctx, op); | |
1884 | break; | |
2f9f08ba RH |
1885 | CASE_OP_32_64_VEC(sub): |
1886 | done = fold_sub(&ctx, op); | |
1887 | break; | |
e3f7dc21 RH |
1888 | case INDEX_op_sub2_i32: |
1889 | done = fold_sub2_i32(&ctx, op); | |
1890 | break; | |
2f9f08ba RH |
1891 | CASE_OP_32_64_VEC(xor): |
1892 | done = fold_xor(&ctx, op); | |
b10f3833 RH |
1893 | break; |
1894 | } | |
1895 | ||
404a148d RH |
1896 | if (!done) { |
1897 | finish_folding(&ctx, op); | |
1898 | } | |
8f2e8c07 | 1899 | } |
8f2e8c07 | 1900 | } |