/*
 * Optimizations for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2010 Samsung Electronics.
 * Contributed by Kirill Batuzov <[email protected]>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/int128.h"
#include "tcg/tcg-op.h"
#include "tcg-internal.h"

#define CASE_OP_32_64(x)                        \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

#define CASE_OP_32_64_VEC(x)                    \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64):    \
        glue(glue(case INDEX_op_, x), _vec)

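/*
 * Example (illustrative note, not in the original source):
 * "CASE_OP_32_64(add):" expands to
 *     case INDEX_op_add_i32: case INDEX_op_add_i64:
 * so a single switch arm below covers both operand widths.
 */
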
typedef struct TempOptInfo {
    bool is_const;
    TCGTemp *prev_copy;
    TCGTemp *next_copy;
    uint64_t val;
    uint64_t z_mask;  /* mask bit is 0 if and only if value bit is 0 */
} TempOptInfo;

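/*
 * Worked example of z_mask (illustrative, not in the original source):
 * after "ext8u_i32 t1, t0" only the low 8 bits of t1 can be nonzero,
 * so t1's z_mask is 0xff.  For a constant temp the tightest mask is
 * the value itself: val == 0x2340 gives z_mask == 0x2340.
 */
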
typedef struct OptContext {
    TCGContext *tcg;
    TCGOp *prev_mb;
    TCGTempSet temps_used;

    /* In flight values from optimization. */
    uint64_t a_mask;  /* mask bit is 0 iff value identical to first input */
    uint64_t z_mask;  /* mask bit is 0 iff value bit is 0 */
    TCGType type;
} OptContext;

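/*
 * Worked example of a_mask (illustrative): when folding
 * "and_i32 t2, t1, $0xff" where t1's z_mask is already 0xff,
 * a_mask == z1 & ~z2 == 0, i.e. no bit of the result can differ
 * from t1, and fold_masks() below replaces the AND with a plain mov.
 */
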
static inline TempOptInfo *ts_info(TCGTemp *ts)
{
    return ts->state_ptr;
}

static inline TempOptInfo *arg_info(TCGArg arg)
{
    return ts_info(arg_temp(arg));
}

static inline bool ts_is_const(TCGTemp *ts)
{
    return ts_info(ts)->is_const;
}

static inline bool arg_is_const(TCGArg arg)
{
    return ts_is_const(arg_temp(arg));
}

static inline bool ts_is_copy(TCGTemp *ts)
{
    return ts_info(ts)->next_copy != ts;
}

/* Reset TEMP's state, possibly removing the temp from the list of copies. */
static void reset_ts(TCGTemp *ts)
{
    TempOptInfo *ti = ts_info(ts);
    TempOptInfo *pi = ts_info(ti->prev_copy);
    TempOptInfo *ni = ts_info(ti->next_copy);

    ni->prev_copy = ti->prev_copy;
    pi->next_copy = ti->next_copy;
    ti->next_copy = ts;
    ti->prev_copy = ts;
    ti->is_const = false;
    ti->z_mask = -1;
}

static void reset_temp(TCGArg arg)
{
    reset_ts(arg_temp(arg));
}

/* Initialize and activate a temporary. */
static void init_ts_info(OptContext *ctx, TCGTemp *ts)
{
    size_t idx = temp_idx(ts);
    TempOptInfo *ti;

    if (test_bit(idx, ctx->temps_used.l)) {
        return;
    }
    set_bit(idx, ctx->temps_used.l);

    ti = ts->state_ptr;
    if (ti == NULL) {
        ti = tcg_malloc(sizeof(TempOptInfo));
        ts->state_ptr = ti;
    }

    ti->next_copy = ts;
    ti->prev_copy = ts;
    if (ts->kind == TEMP_CONST) {
        ti->is_const = true;
        ti->val = ts->val;
        ti->z_mask = ts->val;
    } else {
        ti->is_const = false;
        ti->z_mask = -1;
    }
}

static TCGTemp *find_better_copy(TCGContext *s, TCGTemp *ts)
{
    TCGTemp *i, *g, *l;

    /* If this is already readonly, we can't do better. */
    if (temp_readonly(ts)) {
        return ts;
    }

    g = l = NULL;
    for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) {
        if (temp_readonly(i)) {
            return i;
        } else if (i->kind > ts->kind) {
            if (i->kind == TEMP_GLOBAL) {
                g = i;
            } else if (i->kind == TEMP_LOCAL) {
                l = i;
            }
        }
    }

    /* If we didn't find a better representation, return the same temp. */
    return g ? g : l ? l : ts;
}

static bool ts_are_copies(TCGTemp *ts1, TCGTemp *ts2)
{
    TCGTemp *i;

    if (ts1 == ts2) {
        return true;
    }

    if (!ts_is_copy(ts1) || !ts_is_copy(ts2)) {
        return false;
    }

    for (i = ts_info(ts1)->next_copy; i != ts1; i = ts_info(i)->next_copy) {
        if (i == ts2) {
            return true;
        }
    }

    return false;
}

static bool args_are_copies(TCGArg arg1, TCGArg arg2)
{
    return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
}

static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
{
    TCGTemp *dst_ts = arg_temp(dst);
    TCGTemp *src_ts = arg_temp(src);
    TempOptInfo *di;
    TempOptInfo *si;
    TCGOpcode new_op;

    if (ts_are_copies(dst_ts, src_ts)) {
        tcg_op_remove(ctx->tcg, op);
        return true;
    }

    reset_ts(dst_ts);
    di = ts_info(dst_ts);
    si = ts_info(src_ts);

    switch (ctx->type) {
    case TCG_TYPE_I32:
        new_op = INDEX_op_mov_i32;
        break;
    case TCG_TYPE_I64:
        new_op = INDEX_op_mov_i64;
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        /* TCGOP_VECL and TCGOP_VECE remain unchanged. */
        new_op = INDEX_op_mov_vec;
        break;
    default:
        g_assert_not_reached();
    }
    op->opc = new_op;
    op->args[0] = dst;
    op->args[1] = src;

    di->z_mask = si->z_mask;

    if (src_ts->type == dst_ts->type) {
        TempOptInfo *ni = ts_info(si->next_copy);

        di->next_copy = si->next_copy;
        di->prev_copy = src_ts;
        ni->prev_copy = dst_ts;
        si->next_copy = dst_ts;
        di->is_const = si->is_const;
        di->val = si->val;
    }
    return true;
}

static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
                             TCGArg dst, uint64_t val)
{
    TCGTemp *tv;

    if (ctx->type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    /* Convert movi to mov with constant temp. */
    tv = tcg_constant_internal(ctx->type, val);
    init_ts_info(ctx, tv);
    return tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
}

static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
{
    uint64_t l64, h64;

    switch (op) {
    CASE_OP_32_64(add):
        return x + y;

    CASE_OP_32_64(sub):
        return x - y;

    CASE_OP_32_64(mul):
        return x * y;

    CASE_OP_32_64(and):
        return x & y;

    CASE_OP_32_64(or):
        return x | y;

    CASE_OP_32_64(xor):
        return x ^ y;

    case INDEX_op_shl_i32:
        return (uint32_t)x << (y & 31);

    case INDEX_op_shl_i64:
        return (uint64_t)x << (y & 63);

    case INDEX_op_shr_i32:
        return (uint32_t)x >> (y & 31);

    case INDEX_op_shr_i64:
        return (uint64_t)x >> (y & 63);

    case INDEX_op_sar_i32:
        return (int32_t)x >> (y & 31);

    case INDEX_op_sar_i64:
        return (int64_t)x >> (y & 63);

    case INDEX_op_rotr_i32:
        return ror32(x, y & 31);

    case INDEX_op_rotr_i64:
        return ror64(x, y & 63);

    case INDEX_op_rotl_i32:
        return rol32(x, y & 31);

    case INDEX_op_rotl_i64:
        return rol64(x, y & 63);

    CASE_OP_32_64(not):
        return ~x;

    CASE_OP_32_64(neg):
        return -x;

    CASE_OP_32_64(andc):
        return x & ~y;

    CASE_OP_32_64(orc):
        return x | ~y;

    CASE_OP_32_64(eqv):
        return ~(x ^ y);

    CASE_OP_32_64(nand):
        return ~(x & y);

    CASE_OP_32_64(nor):
        return ~(x | y);

    case INDEX_op_clz_i32:
        return (uint32_t)x ? clz32(x) : y;

    case INDEX_op_clz_i64:
        return x ? clz64(x) : y;

    case INDEX_op_ctz_i32:
        return (uint32_t)x ? ctz32(x) : y;

    case INDEX_op_ctz_i64:
        return x ? ctz64(x) : y;

    case INDEX_op_ctpop_i32:
        return ctpop32(x);

    case INDEX_op_ctpop_i64:
        return ctpop64(x);

    CASE_OP_32_64(ext8s):
        return (int8_t)x;

    CASE_OP_32_64(ext16s):
        return (int16_t)x;

    CASE_OP_32_64(ext8u):
        return (uint8_t)x;

    CASE_OP_32_64(ext16u):
        return (uint16_t)x;

    CASE_OP_32_64(bswap16):
        x = bswap16(x);
        return y & TCG_BSWAP_OS ? (int16_t)x : x;

    CASE_OP_32_64(bswap32):
        x = bswap32(x);
        return y & TCG_BSWAP_OS ? (int32_t)x : x;

    case INDEX_op_bswap64_i64:
        return bswap64(x);

    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        return (int32_t)x;

    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_ext32u_i64:
        return (uint32_t)x;

    case INDEX_op_extrh_i64_i32:
        return (uint64_t)x >> 32;

    case INDEX_op_muluh_i32:
        return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
    case INDEX_op_mulsh_i32:
        return ((int64_t)(int32_t)x * (int32_t)y) >> 32;

    case INDEX_op_muluh_i64:
        mulu64(&l64, &h64, x, y);
        return h64;
    case INDEX_op_mulsh_i64:
        muls64(&l64, &h64, x, y);
        return h64;

    case INDEX_op_div_i32:
        /* Avoid crashing on divide by zero, otherwise undefined. */
        return (int32_t)x / ((int32_t)y ? : 1);
    case INDEX_op_divu_i32:
        return (uint32_t)x / ((uint32_t)y ? : 1);
    case INDEX_op_div_i64:
        return (int64_t)x / ((int64_t)y ? : 1);
    case INDEX_op_divu_i64:
        return (uint64_t)x / ((uint64_t)y ? : 1);

    case INDEX_op_rem_i32:
        return (int32_t)x % ((int32_t)y ? : 1);
    case INDEX_op_remu_i32:
        return (uint32_t)x % ((uint32_t)y ? : 1);
    case INDEX_op_rem_i64:
        return (int64_t)x % ((int64_t)y ? : 1);
    case INDEX_op_remu_i64:
        return (uint64_t)x % ((uint64_t)y ? : 1);

    default:
        fprintf(stderr,
                "Unrecognized operation %d in do_constant_folding.\n", op);
        tcg_abort();
    }
}

static uint64_t do_constant_folding(TCGOpcode op, TCGType type,
                                    uint64_t x, uint64_t y)
{
    uint64_t res = do_constant_folding_2(op, x, y);
    if (type == TCG_TYPE_I32) {
        res = (int32_t)res;
    }
    return res;
}

static bool do_constant_folding_cond_32(uint32_t x, uint32_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int32_t)x < (int32_t)y;
    case TCG_COND_GE:
        return (int32_t)x >= (int32_t)y;
    case TCG_COND_LE:
        return (int32_t)x <= (int32_t)y;
    case TCG_COND_GT:
        return (int32_t)x > (int32_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    default:
        tcg_abort();
    }
}

static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int64_t)x < (int64_t)y;
    case TCG_COND_GE:
        return (int64_t)x >= (int64_t)y;
    case TCG_COND_LE:
        return (int64_t)x <= (int64_t)y;
    case TCG_COND_GT:
        return (int64_t)x > (int64_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    default:
        tcg_abort();
    }
}

static bool do_constant_folding_cond_eq(TCGCond c)
{
    switch (c) {
    case TCG_COND_GT:
    case TCG_COND_LTU:
    case TCG_COND_LT:
    case TCG_COND_GTU:
    case TCG_COND_NE:
        return 0;
    case TCG_COND_GE:
    case TCG_COND_GEU:
    case TCG_COND_LE:
    case TCG_COND_LEU:
    case TCG_COND_EQ:
        return 1;
    default:
        tcg_abort();
    }
}

/*
 * Return -1 if the condition can't be simplified,
 * and the result of the condition (0 or 1) if it can.
 */
static int do_constant_folding_cond(TCGType type, TCGArg x,
                                    TCGArg y, TCGCond c)
{
    uint64_t xv = arg_info(x)->val;
    uint64_t yv = arg_info(y)->val;

    if (arg_is_const(x) && arg_is_const(y)) {
        switch (type) {
        case TCG_TYPE_I32:
            return do_constant_folding_cond_32(xv, yv, c);
        case TCG_TYPE_I64:
            return do_constant_folding_cond_64(xv, yv, c);
        default:
            /* Only scalar comparisons are optimizable */
            return -1;
        }
    } else if (args_are_copies(x, y)) {
        return do_constant_folding_cond_eq(c);
    } else if (arg_is_const(y) && yv == 0) {
        switch (c) {
        case TCG_COND_LTU:
            return 0;
        case TCG_COND_GEU:
            return 1;
        default:
            return -1;
        }
    }
    return -1;
}

/*
 * Return -1 if the condition can't be simplified,
 * and the result of the condition (0 or 1) if it can.
 */
static int do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
{
    TCGArg al = p1[0], ah = p1[1];
    TCGArg bl = p2[0], bh = p2[1];

    if (arg_is_const(bl) && arg_is_const(bh)) {
        tcg_target_ulong blv = arg_info(bl)->val;
        tcg_target_ulong bhv = arg_info(bh)->val;
        uint64_t b = deposit64(blv, 32, 32, bhv);

        if (arg_is_const(al) && arg_is_const(ah)) {
            tcg_target_ulong alv = arg_info(al)->val;
            tcg_target_ulong ahv = arg_info(ah)->val;
            uint64_t a = deposit64(alv, 32, 32, ahv);
            return do_constant_folding_cond_64(a, b, c);
        }
        if (b == 0) {
            switch (c) {
            case TCG_COND_LTU:
                return 0;
            case TCG_COND_GEU:
                return 1;
            default:
                break;
            }
        }
    }
    if (args_are_copies(al, bl) && args_are_copies(ah, bh)) {
        return do_constant_folding_cond_eq(c);
    }
    return -1;
}

/**
 * swap_commutative:
 * @dest: TCGArg of the destination argument, or NO_DEST.
 * @p1: first paired argument
 * @p2: second paired argument
 *
 * If *@p1 is a constant and *@p2 is not, swap.
 * If *@p2 matches @dest, swap.
 * Return true if a swap was performed.
 */

#define NO_DEST  temp_arg(NULL)

static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
{
    TCGArg a1 = *p1, a2 = *p2;
    int sum = 0;
    sum += arg_is_const(a1);
    sum -= arg_is_const(a2);

    /* Prefer the constant in second argument, and then the form
       op a, a, b, which is better handled on non-RISC hosts. */
    if (sum > 0 || (sum == 0 && dest == a2)) {
        *p1 = a2;
        *p2 = a1;
        return true;
    }
    return false;
}
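
/*
 * Usage example (illustrative): for "add_i32 t0, $5, t1" the constant is
 * the first input, so the pair is swapped to "add_i32 t0, t1, $5"; and
 * for "add_i32 t0, t1, t0" the dest == a2 rule prefers "add_i32 t0, t0, t1"
 * so the destination matches the first source.
 */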

static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
{
    int sum = 0;
    sum += arg_is_const(p1[0]);
    sum += arg_is_const(p1[1]);
    sum -= arg_is_const(p2[0]);
    sum -= arg_is_const(p2[1]);
    if (sum > 0) {
        TCGArg t;
        t = p1[0], p1[0] = p2[0], p2[0] = t;
        t = p1[1], p1[1] = p2[1], p2[1] = t;
        return true;
    }
    return false;
}

static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args)
{
    for (int i = 0; i < nb_args; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        if (ts) {
            init_ts_info(ctx, ts);
        }
    }
}

static void copy_propagate(OptContext *ctx, TCGOp *op,
                           int nb_oargs, int nb_iargs)
{
    TCGContext *s = ctx->tcg;

    for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        if (ts && ts_is_copy(ts)) {
            op->args[i] = temp_arg(find_better_copy(s, ts));
        }
    }
}

static void finish_folding(OptContext *ctx, TCGOp *op)
{
    const TCGOpDef *def = &tcg_op_defs[op->opc];
    int i, nb_oargs;

    /*
     * For an opcode that ends a BB, reset all temp data.
     * We do no cross-BB optimization.
     */
    if (def->flags & TCG_OPF_BB_END) {
        memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
        ctx->prev_mb = NULL;
        return;
    }

    nb_oargs = def->nb_oargs;
    for (i = 0; i < nb_oargs; i++) {
        reset_temp(op->args[i]);
        /*
         * Save the corresponding known-zero bits mask for the
         * first output argument (only one supported so far).
         */
        if (i == 0) {
            arg_info(op->args[i])->z_mask = ctx->z_mask;
        }
    }
}

/*
 * The fold_* functions return true when processing is complete,
 * usually by folding the operation to a constant or to a copy,
 * and calling tcg_opt_gen_{mov,movi}.  They may do other things,
 * like collect information about the value produced, for use in
 * optimizing a subsequent operation.
 *
 * These first fold_* functions are all helpers, used by other
 * folders for more specific operations.
 */

static bool fold_const1(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1])) {
        uint64_t t;

        t = arg_info(op->args[1])->val;
        t = do_constant_folding(op->opc, ctx->type, t, 0);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }
    return false;
}

static bool fold_const2(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t t1 = arg_info(op->args[1])->val;
        uint64_t t2 = arg_info(op->args[2])->val;

        t1 = do_constant_folding(op->opc, ctx->type, t1, t2);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
    }
    return false;
}

static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
{
    swap_commutative(op->args[0], &op->args[1], &op->args[2]);
    return fold_const2(ctx, op);
}

static bool fold_masks(OptContext *ctx, TCGOp *op)
{
    uint64_t a_mask = ctx->a_mask;
    uint64_t z_mask = ctx->z_mask;

    /*
     * 32-bit ops generate 32-bit results, which for the purpose of
     * simplifying tcg are sign-extended.  Certainly that's how we
     * represent our constants elsewhere.  Note that the bits will
     * be reset properly for a 64-bit value when encountering the
     * type changing opcodes.
     */
    if (ctx->type == TCG_TYPE_I32) {
        a_mask = (int32_t)a_mask;
        z_mask = (int32_t)z_mask;
        ctx->z_mask = z_mask;
    }

    if (z_mask == 0) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
    }
    if (a_mask == 0) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}
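
/*
 * Note (illustrative): the (int32_t) casts above mirror how 32-bit
 * constants are stored elsewhere, e.g. a 32-bit z_mask of 0x80000000
 * becomes 0xffffffff80000000, matching the (int32_t) cast applied to
 * values in tcg_opt_gen_movi().
 */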

/*
 * Convert @op to NOT, if NOT is supported by the host.
 * Return true if the conversion is successful, which will still
 * indicate that the processing is complete.
 */
static bool fold_not(OptContext *ctx, TCGOp *op);
static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
{
    TCGOpcode not_op;
    bool have_not;

    switch (ctx->type) {
    case TCG_TYPE_I32:
        not_op = INDEX_op_not_i32;
        have_not = TCG_TARGET_HAS_not_i32;
        break;
    case TCG_TYPE_I64:
        not_op = INDEX_op_not_i64;
        have_not = TCG_TARGET_HAS_not_i64;
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        not_op = INDEX_op_not_vec;
        have_not = TCG_TARGET_HAS_not_vec;
        break;
    default:
        g_assert_not_reached();
    }
    if (have_not) {
        op->opc = not_op;
        op->args[1] = op->args[idx];
        return fold_not(ctx, op);
    }
    return false;
}

/* If the binary operation has first argument @i, fold to @i. */
static bool fold_ix_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has first argument @i, fold to NOT. */
static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) {
        return fold_to_not(ctx, op, 2);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to @i. */
static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to identity. */
static bool fold_xi_to_x(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to NOT. */
static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
        return fold_to_not(ctx, op, 1);
    }
    return false;
}

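/*
 * Example (illustrative): fold_eqv() below calls fold_xi_to_not(ctx, op, 0),
 * turning "eqv_i32 t0, t1, $0" into "not_i32 t0, t1", since ~(x ^ 0) == ~x.
 */
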
/* If the binary operation has both arguments equal, fold to @i. */
static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (args_are_copies(op->args[1], op->args[2])) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has both arguments equal, fold to identity. */
static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
{
    if (args_are_copies(op->args[1], op->args[2])) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}

/*
 * These outermost fold_<op> functions are sorted alphabetically.
 *
 * The ordering of the transformations should be:
 *   1) those that produce a constant
 *   2) those that produce a copy
 *   3) those that produce information about the result value.
 */

static bool fold_add(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_x(ctx, op, 0)) {
        return true;
    }
    return false;
}

static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
{
    if (arg_is_const(op->args[2]) && arg_is_const(op->args[3]) &&
        arg_is_const(op->args[4]) && arg_is_const(op->args[5])) {
        uint64_t al = arg_info(op->args[2])->val;
        uint64_t ah = arg_info(op->args[3])->val;
        uint64_t bl = arg_info(op->args[4])->val;
        uint64_t bh = arg_info(op->args[5])->val;
        TCGArg rl, rh;
        TCGOp *op2;

        if (ctx->type == TCG_TYPE_I32) {
            uint64_t a = deposit64(al, 32, 32, ah);
            uint64_t b = deposit64(bl, 32, 32, bh);

            if (add) {
                a += b;
            } else {
                a -= b;
            }

            al = sextract64(a, 0, 32);
            ah = sextract64(a, 32, 32);
        } else {
            Int128 a = int128_make128(al, ah);
            Int128 b = int128_make128(bl, bh);

            if (add) {
                a = int128_add(a, b);
            } else {
                a = int128_sub(a, b);
            }

            al = int128_getlo(a);
            ah = int128_gethi(a);
        }

        rl = op->args[0];
        rh = op->args[1];

        /* The proper opcode is supplied by tcg_opt_gen_mov. */
        op2 = tcg_op_insert_before(ctx->tcg, op, 0);

        tcg_opt_gen_movi(ctx, op, rl, al);
        tcg_opt_gen_movi(ctx, op2, rh, ah);
        return true;
    }
    return false;
}

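/*
 * Background note (illustrative): add2/sub2 express a double-word add/sub
 * as {low, high} argument pairs, e.g. on a 32-bit host a 64-bit add
 * appears as "add2_i32 rl, rh, al, ah, bl, bh".  When all four inputs are
 * constant, the whole result folds to the two movi ops generated above.
 */
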
static bool fold_add2(OptContext *ctx, TCGOp *op)
{
    /* Note that the high and low parts may be independently swapped. */
    swap_commutative(op->args[0], &op->args[2], &op->args[4]);
    swap_commutative(op->args[1], &op->args[3], &op->args[5]);

    return fold_addsub2(ctx, op, true);
}

static bool fold_and(OptContext *ctx, TCGOp *op)
{
    uint64_t z1, z2;

    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, -1) ||
        fold_xx_to_x(ctx, op)) {
        return true;
    }

    z1 = arg_info(op->args[1])->z_mask;
    z2 = arg_info(op->args[2])->z_mask;
    ctx->z_mask = z1 & z2;

    /*
     * Known-zeros does not imply known-ones.  Therefore unless
     * arg2 is constant, we can't infer affected bits from it.
     */
    if (arg_is_const(op->args[2])) {
        ctx->a_mask = z1 & ~z2;
    }

    return fold_masks(ctx, op);
}

static bool fold_andc(OptContext *ctx, TCGOp *op)
{
    uint64_t z1;

    if (fold_const2(ctx, op) ||
        fold_xx_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, 0) ||
        fold_ix_to_not(ctx, op, -1)) {
        return true;
    }

    z1 = arg_info(op->args[1])->z_mask;

    /*
     * Known-zeros does not imply known-ones.  Therefore unless
     * arg2 is constant, we can't infer anything from it.
     */
    if (arg_is_const(op->args[2])) {
        uint64_t z2 = ~arg_info(op->args[2])->z_mask;
        ctx->a_mask = z1 & ~z2;
        z1 &= z2;
    }
    ctx->z_mask = z1;

    return fold_masks(ctx, op);
}

static bool fold_brcond(OptContext *ctx, TCGOp *op)
{
    TCGCond cond = op->args[2];
    int i;

    if (swap_commutative(NO_DEST, &op->args[0], &op->args[1])) {
        op->args[2] = cond = tcg_swap_cond(cond);
    }

    i = do_constant_folding_cond(ctx->type, op->args[0], op->args[1], cond);
    if (i == 0) {
        tcg_op_remove(ctx->tcg, op);
        return true;
    }
    if (i > 0) {
        op->opc = INDEX_op_br;
        op->args[0] = op->args[3];
    }
    return false;
}

static bool fold_brcond2(OptContext *ctx, TCGOp *op)
{
    TCGCond cond = op->args[4];
    TCGArg label = op->args[5];
    int i, inv = 0;

    if (swap_commutative2(&op->args[0], &op->args[2])) {
        op->args[4] = cond = tcg_swap_cond(cond);
    }

    i = do_constant_folding_cond2(&op->args[0], &op->args[2], cond);
    if (i >= 0) {
        goto do_brcond_const;
    }

    switch (cond) {
    case TCG_COND_LT:
    case TCG_COND_GE:
        /*
         * Simplify LT/GE comparisons vs zero to a single compare
         * vs the high word of the input.
         */
        if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == 0 &&
            arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0) {
            goto do_brcond_high;
        }
        break;

    case TCG_COND_NE:
        inv = 1;
        QEMU_FALLTHROUGH;
    case TCG_COND_EQ:
        /*
         * Simplify EQ/NE comparisons where one of the pairs
         * can be simplified.
         */
        i = do_constant_folding_cond(TCG_TYPE_I32, op->args[0],
                                     op->args[2], cond);
        switch (i ^ inv) {
        case 0:
            goto do_brcond_const;
        case 1:
            goto do_brcond_high;
        }

        i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
                                     op->args[3], cond);
        switch (i ^ inv) {
        case 0:
            goto do_brcond_const;
        case 1:
            op->opc = INDEX_op_brcond_i32;
            op->args[1] = op->args[2];
            op->args[2] = cond;
            op->args[3] = label;
            break;
        }
        break;

    default:
        break;

    do_brcond_high:
        op->opc = INDEX_op_brcond_i32;
        op->args[0] = op->args[1];
        op->args[1] = op->args[3];
        op->args[2] = cond;
        op->args[3] = label;
        break;

    do_brcond_const:
        if (i == 0) {
            tcg_op_remove(ctx->tcg, op);
            return true;
        }
        op->opc = INDEX_op_br;
        op->args[0] = label;
        break;
    }
    return false;
}

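/*
 * Example (illustrative): "brcond2 al, ah, $0, $0, lt, L" tests a 64-bit
 * value < 0 on a 32-bit host; only the sign of the high word matters, so
 * the do_brcond_high path above rewrites it as "brcond_i32 ah, $0, lt, L".
 */
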
static bool fold_bswap(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask, sign;

    if (arg_is_const(op->args[1])) {
        uint64_t t = arg_info(op->args[1])->val;

        t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }

    z_mask = arg_info(op->args[1])->z_mask;
    switch (op->opc) {
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
        z_mask = bswap16(z_mask);
        sign = INT16_MIN;
        break;
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
        z_mask = bswap32(z_mask);
        sign = INT32_MIN;
        break;
    case INDEX_op_bswap64_i64:
        z_mask = bswap64(z_mask);
        sign = INT64_MIN;
        break;
    default:
        g_assert_not_reached();
    }

    switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
    case TCG_BSWAP_OZ:
        break;
    case TCG_BSWAP_OS:
        /* If the sign bit may be 1, force all the bits above to 1. */
        if (z_mask & sign) {
            z_mask |= sign;
        }
        break;
    default:
        /* The high bits are undefined: force all bits above the sign to 1. */
        z_mask |= sign << 1;
        break;
    }
    ctx->z_mask = z_mask;

    return fold_masks(ctx, op);
}

static bool fold_call(OptContext *ctx, TCGOp *op)
{
    TCGContext *s = ctx->tcg;
    int nb_oargs = TCGOP_CALLO(op);
    int nb_iargs = TCGOP_CALLI(op);
    int flags, i;

    init_arguments(ctx, op, nb_oargs + nb_iargs);
    copy_propagate(ctx, op, nb_oargs, nb_iargs);

    /* If the function reads or writes globals, reset temp data. */
    flags = tcg_call_flags(op);
    if (!(flags & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
        int nb_globals = s->nb_globals;

        for (i = 0; i < nb_globals; i++) {
            if (test_bit(i, ctx->temps_used.l)) {
                reset_ts(&ctx->tcg->temps[i]);
            }
        }
    }

    /* Reset temp data for outputs. */
    for (i = 0; i < nb_oargs; i++) {
        reset_temp(op->args[i]);
    }

    /* Stop optimizing MB across calls. */
    ctx->prev_mb = NULL;
    return true;
}

static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask;

    if (arg_is_const(op->args[1])) {
        uint64_t t = arg_info(op->args[1])->val;

        if (t != 0) {
            t = do_constant_folding(op->opc, ctx->type, t, 0);
            return tcg_opt_gen_movi(ctx, op, op->args[0], t);
        }
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
    }

    switch (ctx->type) {
    case TCG_TYPE_I32:
        z_mask = 31;
        break;
    case TCG_TYPE_I64:
        z_mask = 63;
        break;
    default:
        g_assert_not_reached();
    }
    ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;

    return false;
}

static bool fold_ctpop(OptContext *ctx, TCGOp *op)
{
    if (fold_const1(ctx, op)) {
        return true;
    }

    switch (ctx->type) {
    case TCG_TYPE_I32:
        ctx->z_mask = 32 | 31;
        break;
    case TCG_TYPE_I64:
        ctx->z_mask = 64 | 63;
        break;
    default:
        g_assert_not_reached();
    }
    return false;
}

static bool fold_deposit(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t t1 = arg_info(op->args[1])->val;
        uint64_t t2 = arg_info(op->args[2])->val;

        t1 = deposit64(t1, op->args[3], op->args[4], t2);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
    }

    ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask,
                            op->args[3], op->args[4],
                            arg_info(op->args[2])->z_mask);
    return false;
}

static bool fold_divide(OptContext *ctx, TCGOp *op)
{
    if (fold_const2(ctx, op) ||
        fold_xi_to_x(ctx, op, 1)) {
        return true;
    }
    return false;
}

static bool fold_dup(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1])) {
        uint64_t t = arg_info(op->args[1])->val;
        t = dup_const(TCGOP_VECE(op), t);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }
    return false;
}

static bool fold_dup2(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t t = deposit64(arg_info(op->args[1])->val, 32, 32,
                               arg_info(op->args[2])->val);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }

    if (args_are_copies(op->args[1], op->args[2])) {
        op->opc = INDEX_op_dup_vec;
        TCGOP_VECE(op) = MO_32;
    }
    return false;
}

static bool fold_eqv(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_x(ctx, op, -1) ||
        fold_xi_to_not(ctx, op, 0)) {
        return true;
    }
    return false;
}

static bool fold_extract(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask_old, z_mask;

    if (arg_is_const(op->args[1])) {
        uint64_t t;

        t = arg_info(op->args[1])->val;
        t = extract64(t, op->args[2], op->args[3]);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }

    z_mask_old = arg_info(op->args[1])->z_mask;
    z_mask = extract64(z_mask_old, op->args[2], op->args[3]);
    if (op->args[2] == 0) {
        ctx->a_mask = z_mask_old ^ z_mask;
    }
    ctx->z_mask = z_mask;

    return fold_masks(ctx, op);
}

static bool fold_extract2(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t v1 = arg_info(op->args[1])->val;
        uint64_t v2 = arg_info(op->args[2])->val;
        int shr = op->args[3];

        if (op->opc == INDEX_op_extract2_i64) {
            v1 >>= shr;
            v2 <<= 64 - shr;
        } else {
            v1 = (uint32_t)v1 >> shr;
            v2 = (int32_t)v2 << (32 - shr);
        }
        return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
    }
    return false;
}

static bool fold_exts(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask_old, z_mask, sign;
    bool type_change = false;

    if (fold_const1(ctx, op)) {
        return true;
    }

    z_mask_old = z_mask = arg_info(op->args[1])->z_mask;

    switch (op->opc) {
    CASE_OP_32_64(ext8s):
        sign = INT8_MIN;
        z_mask = (uint8_t)z_mask;
        break;
    CASE_OP_32_64(ext16s):
        sign = INT16_MIN;
        z_mask = (uint16_t)z_mask;
        break;
    case INDEX_op_ext_i32_i64:
        type_change = true;
        QEMU_FALLTHROUGH;
    case INDEX_op_ext32s_i64:
        sign = INT32_MIN;
        z_mask = (uint32_t)z_mask;
        break;
    default:
        g_assert_not_reached();
    }

    if (z_mask & sign) {
        z_mask |= sign;
    } else if (!type_change) {
        ctx->a_mask = z_mask_old ^ z_mask;
    }
    ctx->z_mask = z_mask;

    return fold_masks(ctx, op);
}

static bool fold_extu(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask_old, z_mask;
    bool type_change = false;

    if (fold_const1(ctx, op)) {
        return true;
    }

    z_mask_old = z_mask = arg_info(op->args[1])->z_mask;

    switch (op->opc) {
    CASE_OP_32_64(ext8u):
        z_mask = (uint8_t)z_mask;
        break;
    CASE_OP_32_64(ext16u):
        z_mask = (uint16_t)z_mask;
        break;
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extu_i32_i64:
        type_change = true;
        QEMU_FALLTHROUGH;
    case INDEX_op_ext32u_i64:
        z_mask = (uint32_t)z_mask;
        break;
    case INDEX_op_extrh_i64_i32:
        type_change = true;
        z_mask >>= 32;
        break;
    default:
        g_assert_not_reached();
    }

    ctx->z_mask = z_mask;
    if (!type_change) {
        ctx->a_mask = z_mask_old ^ z_mask;
    }
    return fold_masks(ctx, op);
}

static bool fold_mb(OptContext *ctx, TCGOp *op)
{
    /* Eliminate duplicate and redundant fence instructions. */
    if (ctx->prev_mb) {
        /*
         * Merge two barriers of the same type into one,
         * or a weaker barrier into a stronger one,
         * or two weaker barriers into a stronger one.
         *   mb X; mb Y => mb X|Y
         *   mb; strl => mb; st
         *   ldaq; mb => ld; mb
         *   ldaq; strl => ld; mb; st
         * Other combinations are also merged into a strong
         * barrier.  This is stricter than specified but for
         * the purposes of TCG is better than not optimizing.
         */
        ctx->prev_mb->args[0] |= op->args[0];
        tcg_op_remove(ctx->tcg, op);
    } else {
        ctx->prev_mb = op;
    }
    return true;
}

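/*
 * Example (illustrative): two back-to-back "mb TCG_MO_LD_LD" ops merge
 * into one, and "mb TCG_MO_LD_LD; mb TCG_MO_ST_ST" merges into a single
 * "mb TCG_MO_LD_LD|TCG_MO_ST_ST", which is at least as strong as either.
 */
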
static bool fold_mov(OptContext *ctx, TCGOp *op)
{
    return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
}

static bool fold_movcond(OptContext *ctx, TCGOp *op)
{
    TCGCond cond = op->args[5];
    int i;

    if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
        op->args[5] = cond = tcg_swap_cond(cond);
    }
    /*
     * Canonicalize the "false" input reg to match the destination reg so
     * that the tcg backend can implement a "move if true" operation.
     */
    if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
        op->args[5] = cond = tcg_invert_cond(cond);
    }

    i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
    if (i >= 0) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
    }

    ctx->z_mask = arg_info(op->args[3])->z_mask
                | arg_info(op->args[4])->z_mask;

    if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
        uint64_t tv = arg_info(op->args[3])->val;
        uint64_t fv = arg_info(op->args[4])->val;
        TCGOpcode opc;

        switch (ctx->type) {
        case TCG_TYPE_I32:
            opc = INDEX_op_setcond_i32;
            break;
        case TCG_TYPE_I64:
            opc = INDEX_op_setcond_i64;
            break;
        default:
            g_assert_not_reached();
        }

        if (tv == 1 && fv == 0) {
            op->opc = opc;
            op->args[3] = cond;
        } else if (fv == 1 && tv == 0) {
            op->opc = opc;
            op->args[3] = tcg_invert_cond(cond);
        }
    }
    return false;
}

static bool fold_mul(OptContext *ctx, TCGOp *op)
{
    if (fold_const2(ctx, op) ||
        fold_xi_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, 1)) {
        return true;
    }
    return false;
}

static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_i(ctx, op, 0)) {
        return true;
    }
    return false;
}

static bool fold_multiply2(OptContext *ctx, TCGOp *op)
{
    swap_commutative(op->args[0], &op->args[2], &op->args[3]);

    if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
        uint64_t a = arg_info(op->args[2])->val;
        uint64_t b = arg_info(op->args[3])->val;
        uint64_t h, l;
        TCGArg rl, rh;
        TCGOp *op2;

        switch (op->opc) {
        case INDEX_op_mulu2_i32:
            l = (uint64_t)(uint32_t)a * (uint32_t)b;
            h = (int32_t)(l >> 32);
            l = (int32_t)l;
            break;
        case INDEX_op_muls2_i32:
            l = (int64_t)(int32_t)a * (int32_t)b;
            h = l >> 32;
            l = (int32_t)l;
            break;
        case INDEX_op_mulu2_i64:
            mulu64(&l, &h, a, b);
            break;
        case INDEX_op_muls2_i64:
            muls64(&l, &h, a, b);
            break;
        default:
            g_assert_not_reached();
        }

        rl = op->args[0];
        rh = op->args[1];

        /* The proper opcode is supplied by tcg_opt_gen_mov. */
        op2 = tcg_op_insert_before(ctx->tcg, op, 0);

        tcg_opt_gen_movi(ctx, op, rl, l);
        tcg_opt_gen_movi(ctx, op2, rh, h);
        return true;
    }
    return false;
}

static bool fold_nand(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_not(ctx, op, -1)) {
        return true;
    }
    return false;
}

static bool fold_neg(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask;

    if (fold_const1(ctx, op)) {
        return true;
    }

    /* Set to 1 all bits to the left of the rightmost. */
    z_mask = arg_info(op->args[1])->z_mask;
    ctx->z_mask = -(z_mask & -z_mask);

    /*
     * Because of fold_sub_to_neg, we want to always return true,
     * via finish_folding.
     */
    finish_folding(ctx, op);
    return true;
}

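/*
 * Worked example (illustrative): if the input's z_mask is 0b1100, its
 * lowest possibly-set bit is bit 2, so z & -z == 0b100 and
 * -(z & -z) == ...11111100: the negation keeps bits 0-1 known zero
 * while everything above the rightmost possible bit becomes unknown.
 */
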
static bool fold_nor(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_not(ctx, op, 0)) {
        return true;
    }
    return false;
}

static bool fold_not(OptContext *ctx, TCGOp *op)
{
    if (fold_const1(ctx, op)) {
        return true;
    }

    /* Because of fold_to_not, we want to always return true, via finish. */
    finish_folding(ctx, op);
    return true;
}

1579 | { | |
7a2f7084 | 1580 | if (fold_const2_commutative(ctx, op) || |
a63ce0e9 | 1581 | fold_xi_to_x(ctx, op, 0) || |
ca7bb049 RH |
1582 | fold_xx_to_x(ctx, op)) { |
1583 | return true; | |
1584 | } | |
fae450ba RH |
1585 | |
1586 | ctx->z_mask = arg_info(op->args[1])->z_mask | |
1587 | | arg_info(op->args[2])->z_mask; | |
1588 | return fold_masks(ctx, op); | |
2f9f08ba RH |
1589 | } |
1590 | ||
static bool fold_orc(OptContext *ctx, TCGOp *op)
{
    if (fold_const2(ctx, op) ||
        fold_xx_to_i(ctx, op, -1) ||
        fold_xi_to_x(ctx, op, -1) ||
        fold_ix_to_not(ctx, op, 0)) {
        return true;
    }
    return false;
}

static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
{
    const TCGOpDef *def = &tcg_op_defs[op->opc];
    MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
    MemOp mop = get_memop(oi);
    int width = 8 * memop_size(mop);

    if (!(mop & MO_SIGN) && width < 64) {
        ctx->z_mask = MAKE_64BIT_MASK(0, width);
    }

    /* Opcodes that touch guest memory stop the mb optimization. */
    ctx->prev_mb = NULL;
    return false;
}

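/*
 * Example (illustrative): an 8-bit unsigned guest load (MO_UB) has
 * width == 8, so the result's z_mask becomes 0xff; signed or 64-bit
 * loads give no such guarantee and the mask is left untouched.
 */
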
static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
{
    /* Opcodes that touch guest memory stop the mb optimization. */
    ctx->prev_mb = NULL;
    return false;
}

static bool fold_remainder(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}

static bool fold_setcond(OptContext *ctx, TCGOp *op)
{
    TCGCond cond = op->args[3];
    int i;

    if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) {
        op->args[3] = cond = tcg_swap_cond(cond);
    }

    i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
    if (i >= 0) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }

    ctx->z_mask = 1;
    return false;
}

static bool fold_setcond2(OptContext *ctx, TCGOp *op)
{
    TCGCond cond = op->args[5];
    int i, inv = 0;

    if (swap_commutative2(&op->args[1], &op->args[3])) {
        op->args[5] = cond = tcg_swap_cond(cond);
    }

    i = do_constant_folding_cond2(&op->args[1], &op->args[3], cond);
    if (i >= 0) {
        goto do_setcond_const;
    }

    switch (cond) {
    case TCG_COND_LT:
    case TCG_COND_GE:
        /*
         * Simplify LT/GE comparisons vs zero to a single compare
         * vs the high word of the input.
         */
        if (arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0 &&
            arg_is_const(op->args[4]) && arg_info(op->args[4])->val == 0) {
            goto do_setcond_high;
        }
        break;

    case TCG_COND_NE:
        inv = 1;
        QEMU_FALLTHROUGH;
    case TCG_COND_EQ:
        /*
         * Simplify EQ/NE comparisons where one of the pairs
         * can be simplified.
         */
        i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
                                     op->args[3], cond);
        switch (i ^ inv) {
        case 0:
            goto do_setcond_const;
        case 1:
            goto do_setcond_high;
        }

        i = do_constant_folding_cond(TCG_TYPE_I32, op->args[2],
                                     op->args[4], cond);
        switch (i ^ inv) {
        case 0:
            goto do_setcond_const;
        case 1:
            op->args[2] = op->args[3];
            op->args[3] = cond;
            op->opc = INDEX_op_setcond_i32;
            break;
        }
        break;

    default:
        break;

    do_setcond_high:
        op->args[1] = op->args[2];
        op->args[2] = op->args[4];
        op->args[3] = cond;
        op->opc = INDEX_op_setcond_i32;
        break;
    }

    ctx->z_mask = 1;
    return false;

 do_setcond_const:
    return tcg_opt_gen_movi(ctx, op, op->args[0], i);
}

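/*
 * Worked example for the mask tracking below: with z_mask_old == 0x7f,
 * sextract(x, 0, 8) cannot observe a set sign bit, so z_mask stays
 * 0x7f and a_mask becomes 0, i.e. every result bit is known identical
 * to the input, and fold_masks() can replace the op with a plain copy.
 */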
static bool fold_sextract(OptContext *ctx, TCGOp *op)
{
    int64_t z_mask_old, z_mask;

    if (arg_is_const(op->args[1])) {
        uint64_t t;

        t = arg_info(op->args[1])->val;
        t = sextract64(t, op->args[2], op->args[3]);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }

    z_mask_old = arg_info(op->args[1])->z_mask;
    z_mask = sextract64(z_mask_old, op->args[2], op->args[3]);
    if (op->args[2] == 0 && z_mask >= 0) {
        ctx->a_mask = z_mask_old ^ z_mask;
    }
    ctx->z_mask = z_mask;

    return fold_masks(ctx, op);
}

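/*
 * When the shift count is constant, the known-zero bits move exactly
 * as the value bits do, so the new z_mask can be computed by applying
 * the same shift to the old mask: e.g. z_mask == 0xff shifted left by
 * 4 gives z_mask == 0xff0.
 */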
static bool fold_shift(OptContext *ctx, TCGOp *op)
{
    if (fold_const2(ctx, op) ||
        fold_ix_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, 0)) {
        return true;
    }

    if (arg_is_const(op->args[2])) {
        ctx->z_mask = do_constant_folding(op->opc, ctx->type,
                                          arg_info(op->args[1])->z_mask,
                                          arg_info(op->args[2])->val);
        return fold_masks(ctx, op);
    }
    return false;
}

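/*
 * Canonicalize subtraction from zero into negation, e.g.
 *     sub_i32 r, 0, x  ->  neg_i32 r, x
 * provided the backend implements the corresponding neg opcode
 * (and, for vectors, supports it at this element size).
 */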
static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
{
    TCGOpcode neg_op;
    bool have_neg;

    if (!arg_is_const(op->args[1]) || arg_info(op->args[1])->val != 0) {
        return false;
    }

    switch (ctx->type) {
    case TCG_TYPE_I32:
        neg_op = INDEX_op_neg_i32;
        have_neg = TCG_TARGET_HAS_neg_i32;
        break;
    case TCG_TYPE_I64:
        neg_op = INDEX_op_neg_i64;
        have_neg = TCG_TARGET_HAS_neg_i64;
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        neg_op = INDEX_op_neg_vec;
        have_neg = (TCG_TARGET_HAS_neg_vec &&
                    tcg_can_emit_vec_op(neg_op, ctx->type, TCGOP_VECE(op)) > 0);
        break;
    default:
        g_assert_not_reached();
    }
    if (have_neg) {
        op->opc = neg_op;
        op->args[1] = op->args[2];
        return fold_neg(ctx, op);
    }
    return false;
}

static bool fold_sub(OptContext *ctx, TCGOp *op)
{
    if (fold_const2(ctx, op) ||
        fold_xx_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, 0) ||
        fold_sub_to_neg(ctx, op)) {
        return true;
    }
    return false;
}

static bool fold_sub2(OptContext *ctx, TCGOp *op)
{
    return fold_addsub2(ctx, op, false);
}

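/*
 * These are the plain tcg ld opcodes (host-side loads, e.g. from env),
 * not guest memory accesses.  As with fold_qemu_ld above, an unsigned
 * N-bit load can only populate the low N bits of its result.
 */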
static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
{
    /* We can't do any folding with a load, but we can record bits. */
    switch (op->opc) {
    CASE_OP_32_64(ld8u):
        ctx->z_mask = MAKE_64BIT_MASK(0, 8);
        break;
    CASE_OP_32_64(ld16u):
        ctx->z_mask = MAKE_64BIT_MASK(0, 16);
        break;
    case INDEX_op_ld32u_i64:
        ctx->z_mask = MAKE_64BIT_MASK(0, 32);
        break;
    default:
        g_assert_not_reached();
    }
    return false;
}

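/*
 * xor identities handled below: x ^ x -> 0, x ^ 0 -> x, x ^ -1 -> ~x.
 * For the residual case, the union of the input z_masks is a correct,
 * conservative bound: a result bit can be set only if it was set in
 * at least one input.
 */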
static bool fold_xor(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xx_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, 0) ||
        fold_xi_to_not(ctx, op, -1)) {
        return true;
    }

    ctx->z_mask = arg_info(op->args[1])->z_mask
                | arg_info(op->args[2])->z_mask;
    return fold_masks(ctx, op);
}

/* Propagate constants and copies, fold constant expressions. */
void tcg_optimize(TCGContext *s)
{
    int nb_temps, i;
    TCGOp *op, *op_next;
    OptContext ctx = { .tcg = s };

    /*
     * Each temp carries a TempOptInfo hung off its state_ptr.  If the
     * temp holds a constant, the value is kept there; if the temp is a
     * copy of others, those copies are reachable through the doubly
     * linked circular list formed by prev_copy/next_copy.
     */

    nb_temps = s->nb_temps;
    for (i = 0; i < nb_temps; ++i) {
        s->temps[i].state_ptr = NULL;
    }

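    /*
     * Visit each op in execution order: propagate known constants and
     * copies into its arguments, attempt an opcode-specific fold, and
     * if nothing folded, record the op's output facts for later ops.
     */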
    QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
        TCGOpcode opc = op->opc;
        const TCGOpDef *def;
        bool done = false;

        /* Calls are special. */
        if (opc == INDEX_op_call) {
            fold_call(&ctx, op);
            continue;
        }

        def = &tcg_op_defs[opc];
        init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs);
        copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs);

        /* Pre-compute the type of the operation. */
        if (def->flags & TCG_OPF_VECTOR) {
            ctx.type = TCG_TYPE_V64 + TCGOP_VECL(op);
        } else if (def->flags & TCG_OPF_64BIT) {
            ctx.type = TCG_TYPE_I64;
        } else {
            ctx.type = TCG_TYPE_I32;
        }

        /* Assume all bits affected, and no bits known zero. */
        ctx.a_mask = -1;
        ctx.z_mask = -1;

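        /*
         * Per the OptContext conventions, a clear a_mask bit means
         * that result bit equals the first input's bit, and a clear
         * z_mask bit means that result bit is known zero.  The fold
         * functions narrow these from -1 where they can; fold_masks()
         * then uses them to drop or simplify the op.
         */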
        /*
         * Process each opcode.
         * Sorted alphabetically by opcode as much as possible.
         */
        switch (opc) {
        CASE_OP_32_64_VEC(add):
            done = fold_add(&ctx, op);
            break;
        CASE_OP_32_64(add2):
            done = fold_add2(&ctx, op);
            break;
        CASE_OP_32_64_VEC(and):
            done = fold_and(&ctx, op);
            break;
        CASE_OP_32_64_VEC(andc):
            done = fold_andc(&ctx, op);
            break;
        CASE_OP_32_64(brcond):
            done = fold_brcond(&ctx, op);
            break;
        case INDEX_op_brcond2_i32:
            done = fold_brcond2(&ctx, op);
            break;
        CASE_OP_32_64(bswap16):
        CASE_OP_32_64(bswap32):
        case INDEX_op_bswap64_i64:
            done = fold_bswap(&ctx, op);
            break;
        CASE_OP_32_64(clz):
        CASE_OP_32_64(ctz):
            done = fold_count_zeros(&ctx, op);
            break;
        CASE_OP_32_64(ctpop):
            done = fold_ctpop(&ctx, op);
            break;
        CASE_OP_32_64(deposit):
            done = fold_deposit(&ctx, op);
            break;
        CASE_OP_32_64(div):
        CASE_OP_32_64(divu):
            done = fold_divide(&ctx, op);
            break;
        case INDEX_op_dup_vec:
            done = fold_dup(&ctx, op);
            break;
        case INDEX_op_dup2_vec:
            done = fold_dup2(&ctx, op);
            break;
        CASE_OP_32_64(eqv):
            done = fold_eqv(&ctx, op);
            break;
        CASE_OP_32_64(extract):
            done = fold_extract(&ctx, op);
            break;
        CASE_OP_32_64(extract2):
            done = fold_extract2(&ctx, op);
            break;
        CASE_OP_32_64(ext8s):
        CASE_OP_32_64(ext16s):
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext_i32_i64:
            done = fold_exts(&ctx, op);
            break;
        CASE_OP_32_64(ext8u):
        CASE_OP_32_64(ext16u):
        case INDEX_op_ext32u_i64:
        case INDEX_op_extu_i32_i64:
        case INDEX_op_extrl_i64_i32:
        case INDEX_op_extrh_i64_i32:
            done = fold_extu(&ctx, op);
            break;
        CASE_OP_32_64(ld8u):
        CASE_OP_32_64(ld16u):
        case INDEX_op_ld32u_i64:
            done = fold_tcg_ld(&ctx, op);
            break;
        case INDEX_op_mb:
            done = fold_mb(&ctx, op);
            break;
        CASE_OP_32_64_VEC(mov):
            done = fold_mov(&ctx, op);
            break;
        CASE_OP_32_64(movcond):
            done = fold_movcond(&ctx, op);
            break;
        CASE_OP_32_64(mul):
            done = fold_mul(&ctx, op);
            break;
        CASE_OP_32_64(mulsh):
        CASE_OP_32_64(muluh):
            done = fold_mul_highpart(&ctx, op);
            break;
        CASE_OP_32_64(muls2):
        CASE_OP_32_64(mulu2):
            done = fold_multiply2(&ctx, op);
            break;
        CASE_OP_32_64(nand):
            done = fold_nand(&ctx, op);
            break;
        CASE_OP_32_64(neg):
            done = fold_neg(&ctx, op);
            break;
        CASE_OP_32_64(nor):
            done = fold_nor(&ctx, op);
            break;
        CASE_OP_32_64_VEC(not):
            done = fold_not(&ctx, op);
            break;
        CASE_OP_32_64_VEC(or):
            done = fold_or(&ctx, op);
            break;
        CASE_OP_32_64_VEC(orc):
            done = fold_orc(&ctx, op);
            break;
        case INDEX_op_qemu_ld_i32:
        case INDEX_op_qemu_ld_i64:
            done = fold_qemu_ld(&ctx, op);
            break;
        case INDEX_op_qemu_st_i32:
        case INDEX_op_qemu_st8_i32:
        case INDEX_op_qemu_st_i64:
            done = fold_qemu_st(&ctx, op);
            break;
        CASE_OP_32_64(rem):
        CASE_OP_32_64(remu):
            done = fold_remainder(&ctx, op);
            break;
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
            done = fold_shift(&ctx, op);
            break;
        CASE_OP_32_64(setcond):
            done = fold_setcond(&ctx, op);
            break;
        case INDEX_op_setcond2_i32:
            done = fold_setcond2(&ctx, op);
            break;
        CASE_OP_32_64(sextract):
            done = fold_sextract(&ctx, op);
            break;
        CASE_OP_32_64_VEC(sub):
            done = fold_sub(&ctx, op);
            break;
        CASE_OP_32_64(sub2):
            done = fold_sub2(&ctx, op);
            break;
        CASE_OP_32_64_VEC(xor):
            done = fold_xor(&ctx, op);
            break;
        default:
            break;
        }

        if (!done) {
            finish_folding(&ctx, op);
        }
    }
}